content | sha1 | id
---|---|---
import re
def validar_cnpj(cnpj):
"""
Validates a CNPJ, returning only the validated string of digits.
# Invalid CNPJs
>>> validar_cnpj('abcdefghijklmn')
False
>>> validar_cnpj('123')
False
>>> validar_cnpj('')
False
>>> validar_cnpj(None)
False
>>> validar_cnpj('12345678901234')
False
>>> validar_cnpj('11222333000100')
False
# Valid CNPJs
>>> validar_cnpj('11222333000181')
'11222333000181'
>>> validar_cnpj('11.222.333/0001-81')
'11222333000181'
>>> validar_cnpj(' 11 222 333 0001 81 ')
'11222333000181'
"""
cnpj = ''.join(re.findall(r'\d', str(cnpj)))
if (not cnpj) or (len(cnpj) < 14):
return False
# Take only the first 12 digits of the CNPJ and
# generate the 2 missing check digits
inteiros = list(map(int, cnpj))
novo = inteiros[:12]
prod = [5, 4, 3, 2, 9, 8, 7, 6, 5, 4, 3, 2]
while len(novo) < 14:
r = sum([x*y for (x, y) in zip(novo, prod)]) % 11
if r > 1:
f = 11 - r
else:
f = 0
novo.append(f)
prod.insert(0, 6)
# If the generated number matches the original number, the CNPJ is valid
if novo == inteiros:
return cnpj
return False | 4b3d2591e6f196cccdd8d68089e36f22ba1d1a98 | 1,723 |
def km_miles(kilometers):
"""Usage: Convert kilometers to miles"""
return kilometers/1.609 | 5480c065f904dfc1959691e158653fd0e6bb67e6 | 1,724 |
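For illustration (assuming the function above is in scope), the marathon distance of 42.195 km converts to roughly 26.22 miles:
print(km_miles(42.195))  # ~26.22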
def is_enterprise_learner(user):
"""
Check if the given user belongs to an enterprise. Cache the value if an enterprise learner is found.
Arguments:
user (User): Django User object.
Returns:
(bool): True if given user is an enterprise learner.
"""
cached_is_enterprise_key = get_is_enterprise_cache_key(user.id)
if cache.get(cached_is_enterprise_key):
return True
if EnterpriseCustomerUser.objects.filter(user_id=user.id).exists():
# Cache the enterprise user for one hour.
cache.set(cached_is_enterprise_key, True, 3600)
return True
return False | 76bbf24dafec3ec26ec23504b8d064fbe5c21c52 | 1,725 |
def point_cloud(depth, colors):
"""Transform a depth image into a point cloud with one point for each
pixel in the image, using the camera transform for a camera
centred at (cx, cy) with focal lengths fx, fy.
depth is a 2-D ndarray with shape (rows, cols) containing
depths from 1 to 254 inclusive. The result is a 3-D array with
shape (rows, cols, 3). Pixels with invalid depth in the input have
NaN for the z-coordinate in the result.
"""
rows, cols = depth.shape
c, r = np.meshgrid(np.arange(cols), np.arange(rows), sparse=True)
valid = (depth > 0) & (depth < 255)
z = np.where(valid, depth / 256.0, np.nan)
x = np.where(valid, z * (c - cx) / fx, 0)
y = np.where(valid, z * (r - cy) / fy, 0)
points = np.dstack((x, y, z))
print('points:{}, colors:{}'.format(np.shape(points), np.shape(colors)))
reflect_matrix = np.identity(3) # reflect on x axis
reflect_matrix[0] *= -1
points = np.matmul(points, reflect_matrix)
out_fn = 'point_cloud.ply'
# filter by min disparity
mask = disparity > disparity.min()
out_points = points[mask]
out_colors = colors[mask]
idx = np.fabs(out_points[:, -1]) < 50 # 10.5 # filter by dimension
print('out_points:{}'.format(np.shape(out_points)))
out_points = out_points[idx]
out_colors = out_colors.reshape(-1, 3)
out_colors = out_colors[idx]
write_ply(out_fn, out_points, out_colors)
# reproject on the image -----------------------------------
reflected_pts = np.matmul(out_points, reflect_matrix)
projected_img, _ = cv2.projectPoints(reflected_pts, np.identity(3), np.array([0., 0., 0.]), K_left, D_left)
projected_img = projected_img.reshape(-1, 2)
blank_img = np.zeros(colors.shape, 'uint8')
img_colors = colors[mask][idx].reshape(-1, 3)
for i, pt in enumerate(projected_img):
pt_x = int(pt[0])
pt_y = int(pt[1])
if pt_x > 0 and pt_y > 0:
# use the BGR format to match the original image type
col = (int(img_colors[i, 2]), int(img_colors[i, 1]), int(img_colors[i, 0]))
cv2.circle(blank_img, (pt_x, pt_y), 1, col)
return blank_img, out_points | 75aa681fa817b29e23ed76beb8504ef1bbaa5d67 | 1,726 |
import pandas as pd
from tqdm import tqdm
def structural_email(data, pos_parser=True, bytedata_parser_threshold=50, reference_parser_match_type=2):
"""
This is a parser pipeline, parser order matters.
1. string => structure email to separate => header, body, others
2. body => remove typo and some irrelevant words => body
3. body => parse and remove email from body => body_no_email
4. body_no_email => parse and remove binary data like BMP or picture from body => body_no_binary_no_email
5. body_no_binary_no_email => separate email reference and reply => reply, previous_one, previous_two
@param data: data text series including all the training set or test set
@return: structural information
"""
print("Preprocessing for unstructure email...")
header_info = []
body_info = []
others_info = []
tag_info = []
for string in tqdm(data):
# structure parsers
header, body, others = structure_parser(string)
body = typo_parser(body)
body_no_email, emails = email_address_parser(body)
body_no_binary_no_email, bytedata = bytedata_parser(body_no_email, threshold=bytedata_parser_threshold)
# main parser
reply, previous_one, previous_two = reference_parser(body_no_binary_no_email, match_type=reference_parser_match_type)
if pos_parser:
target_tag = set(['NN', 'NNS', 'NNPS'])
tag_reply = pos_tag_parser(reply, target_tag)
tag_previous_one = pos_tag_parser(previous_one, target_tag)
tag_previous_two = pos_tag_parser(previous_two, target_tag)
tag_info.append([tag_reply, tag_previous_one, tag_previous_two])
# append data in loops
header_info.append(header)
body_info.append([reply, previous_one, previous_two])
others_info.append(others + [emails] + [bytedata])
a1 = pd.DataFrame.from_dict(header_info)
a2 = pd.DataFrame(body_info, columns=["reply", "reference_one", "reference_two"])
a3 = pd.DataFrame(others_info, columns=["date", "delivered_to", "to_domains", "error_message", "contained_emails", "long_string"])
if pos_parser:
a4 = pd.DataFrame(tag_info, columns=["tag_reply", "tag_reference_one", "tag_reference_two"])
structure_email = pd.concat([a1, a2, a3, a4], axis=1)
else:
structure_email = pd.concat([a1, a2, a3], axis=1)
return structure_email | b68227f10ae6e78f6e12ab174e2360c0828e2038 | 1,727 |
import six
def build_batches(data, conf, turn_cut_type='tail', term_cut_type='tail'):
"""
Build batches
"""
_turns_batches = []
_tt_turns_len_batches = []
_every_turn_len_batches = []
_response_batches = []
_response_len_batches = []
_label_batches = []
batch_len = len(data[six.b('y')]) // conf['batch_size']
for batch_index in six.moves.range(batch_len):
_turns, _tt_turns_len, _every_turn_len, _response, _response_len, _label = build_one_batch(
data, batch_index, conf, turn_cut_type=turn_cut_type, term_cut_type=term_cut_type)
_turns_batches.append(_turns)
_tt_turns_len_batches.append(_tt_turns_len)
_every_turn_len_batches.append(_every_turn_len)
_response_batches.append(_response)
_response_len_batches.append(_response_len)
_label_batches.append(_label)
ans = {
"turns": _turns_batches,
"tt_turns_len": _tt_turns_len_batches,
"every_turn_len": _every_turn_len_batches,
"response": _response_batches,
"response_len": _response_len_batches,
"label": _label_batches
}
return ans | e82411d5b51171c9590bd5f150dfeca666b3a3a6 | 1,728 |
def is_notebook():
"""Check if pyaedt is running in Jupyter or not.
Returns
-------
bool
"""
try:
shell = get_ipython().__class__.__name__
if shell == "ZMQInteractiveShell":
return True # Jupyter notebook or qtconsole
else:
return False
except NameError:
return False | 51c0806ba17cbaef5732379a5e9c68d8eb171d31 | 1,729 |
def strategy(history, memory):
"""
Tit-for-tat, except we punish them N times in a row if this is the Nth time they've
initiated a defection.
memory: (initiatedDefections, remainingPunitiveDefections)
"""
if memory is not None and memory[1] > 0:
choice = 0
memory = (memory[0], memory[1] - 1)
return choice, memory
num_rounds = history.shape[1]
opponents_last_move = history[1, -1] if num_rounds >= 1 else 1
our_last_move = history[0, -1] if num_rounds >= 1 else 1
our_second_last_move = history[0, -2] if num_rounds >= 2 else 1
opponent_initiated_defection = (
opponents_last_move == 0 and our_last_move == 1 and our_second_last_move == 1
)
choice = 0 if opponent_initiated_defection else 1
if choice == 0:
memory = (1, 0) if memory is None else (memory[0] + 1, memory[0])
return choice, memory | bf8d09417c246f9f88a721dfcc4408f49195fd1a | 1,730 |
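A toy, illustrative driver for the strategy above — a sketch assuming the history layout implied by the code (row 0 holds our moves, row 1 the opponent's, with 1 = cooperate and 0 = defect):
import numpy as np
history = np.array([[1, 1, 1], [1, 1, 0]])  # opponent defected last round, unprovoked
choice, memory = strategy(history, None)
print(choice, memory)  # 0 (1, 0): defect once, with no further punitive rounds queued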
def get_primitives(name=None, primitive_type=None, primitive_subtype=None):
"""Get a list of the available primitives.
Optionally filter by primitive type: ``transformation`` or ``aggregation``.
Args:
name (str):
Filter by primitive name.
primitive_type (str):
Filter by primitive type. ``transformation`` or ``aggregation``.
primitive_subtype (str):
Filter by primitive subtype. ``amplitude``, ``frequency`` or ``frequency_time``.
Returns:
list:
List of the names of the available primitives.
"""
filters = {}
if primitive_type:
if primitive_type not in ('transformation', 'aggregation'):
raise ValueError('primitive_type must be `transformation` or `aggregation`.')
filters['classifiers.type'] = primitive_type
if primitive_subtype:
if primitive_subtype not in ('amplitude', 'frequency', 'frequency_time'):
raise ValueError(
'primitive_subtype must be `amplitude`, `frequency` or `frequency_time`.')
filters['classifiers.subtype'] = primitive_subtype
return discovery.find_primitives(name or 'sigpro', filters) | c833a2b1d52dc135a4518aa6fa7147ae58b73b9a | 1,731 |
def _unpack_batch_channel(data, old_shape):
"""Unpack the data channel dimension.
"""
data = nnvm.sym.transpose(data, axes=(0, 4, 1, 5, 2, 3))
data = nnvm.sym.reshape(data, shape=old_shape)
return data | 1b59f6fbceabef3a28b4180a5bc808621e11c6b7 | 1,732 |
def get_branch_user(branch):
"""Get user name for given branch."""
with Command('git', 'log', '--pretty=tformat:%an', '-1', branch) as cmd:
for line in cmd:
return line | 0845dc69cbd949c1f739ca877c0b182740fa7bdb | 1,733 |
import sys
from typing import Mapping
from electrum_cintamani import qrscanner
def find_system_cameras() -> Mapping[str, str]:
"""Returns a camera_description -> camera_path map."""
if sys.platform == 'darwin' or sys.platform in ('windows', 'win32'):
try:
from .qtmultimedia import find_system_cameras as _find_system_cameras
except ImportError as e:
return {}
else:
return _find_system_cameras()
else: # desktop Linux and similar
return qrscanner.find_system_cameras() | adefb85f99494f71e1c55e74f8b4e589d96daacf | 1,734 |
def _shape_list(x):
"""Return list of dims, statically where possible."""
static = x.get_shape().as_list()
shape = tf.shape(x)
ret = []
for i, static_dim in enumerate(static):
dim = static_dim or shape[i]
ret.append(dim)
return ret | 0add2ba771dd99817654ce48c745db5c5f09d3aa | 1,735 |
import packaging
def upgrade_common(ctx, config, deploy_style):
"""
Common code for upgrading
"""
remotes = upgrade_remote_to_config(ctx, config)
project = config.get('project', 'ceph')
extra_pkgs = config.get('extra_packages', [])
log.info('extra packages: {packages}'.format(packages=extra_pkgs))
for remote, node in remotes.items():
system_type = teuthology.get_system_type(remote)
assert system_type in ('deb', 'rpm')
pkgs = get_package_list(ctx, config)[system_type]
log.info("Upgrading {proj} {system_type} packages: {pkgs}".format(
proj=project, system_type=system_type, pkgs=', '.join(pkgs)))
if isinstance(extra_pkgs, dict):
pkgs += extra_pkgs.get(system_type, [])
else:
pkgs += extra_pkgs
installed_version = packaging.get_package_version(remote, 'ceph-common')
upgrade_version = get_upgrade_version(ctx, node, remote)
log.info("Ceph {s} upgrade from {i} to {u}".format(
s=system_type,
i=installed_version,
u=upgrade_version
))
if _upgrade_is_downgrade(installed_version, upgrade_version):
raise RuntimeError(
"An attempt to upgrade from a higher version to a lower one "
"will always fail. Hint: check tags in the target git branch."
)
deploy_style(ctx, node, remote, pkgs, system_type)
verify_package_version(ctx, node, remote)
return len(remotes) | 13840203bceb6b6ae069d47fa03a278dac3b0bc6 | 1,736 |
def convert2define(name):
"""
returns the name of the define used according to 'name' which is the name of the file
"""
header = toupper(toalphanum(name))
return "__" + header + "__" | 9f48181310db2732a26b846cb8270eb44bd06004 | 1,737 |
def url_exists(url):
"""
Checks if a url exists
:param url:
:return:
"""
p = urlparse(url)
conn = httplib.HTTPConnection(p.netloc)
conn.request('HEAD', p.path)
resp = conn.getresponse()
return resp.status == 301 or resp.status == 200 | 3ef7d71fed0c85d4e75e910e5354b817c656c0d7 | 1,738 |
def add_header(cmd):
"""
:param cmd: the command with its values
:return: adds a header and returns it, ready to be send
"""
# get the length of the length of the cmd (for how many spaces needed)
header = str(len(cmd))
for i in range(get_digits(len(cmd)), HEADERSIZE):
header = header + " "
return header + cmd | 91a23eaee6ddd01ce5b5d62b3d43221b25bcd541 | 1,739 |
def stat_selector(player, stat, in_path, year):
"""
Selects stat for player in game year selected
Parameters
----------
player
The player being assessed (str)
stat
The stat being assessed (str)
in_path
The path to the folder containing player data (str)
year
The year of game to look at (int)
Returns
-------
stat_selected
A number indicating the selected stat value (int)
"""
df = fifa_file_opener(in_path, year)
player_row = df.loc[df["short_name"] == player]
stat_selected = int(player_row[stat])
return stat_selected | 7f04086e4e3baee273baa1b90e1e0735856091d5 | 1,740 |
import torch
def get_cali_samples(train_data_loader, num_samples, no_label=True):
"""Generate sub-dataset for calibration.
Args:
train_data_loader (torch.utils.data.DataLoader):
num_samples (int):
no_label (bool, optional): If the dataloader has no labels. Defaults to True.
Returns:
torch.Tensor: Concatenated data matrix.
"""
cali_data_list = []
if no_label:
for batch_data in train_data_loader:
cali_data_list.append(batch_data["image"])
if len(cali_data_list) >= num_samples:
break
else:
for batch_data, _ in train_data_loader:
cali_data_list.append(batch_data)
if len(cali_data_list) >= num_samples:
break
return torch.cat(cali_data_list, dim=0)[:num_samples].cpu() | 297ea0384b1e7f0a6ea51fc37325e57eb1cb8afa | 1,741 |
from typing import List
from typing import Tuple
import requests
import json
def fetch_available_litteraturbanken_books() -> List[Tuple[str, str]]:
"""Fetch available books from Litteraturbanken."""
url = "https://litteraturbanken.se/api/list_all/etext?exclude=text,parts,sourcedesc,pages,errata&filter_and=%7B%22sort_date_imprint.date:range%22:%221248,2020%22,%22export%3Etype%22:%5B%22xml%22,%22txt%22,%22workdb%22%5D%7D&filter_or=%7B%7D&filter_string=&from=0&include=lbworkid,titlepath,title,titleid,work_titleid,shorttitle,mediatype,searchable,imported,sortfield,sort_date_imprint.plain,main_author.authorid,main_author.surname,main_author.type,work_authors.authorid,work_authors.surname,startpagename,has_epub,sort_date.plain,export&partial_string=true&sort_field=popularity%7Cdesc&suggest=true&to=1000"
response = requests.get(url)
response.raise_for_status()
response = json.loads(response.text)
books = []
for book in response["data"]:
has_text = False
for export in book["export"]:
if export["type"] == "txt":
has_text = True
break
if not has_text:
continue
filename = "LB_{}_{}_{}_etext.txt".format(book["main_author"]["authorid"], book["titleid"], book["sort_date_imprint"]["plain"])
if filename in blacklist:
continue
books.append((filename, book["lbworkid"]))
return books | ff14af499335c6229d1f8d995c343c62fff7db74 | 1,742 |
def soup_from_psf(psf):
"""
Returns a Soup from a .psf file
"""
soup = pdbatoms.Soup()
curr_res_num = None
is_header = True
for line in open(psf):
if is_header:
if "NATOM" in line:
n_atom = int(line.split()[0])
is_header = False
continue
words = line.split()
atom_num = int(words[0])
chain_id = words[1]
res_num = int(words[2])
res_type = words[3]
atom_type = words[4]
charge = float(words[6])
mass = float(words[7])
if chain_id.startswith('WT') or chain_id.startswith('ION'):
is_hetatm = True
chain_id = " "
else:
is_hetatm = False
chain_id = chain_id[0]
if curr_res_num != res_num:
res = pdbatoms.Residue(res_type, chain_id, res_num)
soup.append_residue(res)
curr_res_num = res_num
atom = pdbatoms.Atom()
atom.vel = v3.vector()
atom.chain_id = chain_id
atom.is_hetatm = is_hetatm
atom.num = atom_num
atom.res_num = res_num
atom.res_type = res_type
atom.type = atom_type
atom.mass = mass
atom.charge = charge
atom.element = data.guess_element(res_type, atom_type)
soup.insert_atom(-1, atom)
if len(soup.atoms()) == n_atom:
break
convert_to_pdb_atom_names(soup)
return soup | 6b84e9428bec66e65b0d06dd81b238370f1602a8 | 1,743 |
def check_api():
"""
Verify goods receipt (stock-in)
post req: withlock
{
erp_order_code,
lines: [{
barcode, location, lpn, qty
},]
w_user_code,
w_user_name
}
"""
w_user_code = request.json.pop('w_user_code', None)
w_user_name = request.json.pop('w_user_name', None)
order = Stockin.query.t_query.filter_by(erp_order_code=request.json.pop('erp_order_code')) \
.with_for_update().first()
if order.state == 'create' or order.state == 'part':
lines = request.json['lines']
action = StockinAction(order)
for line in lines:
line['qty'] = int(line.get('qty', 0) or 0)
if line.get('qty', 0) <= 0:
continue
action.check(order=order, w_user_code=w_user_code, w_user_name=w_user_name, **line)
order.state = 'part'
# When over-receiving is not allowed, check after each receipt whether the order is fully stocked in; when over-receiving is allowed, the order can only be closed manually
if not g.owner.is_overcharge:
finish = True
for line in order.lines:
if not (line.qty_real >= line.qty):
finish = False
order.state = 'all' if finish else 'part'
if order.state == 'all':
order.finish()
db.session.commit()
return json_response({'status': 'success', 'msg': u'ok', 'data': order.as_dict})
db.session.rollback()
return json_response({'status': 'fail', 'msg': u'订单在(%s)状态中,不能再收货'%(order.state), 'data': order.as_dict}) | 7c91f2c9068f762cb6681210f52ffe7d1a6ca259 | 1,744 |
def quadratic_form(u, Q, v, workers=1, **kwargs):
"""
Compute the quadratic form uQv, with broadcasting
Parameters
----------
u : (..., M) array
The u vectors of the quadratic form uQv
Q : (..., M, N) array
The Q matrices of the quadratic form uQv
v : (..., N) array
The v vectors of the quadratic form uQv
workers : int, optional
The number of parallel threads to use along gufunc loop dimension(s).
If set to -1, the maximum number of threads (as returned by
``multiprocessing.cpu_count()``) are used.
Returns
-------
qf : (...) array
The result of the quadratic forms
Notes
-----
Numpy broadcasting rules apply.
Implemented for types single, double, csingle and cdouble. Numpy
conversion rules apply.
This is similar to PDL inner2
Examples
--------
The result in absence of broadcasting is just as np.dot(np.dot(u,Q),v)
or np.dot(u, np.dot(Q,v))
>>> u = np.array([2., 3.])
>>> Q = np.array([[1.,1.], [0.,1.]])
>>> v = np.array([1.,2.])
>>> quadratic_form(u,Q,v)
12.0
>>> np.dot(np.dot(u,Q),v)
12.0
>>> np.dot(u, np.dot(Q,v))
12.0
"""
with _setup_gulinalg_threads(workers):
out = _impl.quadratic_form(u, Q, v, **kwargs)
return out | 6cd0abdf3d49ce38ba61ba6da9ee107663b1a8b9 | 1,745 |
def reorg(dat):
"""This function grabs the data from the dictionary of data types
(organized by ID), and combines them into the
:class:`dolfyn.ADPdata` object.
"""
outdat = apb.ADPdata()
cfg = outdat['config'] = db.config(_type='Nortek AD2CP')
cfh = cfg['filehead config'] = dat['filehead config']
cfg['model'] = (cfh['ID'].split(',')[0][5:-1])
outdat['props'] = {}
outdat['props']['inst_make'] = 'Nortek'
outdat['props']['inst_model'] = cfg['model']
outdat['props']['inst_type'] = 'ADP'
for id, tag in [(21, ''), (24, '_b5'), (26, '_ar')]:
if id == 26:
collapse_exclude = [0]
else:
collapse_exclude = []
if id not in dat:
continue
dnow = dat[id]
cfg['burst_config' + tag] = lib.headconfig_int2dict(
lib.collapse(dnow['config'], exclude=collapse_exclude,
name='config'))
outdat['mpltime' + tag] = lib.calc_time(
dnow['year'] + 1900,
dnow['month'],
dnow['day'],
dnow['hour'],
dnow['minute'],
dnow['second'],
dnow['usec100'].astype('uint32') * 100)
tmp = lib.beams_cy_int2dict(
lib.collapse(dnow['beam_config'], exclude=collapse_exclude,
name='beam_config'), 21)
cfg['ncells' + tag] = tmp['ncells']
cfg['coord_sys' + tag] = tmp['cy']
cfg['nbeams' + tag] = tmp['nbeams']
for ky in ['SerialNum', 'cell_size', 'blanking',
'nom_corr', 'data_desc',
'vel_scale', 'power_level']:
# These ones should 'collapse'
# (i.e., all values should be the same)
# So we only need that one value.
cfg[ky + tag] = lib.collapse(dnow[ky], exclude=collapse_exclude,
name=ky)
for ky in ['c_sound', 'temp', 'press',
'heading', 'pitch', 'roll',
'temp_press', 'batt_V',
'temp_mag', 'temp_clock',
'mag', 'accel',
'ambig_vel', 'xmit_energy',
'error', 'status0', 'status',
'_ensemble', 'ensemble']:
# No if statement here
outdat[ky + tag] = dnow[ky]
for ky in [
'vel', 'amp', 'corr',
'alt_dist', 'alt_quality', 'alt_status',
'ast_dist', 'ast_quality', 'ast_offset_time',
'ast_pressure',
'altraw_nsamp', 'altraw_dist', 'altraw_samp',
'echo',
'orientmat', 'angrt',
'percent_good',
'std_pitch', 'std_roll', 'std_heading', 'std_press'
]:
if ky in dnow:
outdat[ky + tag] = dnow[ky]
for grp, keys in defs._burst_group_org.items():
if grp not in outdat and \
len(set(defs._burst_group_org[grp])
.intersection(outdat.keys())):
outdat[grp] = db.TimeData()
for ky in keys:
if ky == grp and ky in outdat and \
not isinstance(outdat[grp], db.TimeData):
tmp = outdat.pop(grp)
outdat[grp] = db.TimeData()
outdat[grp][ky] = tmp
#print(ky, tmp)
if ky + tag in outdat and not \
isinstance(outdat[ky + tag], db.TimeData):
outdat[grp][ky + tag] = outdat.pop(ky + tag)
# Move 'altimeter raw' data to its own down-sampled structure
if 26 in dat:
ard = outdat['altraw'] = db.MappedTime()
for ky in list(outdat.iter_data(include_hidden=True)):
if ky.endswith('_ar'):
grp = ky.split('.')[0]
if '.' in ky and grp not in ard:
ard[grp] = db.TimeData()
ard[ky.rstrip('_ar')] = outdat.pop(ky)
N = ard['_map_N'] = len(outdat['mpltime'])
parent_map = np.arange(N)
ard['_map'] = parent_map[np.in1d(outdat.sys.ensemble, ard.sys.ensemble)]
outdat['config']['altraw'] = db.config(_type='ALTRAW', **ard.pop('config'))
outdat.props['coord_sys'] = {'XYZ': 'inst',
'ENU': 'earth',
'BEAM': 'beam'}[cfg['coord_sys'].upper()]
tmp = lib.status2data(outdat.sys.status) # returns a dict
outdat.orient['orient_up'] = tmp['orient_up']
# 0: XUP, 1: XDOWN, 4: ZUP, 5: ZDOWN
# Heading is: 0,1: Z; 4,5: X
return outdat | 2389be25e7052016a6a710803b7b661a7eb1606c | 1,746 |
def get_oauth2_service_account_keys():
"""A getter that returns the required OAuth2 service account keys.
Returns:
A tuple containing the required keys as strs.
"""
return _OAUTH2_SERVICE_ACCOUNT_KEYS | bcded81a6884dc40b9f2ccb32e8b14df450b6fd6 | 1,748 |
def grammar_info(df, col):
"""return three separate attributes with
clean abstract, Flesch score and sentence count"""
df['clean_abstract'] = clean_text(df[col])
df['flesch_score'] = df[col].apply(flesch_score)
df['sentence_count'] = sentence_count(df[col])
return df | 7606121f68434a760255cca10e75840ca058c50c | 1,751 |
def landing():
"""Landing page"""
return render_template('public/index.html') | 462b8f4451008832c6883be64dc23712bc76c907 | 1,753 |
def uniform_decay(distance_array, scale):
"""
Transform a measurement array using a uniform distribution.
The output is 1 below the scale parameter and 0 above it.
Some sample values. Measurements are in multiple of ``scale``; decay value are in fractions of
the maximum value:
+---------------+---------------+
| measurement | decay value |
+===============+===============+
| 0.0 | 1.0 |
+---------------+---------------+
| 0.25 | 1.0 |
+---------------+---------------+
| 0.5 | 1.0 |
+---------------+---------------+
| 0.75 | 1.0 |
+---------------+---------------+
| 1.0 | 1.0 |
+---------------+---------------+
"""
return (distance_array <= scale).astype(np.float64) | e643e7e962d3b6e29c2c23c0aa682e77a539d04b | 1,754 |
def pid_to_service(pid):
"""
Check if a PID belongs to a systemd service and return its name.
Return None if the PID does not belong to a service.
Uses DBUS if available.
"""
if dbus:
return _pid_to_service_dbus(pid)
else:
return _pid_to_service_systemctl(pid) | 925f67611d83b3304db673e5e3d0c0a7dafd8211 | 1,755 |
def Frequencies(bands, src):
"""
Count the number of scalars in each band.
:param: bands - the bands.
:param: src - the vtkPolyData source.
:return: The frequencies of the scalars in each band.
"""
freq = dict()
for i in range(len(bands)):
freq[i] = 0;
tuples = src.GetPointData().GetScalars().GetNumberOfTuples()
for i in range(tuples):
x = src.GetPointData().GetScalars().GetTuple1(i)
for j in range(len(bands)):
if x <= bands[j][2]:
freq[j] = freq[j] + 1
break
return freq | 081e37f0d2d9d5a70266b24372d75d94d86fcbb0 | 1,756 |
from typing import Callable
from typing import Dict
import torch
import torch.nn.functional as F
def get_loss_fn(loss: str) -> Callable[..., torch.Tensor]:
"""
Get loss function as a PyTorch functional loss based on the name of the loss function.
Choices include 'cross_entropy', 'nll_loss', and 'kl_div'.
Args:
loss: a string indicating the loss function to return.
"""
loss_fn_mapping: Dict[str, Callable[..., torch.Tensor]] = {
'cross_entropy': F.cross_entropy,
'nll_loss': F.nll_loss,
'kl_div': F.kl_div,
}
try:
loss_fn: Callable[..., torch.Tensor] = loss_fn_mapping[loss]
except KeyError:
raise ValueError(f'Loss function {loss} is not supported.')
return loss_fn | ebbb20dba1b7573c615c35d683a59c9a5151b0e9 | 1,757 |
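An illustrative use of the lookup above (a sketch using the standard PyTorch functional losses):
import torch
loss_fn = get_loss_fn('cross_entropy')
logits = torch.randn(4, 10)        # batch of 4, 10 classes
targets = torch.randint(0, 10, (4,))
loss = loss_fn(logits, targets)    # scalar tensor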
def FormatAddress(chainIDAlias: str, hrp: str, addr: bytes) -> str:
"""FormatAddress takes in a chain prefix, HRP, and byte slice to produce a string for an address."""
addr_str = FormatBech32(hrp, addr)
return f"{chainIDAlias}{addressSep}{addr_str}" | 4004e2367e13abb890d22b653b4ac849bf615d1a | 1,758 |
from typing import List
from operator import or_
async def get_journal_scopes(
db_session: Session, user_id: str, user_group_id_list: List[str], journal_id: UUID
) -> List[JournalPermissions]:
"""
Returns list of all permissions (group user belongs to and user) for provided user and journal.
"""
if journal_id is None:
raise JournalNotFound(
"In order to get journal permissions, journal_id must be specified"
)
journal_spec = JournalSpec(id=journal_id)
await find_journal(db_session, journal_spec)
query = db_session.query(JournalPermissions).filter(
JournalPermissions.journal_id == journal_id
)
if user_id is None and user_group_id_list is None:
raise InvalidParameters(
"In order to get journal permissions, at least one of user_id, or user_group_id_list must be specified"
)
query = query.filter(
or_(
JournalPermissions.holder_id == user_id,
JournalPermissions.holder_id.in_(user_group_id_list),
)
)
journal_permissions = query.all()
if not journal_permissions:
raise PermissionsNotFound(f"No permissions for journal_id={journal_id}")
return journal_permissions | 2f3fcc3cbfdc124a10ee04a716c76f7e2144e0de | 1,759 |
import re
def clean_script_title(script_title):
"""Cleans up a TV/movie title to save it as a file name.
"""
clean_title = re.sub(r'\s+', ' ', script_title).strip()
clean_title = clean_title.replace('\\', BACKSLASH)
clean_title = clean_title.replace('/', SLASH)
clean_title = clean_title.replace(':', COLON)
clean_title = clean_title.replace('*', STAR)
clean_title = clean_title.replace('<', LESS_THAN)
clean_title = clean_title.replace('>', GREATER_THAN)
clean_title = clean_title.replace('?', QUESTION_MARK)
clean_title = clean_title.replace('|', PIPE)
return clean_title | 6dcee3b05e9654e65e0f8eb78be9383d349adff2 | 1,760 |
def _calc_cumsum_matrix_jit(X, w_list, p_ar, open_begin):
"""Fast implementation by numba.jit."""
len_x, len_y = X.shape
# cumsum matrix
D = np.ones((len_x, len_y), dtype=np.float64) * np.inf
if open_begin:
X = np.vstack((np.zeros((1, X.shape[1])), X))
D = np.vstack((np.zeros((1, D.shape[1])), D))
w_list[:, 0] += 1
# number of patterns
num_pattern = p_ar.shape[0]
# max pattern length
max_pattern_len = p_ar.shape[1]
# pattern cost
pattern_cost = np.zeros(num_pattern, dtype=np.float64)
# step cost
step_cost = np.zeros(max_pattern_len, dtype=np.float64)
# number of cells
num_cells = w_list.shape[0]
for cell_idx in range(num_cells):
i = w_list[cell_idx, 0]
j = w_list[cell_idx, 1]
if i == j == 0:
D[i, j] = X[0, 0]
continue
for pidx in range(num_pattern):
# calculate local cost for each pattern
for sidx in range(1, max_pattern_len):
# calculate step cost of pair-wise cost matrix
pattern_index = p_ar[pidx, sidx, 0:2]
ii = int(i + pattern_index[0])
jj = int(j + pattern_index[1])
if ii < 0 or jj < 0:
step_cost[sidx] = np.inf
continue
else:
step_cost[sidx] = X[ii, jj] \
* p_ar[pidx, sidx, 2]
pattern_index = p_ar[pidx, 0, 0:2]
ii = int(i + pattern_index[0])
jj = int(j + pattern_index[1])
if ii < 0 or jj < 0:
pattern_cost[pidx] = np.inf
continue
pattern_cost[pidx] = D[ii, jj] \
+ step_cost.sum()
min_cost = pattern_cost.min()
if min_cost != np.inf:
D[i, j] = min_cost
return D | a282f68ca5789c97582f9535b5a255066bba44d9 | 1,762 |
def create_field_texture_coordinates(fieldmodule: Fieldmodule, name="texture coordinates", components_count=3,
managed=False) -> FieldFiniteElement:
"""
Create texture coordinates finite element field of supplied name with
number of components 1, 2, or 3 and the components named "u", "v" and "w" if used.
New field is not managed by default.
"""
return create_field_finite_element(fieldmodule, name, components_count,
component_names=("u", "v", "w"), managed=managed, type_coordinate=True) | e19e964e0828006beae3c9e71f30fb0c846de1de | 1,763 |
import uuid
def get_cert_sha1_by_openssl(certraw: str) -> str:
"""calc the sha1 of a certificate, return openssl result str"""
res: str = None
tmpname = None
try:
tmpname = tmppath / f"{uuid.uuid1()}.crt"
while tmpname.exists():
tmpname = tmppath / f"{uuid.uuid1()}.crt"
tmpname.write_text(certraw, encoding="utf-8")
cmd = f"openssl x509 -in {tmpname} -fingerprint -noout -sha1"
res = exec_openssl(cmd)
except Exception as ex:
raise Exception(f"Parse ssl data error, err:{ex}")
finally:
if tmpname is not None:
tmpname.unlink()
return res | 4b92531473e8488a87d14c8ecc8c88d4d0adef0d | 1,764 |
from typing import Union
def get_dderivative_skewness(uni_ts: Union[pd.Series, np.ndarray], step_size: int = 1) -> np.float64:
"""
:return: The skewness of the difference derivative of univariate time series within the
function we use step_size to find derivative (default value of step_size is 1).
"""
return get_skewness(_difference_derivative(uni_ts, step_size)) | 11688b0cbd5dde2539cc3d5cfa8c5dccb9432f55 | 1,766 |
def extract_query(e: Event, f, woi, data):
"""
create a query array from the event
:param e: the event to extract the query for
:param f: sampling frequency
:param woi: window of interest (seconds before and after the event start)
:param data: the full signal array
"""
assert woi[0] > 0 and woi[1] > 0
e_start_index = resolve_esi(e, data)
st = int(e_start_index - woi[0] * f)
ed = int(e_start_index + woi[0] * f)
return Event(e.name, e.startT - woi[0], e.endT + woi[1], data[st:ed]), st, ed | 7109e28a265ff49054036e4e4c16ace0fc5eebda | 1,767 |
import ctypes
def getForegroundClassNameUnicode(hwnd=None):
"""
Returns a unicode string containing the class name of the specified
application window.
If hwnd parameter is None, frontmost window will be queried.
"""
if hwnd is None:
hwnd = win32gui.GetForegroundWindow()
# Maximum number of chars we'll accept for the class name; the
# rest will be truncated if it's longer than this.
MAX_LENGTH = 1024
classNameBuf = ctypes.create_unicode_buffer( MAX_LENGTH )
retval = ctypes.windll.User32.GetClassNameW(
hwnd,
classNameBuf,
len( classNameBuf )
)
if retval == 0:
raise ctypes.WinError()
return classNameBuf.value | 82147d3da4c9374078bbeba64ef6968982dc2550 | 1,768 |
def read_mapping_from_csv(bind):
"""
Calls read_csv() and parses the loaded array into a dictionary. The dictionary is defined as follows:
{
"teams": {
*team-name*: {
"ldap": []
},
....
},
"folders: {
*folder-id*: {
"name": *folder-name*,
"permissions": [
{
"teamId": *team-name*,
"permission0: *permission*"
},
....
]
},
...
}
:return: The csv's contents parsed into a dictionary as described above.
"""
result = {"teams": {}, "folders": {}}
csv_content = read_csv(bind)
is_header = True
for line in csv_content:
if not is_header:
ldap = line[0]
team = line[1]
folder_name = line[3]
folder_uuid = line[4]
permission = line[5]
if not team in result["teams"]:
result["teams"][team] = {"ldap": []}
if not ldap in result["teams"][team]["ldap"]:
result["teams"][team]["ldap"].append(ldap)
if not folder_uuid in result["folders"]:
result["folders"][folder_uuid] = {"name": folder_name, "permissions": []}
access = {"teamId": team, "permission": permission}
if not access in result["folders"][folder_uuid]["permissions"]:
result["folders"][folder_uuid]["permissions"].append(access)
else:
is_header = False
return result | 8ffe1b5f489bb3428cb0b2dd3cc7f9eafe9ecf27 | 1,769 |
from typing import Sequence
from typing import Tuple
def primal_update(
agent_id: int,
A: np.ndarray,
W: np.ndarray,
x: np.ndarray,
z: np.ndarray,
lam: np.ndarray,
prev_x: np.ndarray,
prev_z: np.ndarray,
objective_grad: np.ndarray,
feasible_set: CCS,
alpha: float,
tau: float,
nu: float,
others_agent_id: Sequence[int],
others_lam: Sequence[np.ndarray],
) -> Tuple[np.ndarray, np.ndarray]:
""" """
x = feasible_set.projection(
x + alpha * (x - prev_x) - tau * objective_grad - np.matmul(A.T, lam)
)
z = (
z
+ alpha * (z - prev_z)
+ nu
* sum(
[
W[agent_id, oai] * (lam - ol)
for oai, ol in zip(others_agent_id, others_lam)
]
)
)
return x, z | 6a13bb9147b74c3803482f53273ebc831ca1662b | 1,770 |
def norm_cmap(values, cmap, normalize, cm, mn, mx):
""" Normalize and set colormap
Parameters
----------
values
Series or array to be normalized
cmap
matplotlib Colormap
normalize
matplotlib.colors.Normalize
cm
matplotlib.cm
mn
minimum value for the normalization (computed from ``values`` when None)
mx
maximum value for the normalization (computed from ``values`` when None)
Returns
-------
n_cmap
mapping of normalized values to colormap (cmap)
"""
if (mn is None) and (mx is None):
mn, mx = min(values), max(values)
norm = normalize(vmin=mn, vmax=mx)
n_cmap = cm.ScalarMappable(norm=norm, cmap=cmap)
return n_cmap, norm | 25515df37fe6b7060acf681287156af2c58d4c03 | 1,771 |
def _cpx(odss_tuple, nterm, ncond):
"""
This function transforms the raw data for electric parameters (voltage, current...) into a suitable complex array
:param odss_tuple: tuple of nphases*2 floats (returned by odsswr as couples of real, imag components, for each phase
of each terminal)
:type odss_tuple: tuple or list
:param nterm: number of terminals of the underlying electric object
:type nterm: int
:param ncond: number of conductors per terminal of the underlying electric object
:type ncond: int
:returns: a [nterm x ncond] numpy array of complex floats
:rtype: numpy.ndarray
"""
assert len(odss_tuple) == nterm * ncond * 2
cpxr = np.zeros([nterm, ncond], 'complex')
def pairwise(iterable):
# "s -> (s0, s1), (s2, s3), (s4, s5), ..."
a = iter(iterable)
return zip(a, a)
for idx, couple in enumerate(pairwise(odss_tuple)):
real = couple[0]
imag = couple[1]
cpxr[int(idx / ncond), (idx % ncond)] = np.sum([np.multiply(1j, imag), real], axis=0)
return cpxr | f5931915550bb7ec9e713689c3d79997973eb252 | 1,772 |
def get_l2_distance_arad(X1, X2, Z1, Z2, \
width=0.2, cut_distance=6.0, r_width=1.0, c_width=0.5):
""" Calculates the Gaussian distance matrix D for atomic ARAD for two
sets of molecules
K is calculated using an OpenMP parallel Fortran routine.
Arguments:
==============
X1 -- np.array of ARAD descriptors for molecules in set 1.
X2 -- np.array of ARAD descriptors for molecules in set 2.
Z1 -- List of lists of nuclear charges for molecules in set 1.
Z2 -- List of lists of nuclear charges for molecules in set 2.
Keyword arguments:
width --
cut_distance --
r_width --
c_width --
Returns:
==============
D -- The distance matrices for each sigma (4D-array, Nmol1 x Nmol2 x Natom1 x Natoms2)
"""
amax = X1.shape[1]
assert X1.shape[3] == amax, "ERROR: Check ARAD descriptor sizes! code = 1"
assert X2.shape[1] == amax, "ERROR: Check ARAD descriptor sizes! code = 2"
assert X2.shape[3] == amax, "ERROR: Check ARAD descriptor sizes! code = 3"
nm1 = len(Z1)
nm2 = len(Z2)
assert X1.shape[0] == nm1, "ERROR: Check ARAD descriptor sizes! code = 4"
assert X2.shape[0] == nm2, "ERROR: Check ARAD descriptor sizes! code = 5"
N1 = []
for Z in Z1:
N1.append(len(Z))
N2 = []
for Z in Z2:
N2.append(len(Z))
N1 = np.array(N1,dtype=np.int32)
N2 = np.array(N2,dtype=np.int32)
c1 = []
for charges in Z1:
c1.append(np.array([PTP[int(q)] for q in charges], dtype=np.int32))
Z1_arad = np.zeros((nm1,amax,2))
for i in range(nm1):
for j, z in enumerate(c1[i]):
Z1_arad[i,j] = z
c2 = []
for charges in Z2:
c2.append(np.array([PTP[int(q)] for q in charges], dtype=np.int32))
Z2_arad = np.zeros((nm2,amax,2))
for i in range(nm2):
for j, z in enumerate(c2[i]):
Z2_arad[i,j] = z
return atomic_arad_l2_distance_all(X1, X2, Z1_arad, Z2_arad, N1, N2, \
nm1, nm2, width, cut_distance, r_width, c_width, amax) | 77a4656a6f0014453991b8619ea4c53c6eec2c78 | 1,773 |
def _swap_endian(val, length):
"""
Swap the endianness of a number
"""
if length <= 8:
return val
if length <= 16:
return (val & 0xFF00) >> 8 | (val & 0xFF) << 8
if length <= 32:
return ((val & 0xFF000000) >> 24 |
(val & 0x00FF0000) >> 8 |
(val & 0x0000FF00) << 8 |
(val & 0x000000FF) << 24)
raise Exception('Cannot swap endianness for length ' + str(length)) | 4b3b879ad04e43e9454b904ba65420a8d477b629 | 1,774 |
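A few illustrative sanity checks of the swap (values chosen for the example):
assert _swap_endian(0x12, 8) == 0x12                # lengths <= 8 are returned unchanged
assert _swap_endian(0x1234, 16) == 0x3412           # 16-bit byte swap
assert _swap_endian(0x12345678, 32) == 0x78563412   # 32-bit byte swap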
def get_analysis(output, topology, traj):
"""
Calls analysis fixture with the right arguments depending on the trajectory type.
Parameters
-----------
output : str
Path to simulation 'output' folder.
topology : str
Path to the topology file.
traj : str
Trajectory type: xtc or pdb.
"""
traj = traj if traj else "pdb"
trajectory = f"trajectory.{traj}"
analysis = Analysis(
resname="LIG",
chain="Z",
simulation_output=output,
skip_initial_structures=False,
topology=topology,
water_ids_to_track=[("A", 2109), ("A", 2124)],
traj=trajectory,
)
return analysis | 0382f4e672aba3ab754de7d26d27c7921239951f | 1,775 |
def get_callback_class(module_name, subtype):
""" Can return None. If no class implementation exists for the given subtype, the module is
searched for a BASE_CALLBACKS_CLASS implementation which is used if found. """
module = _get_module_from_name(module_name)
if subtype is None:
return _get_callback_base_class(module)
try:
return getattr(module, subtype + CALLBACK_PREFIX)
# If the callback implementation for this subtype doesn't exist,
# attempt to load the BASE_CALLBACKS_CLASS class.
except AttributeError:
return _get_callback_base_class(module) | cf04ddcc28c43b82db44d8be96419efbc166330f | 1,776 |
def index():
"""Toon de metingen"""
return render_template('index.html', metingen=Meting.query.all()) | 3d92b912c0af513b6d20a094799f7dfb60220a75 | 1,777 |
def about(template):
"""
Attach a template to a step which can be used to generate
documentation about the step.
"""
def decorator(step_function):
step_function._about_template = template
return step_function
return decorator | 7c00256e39481247857b34dcd5b7783a39b0a8bd | 1,778 |
import torch
def _extend_batch_dim(t: torch.Tensor, new_batch_dim: int) -> torch.Tensor:
"""
Given a tensor `t` of shape [B x D1 x D2 x ...] we output the same tensor repeated
along the batch dimension ([new_batch_dim x D1 x D2 x ...]).
"""
num_non_batch_dims = len(t.shape[1:])
repeat_shape = (new_batch_dim, *(1 for _ in range(num_non_batch_dims)))
return t.repeat(repeat_shape) | 7ee1d0930f843a9d31bcc4934d675109f3b2df9b | 1,779 |
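An illustrative call, a sketch assuming the incoming tensor has a batch dimension of 1 (for larger batches, `repeat` multiplies the existing batch size rather than replacing it):
import torch
t = torch.zeros(1, 3, 5)
out = _extend_batch_dim(t, 8)
print(out.shape)  # torch.Size([8, 3, 5])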
def get_client(config):
"""
get_client returns a feature client configured using data found in the
settings of the current application.
"""
storage = _features_from_settings(config.registry.settings)
return Client(storage) | 650f0d294514a4d13afd9ab010d6d4bdd4045c43 | 1,781 |
import datetime
def fra_months(z): # Apologies, this function is verbose--function modeled after SSA regulations
"""A function that returns the number of months from date of birth to FRA based on SSA chart"""
# Declare global variable
global months_to_fra
# If date of birth is 1/1/1938 or earlier, full retirement age (FRA) is 65
if z < datetime.date(1938, 1, 2):
months_to_fra = 780
# If date of birth is between 1/2/1938 and 1/1/1939, then (FRA) is age 65 + 2 months
elif z < datetime.date(1939, 1, 2):
months_to_fra = 782
# If date of birth is between 1/2/1939 and 1/1/1940, then (FRA) is age 65 + 4 months
elif z < datetime.date(1940, 1, 2):
months_to_fra = 784
# If date of birth is between 1/2/1940 and 1/1/1941, then (FRA) is age 65 + 6 months
elif z < datetime.date(1941, 1, 2):
months_to_fra = 786
# If date of birth is between 1/2/1941 and 1/1/1942, then (FRA) is age 65 + 8 months
elif z < datetime.date(1942, 1, 2):
months_to_fra = 788
# If date of birth is between 1/2/1942 and 1/1/1943, then (FRA) is age 65 + 10 months
elif z < datetime.date(1943, 1, 2):
months_to_fra = 790
# If date of birth is between 1/2/1943 and 1/1/1955, then (FRA) is age 66
elif z < datetime.date(1955, 1, 2):
months_to_fra = 792
# If date of birth is between 1/2/1955 and 1/1/1956, then (FRA) is age 66 + 2 months
elif z < datetime.date(1956, 1, 2):
months_to_fra = 794
# If date of birth is between 1/2/1956 and 1/1/1957, then (FRA) is age 66 + 4 months
elif z < datetime.date(1957, 1, 2):
months_to_fra = 796
# If date of birth is between 1/2/1957 and 1/1/1958, then (FRA) is age 66 + 6 months
elif z < datetime.date(1958, 1, 2):
months_to_fra = 798
# If date of birth is between 1/2/1958 and 1/1/1959, then (FRA) is age 66 + 8 months
elif z < datetime.date(1959, 1, 2):
months_to_fra = 800
# If date of birth is between 1/2/1959 and 1/1/1960, then (FRA) is age 66 + 10 months
elif z < datetime.date(1960, 1, 2):
months_to_fra = 802
# If date of birth is 1/2/1960 or later, then (FRA) is age 67
else:
months_to_fra = 804
return months_to_fra | 70ba416f6415fd5db08244ae7543db0573f74b2d | 1,783 |
def set_global_format_spec(formats: SpecDict):
"""Set the global default format specifiers.
Parameters
----------
formats: dict[type, str]
Class-based format identifiers.
Returns
-------
old_spec : MultiFormatSpec
The previous globally-set formatters.
Example
-------
>>> s = section.Elastic2D(1, 29000, 10, 144)
>>> print(s)
section Elastic 1 29000 10 144
>>> set_global_format_spec({float: '#.3g'})
MultiFormatSpec(int='d', float='g')
>>> print(s)
section Elastic 1 2.90e+04 10.0 144.
"""
old_spec = _GLOBAL_FORMAT_SPEC.copy()
_GLOBAL_FORMAT_SPEC.update(formats)
return old_spec | 1494a6ff2ad71aa9ed0d20bc0620a124d404e5da | 1,784 |
def gen_base_pass(length=15):
"""
Generate base password.
- A new password will be generated on each call.
:param length: <int> password length.
:return: <str> base password.
"""
generator = PassGen()
return generator.make_password(length=length) | 571683589e13b8dcbd74573b31e5fc7644360bfe | 1,785 |
def split_component_chars(address_parts):
"""
:param address_parts: list of the form [(<address_part_1>, <address_part_1_label>), .... ]
returns [(<char_0>, <address_comp_for_char_0), (<char_1>, <address_comp_for_char_1),.., (<char_n-1>, <address_comp_for_char_n-1)]
"""
char_arr = []
for address_part, address_part_label in address_parts:
# The address part of the tuple (address_part, address_part_label)
for c in address_part:
char_arr.append((c, address_part_label))
return char_arr | f4f3dd59378a689e9048cee96b8d6f12e9d8fe21 | 1,786 |
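An illustrative input/output pair (the labels here are made up for the example):
parts = [("12", "house_number"), ("Main", "street")]
print(split_component_chars(parts))
# [('1', 'house_number'), ('2', 'house_number'), ('M', 'street'), ('a', 'street'), ('i', 'street'), ('n', 'street')]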
import json
def report_metrics(topic, message):
"""
Report metric data to storage through datamanage
:param topic: the topic to report to
:param message: the metric data points to report
:return: whether the report succeeded
"""
try:
res = DataManageApi.metrics.report({"kafka_topic": topic, MESSAGE: message, TAGS: [DEFAULT_GEOG_AREA_TAG]})
logger.info(f"report capacity metric {json.dumps(message)}")
if res.is_success():
return True
else:
logger.warning(f"report metric failed. {json.dumps(message)} {res.message}")
return False
except Exception:
logger.error("query metric failed, encounter some exception", exc_info=True)
return False | 28f0bf1671b4116b26b8dba3f0c0a34174a0597a | 1,787 |
def wg_completion_scripts_cb(data, completion_item, buffer, completion):
""" Complete with known script names, for command '/weeget'. """
global wg_scripts
wg_read_scripts(download_list=False)
if len(wg_scripts) > 0:
for id, script in wg_scripts.items():
weechat.hook_completion_list_add(completion, script["full_name"],
0, weechat.WEECHAT_LIST_POS_SORT)
return weechat.WEECHAT_RC_OK | b9dc0d5e736cfeb1dc98d09b8e12c6a52696d89d | 1,788 |
def getG(source):
""" Read the Graph from a textfile """
G = {}
Grev = {}
for i in range(1,N+1):
G[i] = []
Grev[i] = []
fin = open(source)
for line in fin:
v1 = int(line.split()[0])
v2 = int(line.split()[1])
G[v1].append(v2)
Grev[v2].append(v1)
fin.close()
return G, Grev | 6e9a8a5c69267403ee3c624670c60af547d37a46 | 1,789 |
import re
def remove_version(code):
""" Remove any version directive """
pattern = r'\#\s*version[^\r\n]*\n'
regex = re.compile(pattern, re.MULTILINE|re.DOTALL)
return regex.sub('\n', code) | 101ada9490137a879ea287076989a732942368f8 | 1,790 |
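For illustration, stripping a GLSL "#version" directive from a shader source string (assuming the function above is in scope):
src = "#version 330 core\nvoid main() {}\n"
print(remove_version(src))  # "\nvoid main() {}\n" -- the directive line is replaced by a newline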
def unlabeled_balls_in_labeled_boxes(balls, box_sizes):
"""
OVERVIEW
This function returns a generator that produces all distinct distributions of
indistinguishable balls among labeled boxes with specified box sizes
(capacities). This is a generalization of the most common formulation of the
problem, where each box is sufficiently large to accommodate all of the
balls, and is an important example of a class of combinatorics problems
called 'weak composition' problems.
CONSTRUCTOR INPUTS
balls: the number of balls
box_sizes: This argument is a list of length 1 or greater. The length of
the list corresponds to the number of boxes. `box_sizes[i]` is a positive
integer that specifies the maximum capacity of the ith box. If
`box_sizes[i]` equals `n` (or greater), the ith box can accommodate all `n`
balls and thus effectively has unlimited capacity.
ACKNOWLEDGMENT
I'd like to thank Chris Rebert for helping me to convert my prototype
class-based code into a generator function.
"""
if not isinstance(balls, int):
raise TypeError("balls must be a non-negative integer.")
if balls < 0:
raise ValueError("balls must be a non-negative integer.")
if not isinstance(box_sizes,list):
raise ValueError("box_sizes must be a non-empty list.")
capacity= 0
for size in box_sizes:
if not isinstance(size, int):
raise TypeError("box_sizes must contain only positive integers.")
if size < 1:
raise ValueError("box_sizes must contain only positive integers.")
capacity+= size
if capacity < balls:
raise ValueError("The total capacity of the boxes is less than the "
"number of balls to be distributed.")
return _unlabeled_balls_in_labeled_boxes(balls, box_sizes) | 6390226744c2d4b756b43e880707accc333893d5 | 1,791 |
def beginning_next_non_empty_line(bdata, i):
""" doc
"""
while bdata[i] not in EOL:
i += 1
while bdata[i] in EOL:
i += 1
return i | 0a372729a7ad794a9385d87be39d62b1e6831b71 | 1,792 |
import collections
def VisualizeBoxes(image,
boxes,
classes,
scores,
class_id_to_name,
min_score_thresh=.25,
line_thickness=4,
groundtruth_box_visualization_color='black',
skip_scores=False,
skip_labels=False,
text_loc='TOP'):
"""Visualize boxes on top down image."""
box_to_display_str_map = collections.defaultdict(str)
box_to_color_map = collections.defaultdict(str)
num_boxes = boxes.shape[0]
for i in range(num_boxes):
if scores is not None and scores[i] < min_score_thresh:
continue
box = tuple(boxes[i].tolist())
display_str = ''
if not skip_labels:
if classes[i] in class_id_to_name:
class_name = class_id_to_name[classes[i]]
display_str = str(class_name)
else:
display_str = 'N/A'
if not skip_scores:
if not display_str:
display_str = '{}%'.format(int(100 * scores[i]))
else:
display_str = '{}: {}%'.format(display_str, int(100 * scores[i]))
box_to_display_str_map[box] = display_str
if scores is None:
box_to_color_map[box] = groundtruth_box_visualization_color
else:
box_to_color_map[box] = PIL_COLOR_LIST[classes[i] % len(PIL_COLOR_LIST)]
# Draw all boxes onto image.
for box, color in box_to_color_map.items():
DrawBoundingBoxOnImage(
image,
box,
color=color,
thickness=line_thickness,
display_str=box_to_display_str_map[box],
text_loc=text_loc)
return image | b02216a5a2e7fa7029dd0fea298efd1d593bab88 | 1,793 |
def cal_softplus(x):
"""Calculate softplus."""
return np.log(np.exp(x) + 1) | a966826f1e508ca1a197e63396ae9e2f779bcf96 | 1,795 |
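A quick numeric illustration: softplus(0) equals log(2) ≈ 0.693, and for large x it approaches x itself:
import numpy as np
x = np.array([-10.0, 0.0, 10.0])
print(cal_softplus(x))  # approximately [4.54e-05, 0.6931, 10.0000454]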
def prepare_spark_conversion(df: pd.DataFrame) -> pd.DataFrame:
"""Pandas does not distinguish NULL and NaN values. Everything null-like
is converted to NaN. However, spark does distinguish NULL and NaN for
example. To enable correct spark dataframe creation with NULL and NaN
values, the `PANDAS_NULL` constant is used as a workaround to enforce NULL
values in pyspark dataframes. Pyspark treats `None` values as NULL.
Parameters
----------
df: pd.DataFrame
Input dataframe to be prepared.
Returns
-------
df_prepared: pd.DataFrame
Prepared dataframe for spark conversion.
"""
return df.where(df.ne(PANDAS_NULL), None) | f18ddfc3e77809908bf8fa365c1acf8a8d5069c6 | 1,797 |
def loyalty():
"""Пересчитать индекс лояльности"""
articles = Article.objects.all()
if articles.count() == 0:
logger.info('Пока нет статей для пересчёта. Выходим...')
return False
logger.info('Начало пересчёта индекса лояльности')
logger.info(f'Количество материалов: {articles.count()}')
texts = [item.text for item in articles]
dt = DefineText(texts)
themes, _ = dt.article_theme()
sentiments, _ = dt.article_sentiment()
for article, theme, sentiment in zip(articles, themes, sentiments):
article.theme = bool(theme)
article.sentiment = sentiment
article.save() | 3db3214e1d6d2f2f3d54f9c1d01807ed9558ef6b | 1,800 |
import json
def user_info(request):
"""Returns a JSON object containing the logged-in student's information."""
student = request.user.student
return HttpResponse(json.dumps({
'academic_id': student.academic_id,
'current_semester': int(student.current_semester),
'name': student.name,
'username': request.user.username}), content_type="application/json") | 41c1bcc8f69d97f76acbe7f15c4bc5cbc2ea6b60 | 1,801 |
def extract_brain_activation(brainimg, mask, roilabels, method='mean'):
"""
Extract brain activation from ROI.
Parameters
----------
brainimg : array
A 4D brain image array whose first dimension corresponds to pictures and whose remaining 3 dimensions correspond to the brain image volume
mask : array
A 3D brain image array with the same size as the rest 3D of brainimg.
roilabels : list, array
ROI labels
method : str
Method to integrate activation from each ROI, by default is 'mean'.
Returns
-------
roisignals : list
Extracted brain activation.
Each element in the list is the extracted activation of the roilabels.
Because different labels may contain different numbers of activation voxels,
the extracted activations cannot be stacked into a single numpy array and are returned as a list.
"""
if method == 'mean':
calc_way = partial(np.mean, axis=1)
elif method == 'std':
calc_way = partial(np.std, axis=1)
elif method == 'max':
calc_way = partial(np.max, axis=1)
elif method == 'voxel':
calc_way = np.array
else:
raise Exception("This method is not supported yet; please contact the authors to implement it.")
assert brainimg.shape[1:] == mask.shape, "brainimg and mask are mismatched."
roisignals = []
for i, lbl in enumerate(roilabels):
roisignals.append(calc_way(brainimg[:, mask==lbl]))
return roisignals | fac20ea1c99696aab84137964dbbfdfa7bd66612 | 1,802 |
def logit(x):
"""
Elementwise logit (inverse logistic sigmoid).
:param x: numpy array
:return: numpy array
"""
return np.log(x / (1.0 - x)) | 4ce2474a9eb97208613268d3005959a4a162dbe0 | 1,803 |
import hashlib
import binascii
def _base58_decode(address: str) -> bool:
"""
SEE https://en.bitcoin.it/wiki/Base58Check_encoding
"""
try:
decoded_address = base58.b58decode(address).hex()
result, checksum = decoded_address[:-8], decoded_address[-8:]
except ValueError:
return False
else:
for _ in range(1, 3):
result = hashlib.sha256(binascii.unhexlify(result)).hexdigest()
return checksum == result[:8] | e0610e882b64511743376ce7a0370e7600436411 | 1,804 |
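An illustrative check, assuming the `base58` package used above is installed; the first string is the well-known Bitcoin genesis address:
print(_base58_decode("1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa"))  # True
print(_base58_decode("not-a-valid-address"))                 # False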
def get_average_matrix(shape, matrices):
""" Take the average matrix by a list of matrices of same shape """
return _ImageConvolution().get_average_matrix(shape, matrices) | dfdd4995751bb2894a7bada961d863bb800e79a5 | 1,805 |
def music(hot_music_url, **kwargs):
"""
get hot music result
:return: HotMusic object
"""
result = fetch(hot_music_url, **kwargs)
# process json data
datetime = parse_datetime(result.get('active_time'))
# video_list = result.get('music_list', [])
musics = []
music_list = result.get('music_list', [])
for item in music_list:
music = data_to_music(item.get('music_info', {}))
music.hot_count = item.get('hot_value')
musics.append(music)
# construct HotMusic object and return
return HotMusic(datetime=datetime, data=musics) | cf49e0648bb84ff9aa033bf49f732260770b47f5 | 1,807 |
def parse_from_docstring(docstring, spec='operation'):
"""Returns path spec from docstring"""
# preprocess lines
lines = docstring.splitlines(True)
parser = _ParseFSM(FSM_MAP, lines, spec)
parser.run()
return parser.spec | 37026d6e0fd0edf476d59cdd33ac7ec2d04eb38d | 1,808 |
def collection_headings(commodities) -> CommodityCollection:
"""Returns a special collection of headings to test header and chapter
parenting rules."""
keys = ["9900_80_0", "9905_10_0", "9905_80_0", "9910_10_0", "9910_80_0"]
return create_collection(commodities, keys) | 545419cd79fbd86d0a16aad78996977ea1ff4605 | 1,809 |
import getpass
def get_ssh_user():
"""Returns ssh username for connecting to cluster workers."""
return getpass.getuser() | 166048aa258bd0b2c926d03478e8492a405b0f7e | 1,810 |
def tryf(body, *handlers, elsef=None, finallyf=None):
"""``try``/``except``/``finally`` as a function.
This allows lambdas to handle exceptions.
``body`` is a thunk (0-argument function) that represents
the body of the ``try`` block.
``handlers`` is ``(excspec, handler), ...``, where
``excspec`` is either an exception type,
or a tuple of exception types.
``handler`` is a 0-argument or 1-argument
function. If it takes an
argument, it gets the exception
instance.
Handlers are tried in the order specified.
``elsef`` is a thunk that represents the ``else`` block.
``finallyf`` is a thunk that represents the ``finally`` block.
Upon normal completion, the return value of ``tryf`` is
the return value of ``elsef`` if that was specified, otherwise
the return value of ``body``.
If an exception was caught by one of the handlers, the return
value of ``tryf`` is the return value of the exception handler
that ran.
If you need to share variables between ``body`` and ``finallyf``
(which is likely, given what a ``finally`` block is intended
to do), consider wrapping the ``tryf`` in a ``let`` and storing
your variables there. If you want them to leak out of the ``tryf``,
you can also just create an ``env`` at an appropriate point,
and store them there.
"""
def accepts_arg(f):
try:
if arity_includes(f, 1):
return True
except UnknownArity: # pragma: no cover
return True # just assume it
return False
def isexceptiontype(exc):
try:
if issubclass(exc, BaseException):
return True
except TypeError: # "issubclass() arg 1 must be a class"
pass
return False
# validate handlers
for excspec, handler in handlers:
if isinstance(excspec, tuple): # tuple of exception types
if not all(isexceptiontype(t) for t in excspec):
raise TypeError(f"All elements of a tuple excspec must be exception types, got {excspec}")
elif not isexceptiontype(excspec): # single exception type
raise TypeError(f"excspec must be an exception type or tuple of exception types, got {excspec}")
# run
try:
ret = body()
except BaseException as exception:
# Even if a class is raised, as in `raise StopIteration`, the `raise` statement
# converts it into an instance by instantiating with no args. So we need no
# special handling for the "class raised" case.
# https://docs.python.org/3/reference/simple_stmts.html#the-raise-statement
# https://stackoverflow.com/questions/19768515/is-there-a-difference-between-raising-exception-class-and-exception-instance/19768732
exctype = type(exception)
for excspec, handler in handlers:
if isinstance(excspec, tuple): # tuple of exception types
# this is safe, exctype is always a class at this point.
if any(issubclass(exctype, t) for t in excspec):
if accepts_arg(handler):
return handler(exception)
else:
return handler()
else: # single exception type
if issubclass(exctype, excspec):
if accepts_arg(handler):
return handler(exception)
else:
return handler()
else:
if elsef is not None:
return elsef()
return ret
finally:
if finallyf is not None:
finallyf() | bde4282c4422272717e48a546430d2b93e9d0529 | 1,811 |
def obtain_sheet_music(score, most_frequent_dur):
"""
Returns unformatted sheet music from the score
"""
result = ""
octaves = [3 for i in range(12)]
accidentals = [False for i in range(7)]
for event in score:
for note_indx in range(len(event[0])):
data = notenum2string(event[0][note_indx], accidentals, octaves)
result += data[0]
accidentals = data[1]
octaves = data[2]
if note_indx != len(event[0])-1:
result += '-'
if event[1] != most_frequent_dur: # Quarters are default
result += '/'
result += dur2mod(event[1], most_frequent_dur)
result += ','
return result | 4c216f2cca0d2054af355bc097c22ff2b7662969 | 1,812 |
def adjacency_matrix(edges):
"""
Convert a directed graph to an adjacency matrix.
Note: The distance from a node to itself is 0 and distance from a node to
an unconnected node is defined to be infinite.
Parameters
----------
edges : list of tuples
list of dependencies between nodes in the graph
[(source node, destination node, weight), ...]
Returns
-------
out : tuple
(names, adjacency matrix)
names - list of unique nodes in the graph
adjacency matrix represented as list of lists
"""
# determine the set of unique nodes
names = set()
for src, dest, _ in edges:
# add source and destination nodes
names.add(src)
names.add(dest)
# convert set of names to sorted list
names = sorted(names)
# determine initial adjacency matrix with infinity weights
matrix = [[float('Inf')] * len(names) for _ in names]
for src, dest, weight in edges:
# update weight in adjacency matrix
matrix[names.index(src)][names.index(dest)] = weight
for src in names:
matrix[names.index(src)][names.index(src)] = 0
# return list of names and adjacency matrix
return names, matrix | b8743a6fa549b39d5cb24ae1f276e911b954ee5a | 1,813 |
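# Minimal usage sketch for adjacency_matrix above; the edge list is made up.
edges = [("a", "b", 3), ("b", "c", 1), ("a", "c", 7)]
names, matrix = adjacency_matrix(edges)
# names == ['a', 'b', 'c']
# matrix[0][1] == 3, matrix[1][2] == 1, and unconnected pairs stay at float('Inf')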
def estimate_Cn(P=1013, T=273.15, Ct=1e-4):
"""Use Weng et al to estimate Cn from meteorological data.
Parameters
----------
P : `float`
atmospheric pressure in hPa
T : `float`
temperature in Kelvin
Ct : `float`
        atmospheric structure constant of temperature, typically 10^-5 - 10^-2 near the surface
Returns
-------
`float`
Cn
"""
return (79 * P / (T ** 2)) * Ct ** 2 * 1e-12 | b74dd0c91197c24f880521a06d6bcd205d749448 | 1,814 |
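# Quick sketch: estimate Cn at sea-level pressure and 15 degrees C with the default Ct.
cn = estimate_Cn(P=1013, T=288.15)
print(f"Cn ~ {cn:.3e}")  # on the order of 1e-20 for these inputs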
import ctypes
def sg_get_scsi_status_str(scsi_status):
""" Fetch scsi status string. """
buff = _get_buffer(128)
libsgutils2.sg_get_scsi_status_str(scsi_status, 128, ctypes.byref(buff))
return buff.value.decode('utf-8') | 2bdf7feb455ccbab659961ddbba04a9fa1daeb85 | 1,815 |
import math
from torchvision import utils
def numpy_grid(x, pad=0, nrow=None, uint8=True):
""" thin wrap to make_grid to return frames ready to save to file
args
pad (int [0]) same as utils.make_grid(padding)
nrow (int [None]) # defaults to horizonally biased rectangle closest to square
uint8 (bool [True]) convert to img in range 0-255 uint8
"""
x = x.clone().detach().cpu()
nrow = nrow or int(math.sqrt(x.shape[0]))
x = ((utils.make_grid(x, nrow=nrow, padding=pad).permute(1,2,0) - x.min())/(x.max()-x.min())).numpy()
if uint8:
x = (x*255).astype("uint8")
return x | e83452bb2387d79ca307840487bb4bdd24efed87 | 1,816 |
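# Hedged usage sketch; assumes torch and torchvision are installed (see the import above).
import torch
frames = torch.rand(16, 3, 32, 32)   # a batch of 16 random RGB images
img = numpy_grid(frames, pad=2)      # uint8 HxWx3 array laid out as a 4x4 grid
print(img.shape, img.dtype)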
import functools
def if_active(f):
"""decorator for callback methods so that they are only called when active"""
@functools.wraps(f)
def inner(self, loop, *args, **kwargs):
if self.active:
return f(self, loop, *args, **kwargs)
return inner | 83b4eabaafa9602ad0547f87aeae99a63872152a | 1,817 |
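# Minimal sketch of how the if_active decorator might be used; the class and names are made up.
class Widget:
    def __init__(self):
        self.active = True
    @if_active
    def on_tick(self, loop, dt):
        print(f"tick after {dt}s")

w = Widget()
w.on_tick(None, dt=0.1)   # runs
w.active = False
w.on_tick(None, dt=0.1)   # silently skipped, returns None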
def obs_all_node_target_pairs_one_hot(agent_id: int, factory: Factory) -> np.ndarray:
"""One-hot encoding (of length nodes) of the target location for each node. Size of nodes**2"""
num_nodes = len(factory.nodes)
node_pair_target = np.zeros(num_nodes ** 2)
for n in range(num_nodes):
core_target_index = []
        if factory.nodes[n].table is not None and factory.nodes[n].table.has_core():
core_target_index = [
factory.nodes.index(factory.nodes[n].table.core.current_target)
]
node_pair_target[n * num_nodes : (n + 1) * num_nodes] = np.asarray(
one_hot_encode(num_nodes, core_target_index)
)
else:
node_pair_target[n * num_nodes : (n + 1) * num_nodes] = np.zeros(num_nodes)
return node_pair_target | aed5fa19baf28c798f1e064b878b148867d19053 | 1,818 |
from typing import Callable
from typing import List
import math
def repeat_each_position(shape: GuitarShape, length: int = None, repeats: int = 2,
                         order: Callable = asc) -> List[List[FretPosition]]:
"""
Play each fret in the sequence two or more times
"""
if length is not None:
div_length = math.ceil(length / repeats)
else:
div_length = length
pattern = order(shape, length=div_length)
new_positions = []
for positions in pattern:
new_positions.extend([positions] * repeats)
if length is not None and len(new_positions) != length:
new_positions = adjust_length(new_positions, length)
return new_positions | 9783e218134839410e02d4bc5210804d6a945d6d | 1,819 |
import csv
import gzip
from io import StringIO
import pandas
def gz_csv_read(file_path, use_pandas=False):
"""Read a gzipped csv file.
"""
    with gzip.open(file_path, 'rt') as infile:
if use_pandas:
data = pandas.read_csv(StringIO(infile.read()))
else:
reader = csv.reader(StringIO(infile.read()))
data = [row for row in reader]
return data | 725132f37454b66b6262236966c96d4b48a81049 | 1,820 |
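# Small self-contained sketch (relies on the Python 3 text-mode fixes above); the file name is made up.
with gzip.open("example.csv.gz", "wt") as f:
    f.write("a,b\n1,2\n3,4\n")
rows = gz_csv_read("example.csv.gz")
# rows == [['a', 'b'], ['1', '2'], ['3', '4']]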
from torch import nn
def init_block(in_channels, out_channels, stride, activation=nn.PReLU):
"""Builds the first block of the MobileFaceNet"""
return nn.Sequential(
nn.BatchNorm2d(3),
nn.Conv2d(in_channels, out_channels, 3, stride, 1, bias=False),
nn.BatchNorm2d(out_channels),
make_activation(activation)
) | 966ccb1ca1cb7e134db3ac40bb4daf54950743b1 | 1,821 |
def address_working(address, value=None):
"""
Find, insert or delete from database task address
:param address: website address example: https://www.youtube.com/
:param value: True: add , False: remove, default: find
:return:
"""
global db
if value is True:
db.tasks.insert_one({'Address': address})
return True
if value is False:
db.tasks.delete_many({'Address': address})
return False
    x = list(db.tasks.find({'Address': address}))
    return len(x) != 0
def merge_default_values(resource_list, default_values):
"""
Generate a new list where each item of original resource_list will be merged with the default_values.
Args:
resource_list: list with items to be merged
default_values: properties to be merged with each item list. If the item already contains some property
the original value will be maintained.
Returns:
list: list containing each item merged with default_values
"""
def merge_item(resource):
return merge_resources(default_values, resource)
return lmap(merge_item, resource_list) | c2e98260a34762d17185eacdfc2fb4be1b3a45f3 | 1,823 |
import datetime
import pytz
def finish_scheduling(request, schedule_item=None, payload=None):
"""
Finalize the creation of a scheduled action. All required data is passed
through the payload.
:param request: Request object received
:param schedule_item: ScheduledAction item being processed. If None,
it has to be extracted from the information in the payload.
:param payload: Dictionary with all the required data coming from
previous requests.
:return:
"""
# Get the payload from the session if not given
if payload is None:
payload = request.session.get(session_dictionary_name)
# If there is no payload, something went wrong.
if payload is None:
# Something is wrong with this execution. Return to action table.
messages.error(request,
_('Incorrect action scheduling invocation.'))
return redirect('action:index')
# Get the scheduled item if needed
if not schedule_item:
s_item_id = payload.get('schedule_id')
if not s_item_id:
messages.error(request, _('Incorrect parameters in action scheduling'))
return redirect('action:index')
# Get the item being processed
schedule_item = ScheduledAction.objects.get(pk=s_item_id)
# Check for exclude values and store them if needed
exclude_values = payload.get('exclude_values')
if exclude_values:
schedule_item.exclude_values = exclude_values
schedule_item.status = ScheduledAction.STATUS_PENDING
schedule_item.save()
# Create the payload to record the event in the log
log_payload = {
'action': schedule_item.action.name,
'action_id': schedule_item.action.id,
'execute': schedule_item.execute.isoformat(),
}
if schedule_item.action.action_type == Action.PERSONALIZED_TEXT:
log_payload.update({
'email_column': schedule_item.item_column.name,
'subject': schedule_item.payload.get('subject'),
'cc_email': schedule_item.payload.get('cc_email', []),
'bcc_email': schedule_item.payload.get('bcc_email', []),
'send_confirmation': schedule_item.payload.get('send_confirmation',
False),
'track_read': schedule_item.payload.get('track_read', False)
})
log_type = Log.SCHEDULE_EMAIL_EDIT
elif schedule_item.action.action_type == Action.PERSONALIZED_JSON:
ivalue = None
if schedule_item.item_column:
ivalue = schedule_item.item_column.name
log_payload.update({
'item_column': ivalue,
'token': schedule_item.payload.get('subject')
})
log_type = Log.SCHEDULE_JSON_EDIT
else:
log_type = None
# Create the log
Log.objects.register(request.user,
log_type,
schedule_item.action.workflow,
log_payload)
# Notify the user. Show the time left until execution and a link to
# view the scheduled events with possibility of editing/deleting.
# Successful processing.
now = datetime.datetime.now(pytz.timezone(settings.TIME_ZONE))
tdelta = schedule_item.execute - now
# Reset object to carry action info throughout dialogs
request.session[session_dictionary_name] = {}
request.session.save()
# Create the timedelta string
delta_string = ''
if tdelta.days != 0:
delta_string += ugettext('{0} days').format(tdelta.days)
    hours = tdelta.seconds // 3600
if hours != 0:
delta_string += ugettext(', {0} hours').format(hours)
    minutes = (tdelta.seconds % 3600) // 60
if minutes != 0:
delta_string += ugettext(', {0} minutes').format(minutes)
# Successful processing.
return render(request,
'scheduler/schedule_done.html',
{'tdelta': delta_string,
's_item': schedule_item}) | fa0fb4648ef9d750eca9f19ea435fd57ab433ad8 | 1,824 |
import json
def analyze(results_file, base_path):
"""
Parse and print the results from gosec audit.
"""
# Load gosec json Results File
with open(results_file) as f:
issues = json.load(f)['Issues']
if not issues:
print("Security Check: No Issues Detected!")
return ([], [], [])
else:
high_risk = list()
medium_risk = list()
low_risk = list()
# Sort Issues
for issue in issues:
if issue['severity'] == 'HIGH':
high_risk.append(issue)
elif issue['severity'] == 'MEDIUM':
medium_risk.append(issue)
elif issue['severity'] == 'LOW':
low_risk.append(issue)
# Print Summary
print()
print('Security Issue Summary:')
print(' Found ' + str(len(high_risk)) + ' High Risk Issues')
print(' Found ' + str(len(medium_risk)) + ' Medium Risk Issues')
print(' Found ' + str(len(low_risk)) + ' Low Risk Issues')
# Print Issues In Order of Importance
if high_risk:
header = ('= High Security Risk Issues =')
print_category(header, high_risk, base_path)
if medium_risk:
header = ('= Medium Security Risk Issues =')
print_category(header, medium_risk, base_path)
if low_risk:
header = ('= Low Security Risk Issues =')
print_category(header, low_risk, base_path)
return (high_risk, medium_risk, low_risk) | a016f4ba389305103c9bbab1db94706053237e5a | 1,825 |
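# Hedged sketch against an empty gosec report (file name made up); with no issues the
# external print_category helper is never called.
with open("gosec-results.json", "w") as f:
    json.dump({"Issues": []}, f)
high, medium, low = analyze("gosec-results.json", base_path="/src/project")
# prints "Security Check: No Issues Detected!" and returns three empty lists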
import numpy as np
def _peaks(image, nr, minvar=0):
"""Divide image into nr quadrants and return peak value positions."""
n = np.ceil(np.sqrt(nr))
quadrants = _rects(image.shape,n,n)
peaks = []
for q in quadrants:
q_image = image[q.as_slice()]
q_argmax = q_image.argmax()
q_maxpos = np.unravel_index(q_argmax,q.shape)
if q_image.flat[q_argmax] > minvar:
peaks.append(np.array(q_maxpos) + q.origin)
return peaks | f7ecc3e5fafd55c38a85b4e3a05a04b25cbd97cf | 1,826 |
def connection_type_validator(type):
"""
Property: ConnectionInput.ConnectionType
"""
valid_types = [
"CUSTOM",
"JDBC",
"KAFKA",
"MARKETPLACE",
"MONGODB",
"NETWORK",
"SFTP",
]
if type not in valid_types:
raise ValueError("% is not a valid value for ConnectionType" % type)
return type | cc2ed6096097c719b505356e69a5bb5cdc109495 | 1,828 |
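# Usage sketch: valid values pass through unchanged, anything else raises ValueError.
connection_type_validator("JDBC")       # returns "JDBC"
try:
    connection_type_validator("HTTP")
except ValueError as exc:
    print(exc)                          # HTTP is not a valid value for ConnectionType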
import calendar
import time
def render_pretty_time(jd):
"""Convert jd into a pretty string representation"""
year, month, day, hour_frac = sweph.revjul(jd)
_, hours, minutes, seconds = days_frac_to_dhms(hour_frac/24)
time_ = calendar.timegm((year,month,day,hours,minutes,seconds,0,0,0))
return time.strftime('%e %b %Y %H:%M UTC', time.gmtime(time_)) | 07c63429ae7881fbdec867e8bebab7578bfaacdd | 1,830 |
import json
from flask import Response
def jsonify(obj):
"""Dump an object to JSON and create a Response object from the dump.
Unlike Flask's native implementation, this works on lists.
"""
dump = json.dumps(obj)
return Response(dump, mimetype='application/json') | 72e1fb425507d5905ef96de05a146805f5aa4175 | 1,831 |
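# Minimal sketch; building a Response directly works even outside a request context.
resp = jsonify([{"id": 1}, {"id": 2}])
print(resp.mimetype)                    # application/json
print(resp.get_data(as_text=True))      # [{"id": 1}, {"id": 2}]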
def section(stree):
"""
Create sections in a :class:`ScheduleTree`. A section is a sub-tree with
the following properties: ::
* The root is a node of type :class:`NodeSection`;
* The immediate children of the root are nodes of type :class:`NodeIteration`
and have same parent.
* The :class:`Dimension` of the immediate children are either: ::
* identical, OR
* different, but all of type :class:`SubDimension`;
* The :class:`Dimension` of the immediate children cannot be a
:class:`TimeDimension`.
"""
class Section(object):
def __init__(self, node):
self.parent = node.parent
self.dim = node.dim
self.nodes = [node]
def is_compatible(self, node):
return (self.parent == node.parent
and (self.dim == node.dim or node.dim.is_Sub))
# Search candidate sections
sections = []
for i in range(stree.height):
# Find all sections at depth `i`
section = None
for n in findall(stree, filter_=lambda n: n.depth == i):
if any(p in flatten(s.nodes for s in sections) for p in n.ancestors):
# Already within a section
continue
elif not n.is_Iteration or n.dim.is_Time:
section = None
elif section is None or not section.is_compatible(n):
section = Section(n)
sections.append(section)
else:
section.nodes.append(n)
# Transform the schedule tree by adding in sections
for i in sections:
node = NodeSection()
processed = []
for n in list(i.parent.children):
if n in i.nodes:
n.parent = node
if node not in processed:
processed.append(node)
else:
processed.append(n)
i.parent.children = processed
return stree | edd6682d1ff2a637049a801d548181d35e07961a | 1,832 |
def was_csv_updated() -> bool:
""" This function compares the last modified time on the csv file to the
actions folder to check which was last modified.
1. check if csv or files have more actions.
2. if same number of actions, assume the update was made in the csv
"""
csv_actions = get_cas_from_csv()
file_actions = get_cas_from_files()
    return len(csv_actions) >= len(file_actions)
import math
def MakeBands(dR, numberOfBands, nearestInteger):
"""
Divide a range into bands
:param dR: [min, max] the range that is to be covered by the bands.
:param numberOfBands: the number of bands, a positive integer.
:param nearestInteger: if True then [floor(min), ceil(max)] is used.
:return: A List consisting of [min, midpoint, max] for each band.
"""
bands = list()
if (dR[1] < dR[0]) or (numberOfBands <= 0):
return bands
x = list(dR)
if nearestInteger:
x[0] = math.floor(x[0])
x[1] = math.ceil(x[1])
dx = (x[1] - x[0]) / float(numberOfBands)
b = [x[0], x[0] + dx / 2.0, x[0] + dx]
i = 0
while i < numberOfBands:
bands.append(b)
b = [b[0] + dx, b[1] + dx, b[2] + dx]
i += 1
return bands | 104720371d1f83bf2ee2c8fddbf05401ec034560 | 1,834 |
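# Quick sketch: split the range [0, 10] into 5 equal bands.
for band in MakeBands([0.0, 10.0], 5, nearestInteger=False):
    print(band)   # [0.0, 1.0, 2.0], [2.0, 3.0, 4.0], ..., [8.0, 9.0, 10.0]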
import math
def euler719(n=10**12):
"""Solution for problem 719."""
return sum(i*i
for i in range(2, 1 + int(math.sqrt(n)))
if can_be_split_in_sum(i*i, i)) | 3f814ed837ad58f73f901a81af34ac31b520b372 | 1,835 |
def inner(a, b):
"""
Inner product of two tensors.
Ordinary inner product of vectors for 1-D tensors (without complex
conjugation), in higher dimensions a sum product over the last
axes.
Note:
Numpy argument out is not supported.
On GPU, the supported dtypes are np.float16, and np.float32.
On CPU, the supported dtypes are np.float16, np.float32, and
np.float64.
Args:
a (Tensor): input tensor. If a and b are nonscalar, their last
dimensions must match.
b (Tensor): input tensor. If a and b are nonscalar, their last
dimensions must match.
Returns:
Tensor or scalar, out.shape = a.shape[:-1] + b.shape[:-1].
Raises:
ValueError: if x1.shape[-1] != x2.shape[-1].
    Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore.numpy as np
>>> a = np.ones((5, 3))
>>> b = np.ones((2, 7, 3))
>>> output = np.inner(a, b)
>>> print(output)
[[[3. 3. 3. 3. 3. 3. 3.]
[3. 3. 3. 3. 3. 3. 3.]]
[[3. 3. 3. 3. 3. 3. 3.]
[3. 3. 3. 3. 3. 3. 3.]]
[[3. 3. 3. 3. 3. 3. 3.]
[3. 3. 3. 3. 3. 3. 3.]]
[[3. 3. 3. 3. 3. 3. 3.]
[3. 3. 3. 3. 3. 3. 3.]]
[[3. 3. 3. 3. 3. 3. 3.]
[3. 3. 3. 3. 3. 3. 3.]]]
"""
if F.rank(a) == 0 or F.rank(b) == 0:
a = _expand(a, 1)
b = _expand(b, 1)
if F.rank(a) < F.rank(b):
a, b = b, a
return F.tensor_mul(a, b)
_ = _check_shape_aligned(F.shape(a), F.shape(b))
aligned_shape_a = (F.shape_mul(F.shape(a)[:-1]), F.shape(a)[-1])
aligned_shape_b = (F.shape_mul(F.shape(b)[:-1]), F.shape(a)[-1])
a_aligned = F.reshape(a, aligned_shape_a)
b_aligned = F.reshape(b, aligned_shape_b)
res = _matmul_T(a_aligned, b_aligned)
res = F.reshape(res, F.shape(a)[:-1] + F.shape(b)[:-1])
return res | b09ee6e22fd6c9bd7c1fd758fa62b38ad8fae1ab | 1,836 |
import colorama
def stern_warning(warn_msg: str) -> str:
"""Wraps warn_msg so that it prints in red."""
return _reg(colorama.Fore.RED, warn_msg) | 639f0f6aaf3ce1f6ad46ed0f5d852be3457337fb | 1,837 |