content (string, length 35 to 762k) | sha1 (string, length 40) | id (int64, 0 to 3.66M) |
---|---|---|
import tifffile as tiff
def writeData(filename, data):
"""
MBARBIER: Taken/adapted from https://github.com/ChristophKirst/ClearMap/blob/master/ClearMap/IO/TIF.py
Write image data to tif file
Arguments:
filename (str): file name
data (array): image data
Returns:
str: tif file name
"""
d = len(data.shape);
if d == 2:
tiff.imsave(filename, data.transpose([0,1]));
elif d == 3:
tiff.imsave(filename, data.transpose([2,0,1]), photometric = 'minisblack', planarconfig = 'contig', bigtiff = True);
elif d == 4:
        # tifffile expects (z, y, x, c)
tiff.imsave(filename, data.transpose([0,1,2,3]), photometric = 'minisblack', planarconfig = 'contig', bigtiff = True);
else:
raise RuntimeError('writing multiple channel data to tif not supported!');
return filename; | cc4414b9f52413bebc422032f796cd242ecc8ef4 | 3,656,000 |
def get_trigger_function(trigger_message, waiter):
    """Helper function that builds a trigger.
    Args:
        trigger_message: message or instruction to continue.
        waiter: function that pauses the instruction flow.
    """
def trigger_function():
        # Print the instruction that fires the trigger
print(trigger_message)
waiter()
        # Play an audio clip confirming that the trigger was fired.
reproducir_audio(TRIGGER_AUDIO_PATH)
return trigger_function | b389dd93631ae396c65d5653da6cea3ec91b3556 | 3,656,001 |
import numpy as np
def find_peaks(amplitude):
"""
    A value is considered to be a peak if it is at least as high as its four
    closest neighbours (two on each side).
"""
    # Pad the array with -1 at both ends so edge values can be compared
    # against two neighbours on each side.
padded = np.concatenate((-np.ones(2), amplitude, -np.ones(2)))
# Shift the array by one/two values to the left/right
shifted_l2 = padded[:-4]
shifted_l1 = padded[1:-3]
shifted_r1 = padded[3:-1]
shifted_r2 = padded[4:]
# Compare the original array with the shifted versions.
peaks = ((amplitude >= shifted_l2) & (amplitude >= shifted_l1) &
(amplitude >= shifted_r1) & (amplitude >= shifted_r2))
return peaks | 192f25bbc491c7e880ff5363098b0ced29f37567 | 3,656,002 |
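A minimal usage sketch for find_peaks above (the input values and printed indices are illustrative):
amplitude = np.array([0.1, 0.5, 2.0, 0.4, 0.3, 1.5, 0.2])
mask = find_peaks(amplitude)
print(np.flatnonzero(mask))  # -> [2 5], the indices of the local peaks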
from typing import Optional
def sync(
*,
client: Client,
json_body: CustomFieldOptionsCreateRequestBody,
) -> Optional[CustomFieldOptionsCreateResponseBody]:
"""Create Custom Field Options
Create a custom field option. If the sort key is not supplied, it'll default to 1000, so the option
appears near the end of the list.
Args:
json_body (CustomFieldOptionsCreateRequestBody): Example: {'custom_field_id':
'01FCNDV6P870EA6S7TK1DSYDG0', 'sort_key': 10, 'value': 'Product'}.
Returns:
Response[CustomFieldOptionsCreateResponseBody]
"""
return sync_detailed(
client=client,
json_body=json_body,
).parsed | 6215e704be4bbc32e52fb03817e00d7fd5338365 | 3,656,003 |
def decrypt_with_private_key(data, private_key):
"""Decrypts the PKCS#1 padded shared secret using the private RSA key"""
return _pkcs1_unpad(private_key.decrypt(data)) | f1dac9113fb97f62afab524239e38c6cb196c989 | 3,656,004 |
import os
import re
def loadvars(builddir):
"""if builddir does not exist or does not have a cache, returns an
empty odict"""
v = odict()
if builddir is None or not os.path.exists(builddir):
return v
c = os.path.join(builddir, 'CMakeCache.txt')
if os.path.exists(c):
with open(c, 'r') as f:
for line in f:
# logdbg("loadvars0", line.strip())
if not re.match(_cache_entry, line):
continue
ls = line.strip()
name = re.sub(_cache_entry, r'\1', ls)
vartype = re.sub(_cache_entry, r'\2', ls)[1:]
value = re.sub(_cache_entry, r'\3', ls)
# logdbg("loadvars1", name, vartype, value)
v[name] = CMakeCacheVar(name, value, vartype)
return v | 6069461706d88f3dee96eb6b9aece29b3f47c77b | 3,656,005 |
import warnings
from functools import wraps
def deprecated(func):
"""
This is a decorator which can be used to mark functions as deprecated. It
will result in a warning being emitted when the function is used.
:param func: original function
:type func: :any:`collections.Callable`
:return: decorated func
:rtype: :any:`collections.Callable`
"""
@wraps(func)
def newFunc (*args, **kwargs):
warnings.warn("Call to deprecated function %s." % func.__name__,
category=DeprecationWarning, stacklevel=2)
return func(*args, **kwargs)
return newFunc | ba237c30d97013080bd84569af1817685023dab6 | 3,656,006 |
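A short usage sketch for the deprecated decorator above (the decorated function name is illustrative):
@deprecated
def old_api():
    return 42

warnings.simplefilter("always", DeprecationWarning)
old_api()  # emits DeprecationWarning: "Call to deprecated function old_api."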
import re
from flask import request, render_template
def prediction():
    """
    Summarization endpoint: reads the form fields "paragraphs" and "maxlen"
    and returns the summarized text of the paragraphs.
    """
print(request.form.values())
paragraphs = request.form.get("paragraphs")
    paragraphs = re.sub(r"\d+", "", paragraphs)
maxlen = int(request.form.get("maxlen"))
summary = summarizer(paragraphs, max_length=maxlen, min_length=49, do_sample=False)
return render_template('index.html', prediction_text = '" {} "'.format(summary[0]["summary_text"])), 200 | a1bdf996908e65e3087ed4ffe27402c9763b4d69 | 3,656,007 |
def is_reviewer(user):
"""Return True if this user is a financial aid reviewer"""
# no need to cache here, all the DB lookups used during has_perm
# are already cached
return user.has_perm("finaid.review_financial_aid") | e3c599f78eb51c33ab48e3760c0f2965ba305916 | 3,656,008 |
from subprocess import check_output
def getLogMessage(commitSHA):
"""Get the log message for a given commit hash"""
output = check_output(["git","log","--format=%B","-n","1",commitSHA])
return output.strip() | 2d42e587da57faff5366fc656e8d45a8fa797208 | 3,656,009 |
def get_old_stacks(cfn, old_instances, debug=True):
""" Gets all of the stacks for the old RDS instances """
old_stacks = get_cfn_stack_for_rds(cfn, old_instances, debug)
if debug:
print("DEBUG: Old stacks found: %s" % len(old_stacks))
return old_stacks | 0512b9b9f6043ba5db31a88786e23296ae4c03dc | 3,656,010 |
def sup(content, accesskey:str ="", class_: str ="", contenteditable: str ="",
data_key: str="", data_value: str="", dir_: str="", draggable: str="",
hidden: str="", id_: str="", lang: str="", spellcheck: str="",
style: str="", tabindex: str="", title: str="", translate: str=""):
"""
Returns superscript.\n
`content`: Contents of the superscript.\n
"""
g_args = global_args(accesskey, class_, contenteditable, data_key, data_value,
dir_, draggable, hidden, id_, lang, spellcheck, style,
tabindex, title, translate)
return f"<sup {g_args}>{content}</sup>\n" | dff8635d98f68e5b024fe23cbeaa6a1a9884222f | 3,656,011 |
def isnonempty(value):
"""
Return whether the value is not empty
Examples::
>>> isnonempty('a')
True
>>> isnonempty('')
False
:param value: string to validate whether value is not empty
"""
return value != '' | 0250cb455d8f77027d5cde9101a24683950bbdb2 | 3,656,012 |
def InstallSystem(config, deployment, options):
"""Install the local host from the sysync deployment configuration files."""
installed = {}
# Create fresh temporary directory
Log('Clearing temporary deployment path: %s' % config['deploy_temp_path'])
run.Run('/bin/rm -rf %s' % config['deploy_temp_path'])
run.Run('/bin/mkdir -p %s' % config['deploy_temp_path'])
# Install the packages
result = InstallPackagesLocally(config, deployment, options)
return result | 642eda86228e5575bc7267d9b3a5c3ddc055daf4 | 3,656,013 |
import tensorflow as tf
def preprocess_input(x):
    """Preprocessing."""
return tf.keras.applications.imagenet_utils.preprocess_input(x, mode="torch") | 6795c5e571d67a7908edbe3c3ca0ed5e3412d2f0 | 3,656,014 |
from math import floor, isclose
def attribute_to_partner_strict(partner, partner_string_or_spec, amount):
"""Return the amount attributable to the given partner."""
spec = (
partner_string_or_spec
if isinstance(partner_string_or_spec, dict)
else parse_partner_string(partner_string_or_spec)
)
if partner not in spec:
raise ValueError("Partner not found in partner string: %s" % partner)
v100 = spec[partner] * float(amount.abs())
f_floor = round if isclose(v100, round(v100)) else floor
v = amount.sign() * 0.01 * f_floor(v100)
return Amount(str(v)).with_commodity(amount.commodity) | d7e00b50e8be010d7896b6c51e1e3fcfe73438d2 | 3,656,015 |
import math
import numpy as np
def drawLines(img, lines, color=(255,0,0)):
"""
Draw lines on an image
"""
centroids = list()
r_xs = list()
r_ys = list()
for line_ in lines:
for rho,theta in line_:
a = np.cos(theta)
b = np.sin(theta)
x0 = a*rho
y0 = b*rho
x1 = int(x0 + 1000*(-b))
y1 = int(y0 + 1000*(a))
x2 = int(x0 - 1000*(-b))
y2 = int(y0 - 1000*(a))
slope = (y1 - y0) / float(x1 - x0)
angle = math.degrees(math.atan(slope))
if abs(angle) > 80:
# print(img.shape[1])
h_layout = line((0, 0), (img.shape[1], 0))
h_layout_lower = line((0, img.shape[0]), (img.shape[1], img.shape[0]))
r = intersection2(h_layout, line((x1, y1), (x2, y2)))
r_lower = intersection2(h_layout_lower, line((x1, y1), (x2, y2)))
# cv2.line(img, (int(x1), int(y1)), (int(x2), int(y2)), color, 2)
# cv2.line(img, (int(r[0]), int(r[1])), (int(r_lower[0]), int(r_lower[1])), color, 2)
# print('min(r, r_lower), max(r, r_lower) :', np.min(np.array([r, r_lower])), np.max(np.array([r, r_lower])))
                # A valid min/max range for the intersection points must be defined here. #
if np.min(np.array([r, r_lower])) >= 0 and np.max(np.array([r, r_lower])) < max(img.shape):
center_p = (int((r[0] + r_lower[0]) / 2), int((r[1] + r_lower[1])/ 2))
centroids.append(center_p)
r_xs.append((r[0], r_lower[0]))
r_ys.append((r[1], r_lower[1]))
# cv2.circle(img, center_p, 10, (255, 0, 255), -1)
# cv2.line(img, (int(0), int(0)), (int(0), int(img.shape[0])), color, 2)
# cv2.line(img, (int(img.shape[1]), int(0)), (int(img.shape[1]), int(img.shape[0])), color, 2)
# cv2.circle(img, (0, int(img.shape[0] / 2)), 10, (255, 0, 255), -1)
# cv2.circle(img, (img.shape[1], int(img.shape[0] / 2)), 10, (255, 0, 255), -1)
centroids.append((0, int(img.shape[0] / 2)))
centroids.append((img.shape[1], int(img.shape[0] / 2)))
return r_xs, r_ys, centroids | 5918bb1a81d8efae2874f294d927f7b01527d1d1 | 3,656,016 |
import numpy
def moments_of_inertia(geo, amu=True):
""" principal inertial axes (atomic units if amu=False)
"""
ine = inertia_tensor(geo, amu=amu)
moms, _ = numpy.linalg.eigh(ine)
moms = tuple(moms)
return moms | 34153dba5ea49d457ee97d4024a103b0d05c6bd0 | 3,656,017 |
from datetime import timedelta
def greenblatt_earnings_yield(stock, date=None, lookback_period=timedelta(days=0), period='FY'):
"""
:param stock: ticker(s) in question. Can be a string (i.e. 'AAPL') or a list of strings (i.e. ['AAPL', 'BA']).
:param date: Can be a datetime (i.e. datetime(2019, 1, 1)) or list of datetimes. The most recent date of reporting from that date will be used. By default, date=None.
:param lookback_period: lookback from date (used to compare against previous year or quarter etc.) i.e. timedelta(days=90).
:param period: 'FY' for fiscal year, 'Q' for quarter, 'YTD' for calendar year to date, 'TTM' for trailing twelve months.
:return: .. math:: \\text{Greenblatt Earnings Yield} = \\frac{\\text{EBIT}}{\\text{EV}}
"""
return earnings_before_interest_and_taxes(stock=stock, date=date, lookback_period=lookback_period, period=period) \
/ enterprise_value(stock=stock, date=date, lookback_period=lookback_period, period=period) | 333b12b609523ab16eeb1402d0219264a3a159e3 | 3,656,018 |
import shutil
from os.path import isdir
def remove_directory(dir_path):
"""Delete a directory"""
if isdir(dir_path):
try:
shutil.rmtree(dir_path)
return ok_resp(f'Directory removed {dir_path}')
except TypeError as err_obj:
return err_resp(f'Failed to remove directory. {err_obj}')
except FileNotFoundError as err_obj:
return err_resp(f'Directory not found: {err_obj}')
        except PermissionError as err_obj:
            return err_resp(f'Failed to delete directory: {err_obj}')
        except OSError as err_obj:
            return err_resp(f'Failed to delete directory: {err_obj}')
return ok_resp(f'Not a directory {dir_path}') | c174568c024cff1948bdf78206e49c2ca40c6b25 | 3,656,019 |
def new_eps_after(since_ep):
"""
:param since_ep: Episode instance
:return: Number of episodes since then
"""
session = Session.object_session(since_ep)
series = since_ep.series
series_eps = session.query(Episode).join(Episode.series).\
filter(Series.id == series.id)
if series.identified_by == 'ep':
if since_ep.season is None or since_ep.number is None:
log.debug('new_eps_after for %s falling back to timestamp because latest dl in non-ep format' %
series.name)
return series_eps.filter(Episode.first_seen > since_ep.first_seen).count()
        return series_eps.filter((Episode.identified_by == 'ep') &
                                 (((Episode.season == since_ep.season) & (Episode.number > since_ep.number)) |
                                  (Episode.season > since_ep.season))).count()
elif series.identified_by == 'seq':
return series_eps.filter(Episode.number > since_ep.number).count()
elif series.identified_by == 'id':
return series_eps.filter(Episode.first_seen > since_ep.first_seen).count()
else:
log.debug('unsupported identified_by %s', series.identified_by)
return 0 | 218d19672d0fe3b0b14fa6443c6c00bbd12ba495 | 3,656,020 |
from pathlib import Path
from typing import Iterator
import json as pyjson
import os
def parse_json_main_index(out_dir: Path=OUTPUT_DIR) -> Iterator[Link]:
"""parse an archive index json file and return the list of links"""
index_path = os.path.join(out_dir, JSON_INDEX_FILENAME)
if os.path.exists(index_path):
with open(index_path, 'r', encoding='utf-8') as f:
links = pyjson.load(f)['links']
for link_json in links:
try:
yield Link.from_json(link_json)
except KeyError:
try:
detail_index_path = Path(OUTPUT_DIR) / ARCHIVE_DIR_NAME / link_json['timestamp']
yield parse_json_link_details(str(detail_index_path))
except KeyError:
# as a last effort, try to guess the missing values out of existing ones
try:
yield Link.from_json(link_json, guess=True)
except KeyError:
print(" {lightyellow}! Failed to load the index.json from {}".format(detail_index_path, **ANSI))
continue
return () | 237b3887cbba40734b820b59200513d614697862 | 3,656,021 |
DEBUG = False  # module-level debug flag
import sys
def header_maxperdisc(ctx, institution, requirement_id):
"""
header_maxperdisc : maxperdisc label? ;
"""
if DEBUG:
print(f'*** header_maxperdisc({class_name(ctx)=}, {institution=}, {requirement_id=}',
file=sys.stderr)
return_dict = {'label': get_label(ctx)}
maxperdisc_ctx = ctx.maxperdisc()
return_dict.update(maxperdisc(maxperdisc_ctx, institution, requirement_id))
return {'header_maxperdisc': return_dict} | f0e0d35b75692ec7daae12acedadd0b834994b7e | 3,656,022 |
import numpy as np
def set_route_queue(path_list,user_position,sudden_id,sudden_xy,pi):
    """
    Route the doctor who is closest to the last (sudden) patient to that patient.
    """
minimum_dis = 100
minimum_idx = 0
for i in range(len(path_list)):
dis = np.sqrt((user_position[path_list[i][-2]][0] - sudden_xy[0])**2 + (user_position[path_list[i][-2]][1] - sudden_xy[1])**2)
if(dis < minimum_dis):
minimum_dis = dis
minimum_idx = path_list[i][-2]
pi_idx = [i for i, x in enumerate(pi) if x == minimum_idx]
pi.insert(pi_idx[0]+1,sudden_id)
return pi | 0425c3edf2d488680ccb54661e79698a506e4fe4 | 3,656,023 |
def add(x, y):
"""Add two numbers together."""
return x+y | 92015156eac5bc9cc0be3b1812f9c0766f23020c | 3,656,024 |
import tempfile
import os
def _get_thintar_prefix(tarname):
"""
Make sure thintar temporary name is concurrent and secure.
:param tarname: name of the chosen tarball
:return: prefixed tarname
"""
tfd, tmp_tarname = tempfile.mkstemp(
dir=os.path.dirname(tarname),
prefix=".thin-",
suffix=os.path.splitext(tarname)[1],
)
os.close(tfd)
return tmp_tarname | e893309972742ffb52fd13911b5805e51b2baadc | 3,656,025 |
import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
def retry_session(tries=2,
backoff_factor=0.1,
status_forcelist=(500, 502, 504),
session=None):
"""
Parameters
----------
tries : int, number of retires.
backoff_factor : A backoff factor to apply between attempts after the
second try (most errors are resolved immediately by a second try without
a delay). urllib3 will sleep for: {backoff factor} * (2 ^ ({number of
total retries} - 1)) seconds. If the backoff_factor is 0.1, then sleep()
will sleep for [0.0s, 0.2s, 0.4s, ...] between retries. It will never be
longer than Retry.BACKOFF_MAX.
status_forcelist :
Retries are made on any HTTP responses in this list. Default values
include the following:
- 500: Internal Server Error.
- 502: Bad Gateway.
- 504: Gateway Timeout.
session
    Returns
    -------
    session : requests.Session
        A session whose HTTP(S) adapters retry failed requests.
    """
session = session or requests.Session()
retry = Retry(
total=tries,
read=tries,
connect=tries,
backoff_factor=backoff_factor,
status_forcelist=status_forcelist)
adapter = HTTPAdapter(max_retries=retry, pool_block=True)
session.mount('http://', adapter)
session.mount('https://', adapter)
return session | 5766c4623e0e53f4353de1e58080c1ad5c9b4080 | 3,656,026 |
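An illustrative use of retry_session above (the URL and parameter values are placeholders):
session = retry_session(tries=3, backoff_factor=0.5)
response = session.get("https://example.com/api/health", timeout=10)
print(response.status_code)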
import numpy as np
def vc(t, delta, beta):
"""velocity correlation of locus on rouse polymer. beta = alpha/2."""
return ( np.power(np.abs(t - delta), beta)
+ np.power(np.abs(t + delta), beta)
- 2*np.power(np.abs(t), beta)
)/( 2*np.power(delta, beta) ) | 89eff8a8cdb0e84a69e7990ebf0c128ca27ecea8 | 3,656,027 |
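A quick numerical sanity check for vc above (values are illustrative): at zero lag the correlation is normalized to 1, and it decays toward 0 once |t| is much larger than delta.
print(vc(0.0, 1.0, 0.5))   # (1 + 1 - 0) / 2 = 1.0
print(vc(10.0, 1.0, 0.5))  # close to 0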
algorithm_table = {}
def algorithm(name):
"""
A function decorator that is used to add an algorithm's Python class to the
algorithm_table.
    Args:
        name: A human readable label for the algorithm that is used to
            identify it in the GUI.
    """
def decorator(class_):
algorithm_table[name] = class_
return class_
return decorator | 8f67fec3f1933dc0ea041322fcf041f2247bc638 | 3,656,028 |
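A usage sketch for the registry decorator above (the class name is illustrative):
@algorithm("K-Means")
class KMeansAlgorithm:
    pass

print(algorithm_table)  # {'K-Means': <class '...KMeansAlgorithm'>}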
def comp_easy():
"""Get easy components."""
return Components(ewlaps, gi_setting.DEFAULT_EASY) | d15093dce67657b05665d7a9373d1328b4171f91 | 3,656,029 |
def play(player1, player2, rounds=1, verbose=False, symdict=None):
"""Play a number of `rounds` matches between the two players and return
the score $S = sum_j a_j$, where
    a_j = 1 if player1 won, -1 if player2 won, and 0 otherwise.
"""
if player1 is player2:
raise AttributeError("Players match...")
if player1._rules is not player2._rules:
raise AttributeError("Different rules sets...")
if symdict is None:
        symdict = range(len(player1._rules))
score = [0, 0, 0]
results = ["Player1 wins.", "Tie.", "Player 2 wins."]
playiter = xrange(rounds) if verbose else Progress(xrange(rounds))
for i in playiter:
res1, res2 = player1.play(), player2.play()
player1._memory.append((res1, res2))
player2._memory.append((res2, res1))
resind = 1 - player1._rules[res1][res2]
score[resind] += 1
if verbose:
print("{} vs {}: {}".format(symdict[res1], symdict[res2],
results[resind]))
print(score)
return score | 28e0cc41d664a6681b4af1216d0de6f1a2871f04 | 3,656,030 |
import numpy as np
from Bio import SeqIO
def calc_deltabin_3bpavg(seq, files, bin_freqs, seqtype = "fastq"):
"""
At each position (starting at i), count number of sequences where
region (i):(i+3) is mutated. This is sort of a rolling average and not critical
    to the result; it just ends up a bit cleaner than looking at a single
    base pair. We are assuming that on average a mutation disrupts binding,
however this is not always the case. For example, especially with RNAP, there might
be a couple positions that are not-at-all optimal for DNA binding.
Parameter
---------
seq: wild-type sequence of library region
files: filenames (used to identify bin number, '...bin*.fastq')
bin_freqs: numpy array (np.zeros([# bins, # letters (i.e. 4),
length sequence]) that contained the letter frequences from each
bin.
seqtype: sequence file type (i.e. '.fastq' or '.fasta')
Returns
-------
avgBin_counts: array 1*seqLength; contains counts used to calculate average
of mutated nucleotides at each position.
avgBin-avgbin_WT: average bin of mutated nucleotides at each position
relative to wild-type average bin.
"""
seqLength = len(seq)
avgBin_counts = np.zeros([len(files),seqLength])
avgBin = np.zeros(seqLength)
#filecount = 0
avgbin_WT = 0
for j in range(0,len(files)):
avgbin_WT += ( (j+1)*bin_freqs[j,:,0].sum() )/ bin_freqs[:,:,0].sum()
print('average_bin_WT', avgbin_WT)
for i in range(0,seqLength-2):
for j, fname in enumerate(files):
count = 0
binnumber = int(fname[-7]) - 1
for rec in SeqIO.parse(fname, seqtype):
if (rec.seq[i:(i+2)] != seq[i:(i+2)]):
count += 1
avgBin_counts[binnumber,i] = count
for i in range(0,seqLength-2):
for j in range(0,len(files)):
avgBin[i] += ( (j+1)*avgBin_counts[j,i] )/avgBin_counts[:,i].sum()
return avgBin_counts, (avgBin-avgbin_WT) | 5ea614e7280d6ed288ea03e63e86e3129d4e4994 | 3,656,031 |
import numpy as np
import numpy.linalg as nla
def make_right_handed(l_csl_p1, l_p_po):
"""
The function makes l_csl_p1 right handed.
Parameters
----------------
l_csl_p1: numpy.array
The CSL basis vectors in the primitive reference frame of crystal 1.
l_p_po: numpy.array
The primitive basis vectors of the underlying lattice in the orthogonal
reference frame.
Returns
-----------
t1_array: numpy.array
Right handed array
"""
l_csl_po1 = l_p_po.dot(l_csl_p1)
t1_array = np.array(l_csl_p1, dtype='double')
t2_array = np.array(l_csl_p1, dtype='double')
if (nla.det(l_csl_po1) < 0):
t1_array[:, 0] = t2_array[:, 1]
t1_array[:, 1] = t2_array[:, 0]
return t1_array | 3b5e3f21e6da5292fb84eb632ddcfa2ec52507ee | 3,656,032 |
import logging
def process_task(f, module_name, class_name, ftype, f_parameters, f_returns,
task_kwargs, num_nodes, replicated, distributed,
on_failure, time_out):
"""
Function that submits a task to the runtime.
:param f: Function or method
:param module_name: Name of the module containing the function/method
(including packages, if any)
:param class_name: Name of the class (if method)
:param ftype: Function type
    :param f_parameters: Function parameters (dictionary {'param1': Parameter()})
    :param f_returns: Function returns (dictionary {'*return_X': Parameter()})
:param task_kwargs: Decorator arguments
:param num_nodes: Number of nodes that the task must use
:param replicated: Boolean indicating if the task must be replicated
:param distributed: Boolean indicating if the task must be distributed
:param on_failure: Action on failure
:param time_out: Time for a task time out
:return: The future object related to the task return
"""
if __debug__:
logger.debug("TASK: %s of type %s, in module %s, in class %s" %
(f.__name__, ftype, module_name, class_name))
app_id = 0
# Check if the function is an instance method or a class method.
has_target = ftype == FunctionType.INSTANCE_METHOD
    fo = None
    num_returns = 0
    if f_returns:
        fo = _build_return_objects(f_returns)
        num_returns = len(f_returns)
# Get path
if class_name == '':
path = module_name
else:
path = module_name + '.' + class_name
# Infer COMPSs types from real types, except for files
_serialize_objects(f_parameters)
# Build values and COMPSs types and directions
vtdsc = _build_values_types_directions(ftype,
f_parameters,
f_returns,
f.__code_strings__)
values, names, compss_types, compss_directions, compss_streams, \
compss_prefixes, content_types = vtdsc # noqa
# Get priority
has_priority = task_kwargs['priority']
# Signature and other parameters:
signature = '.'.join([path, f.__name__])
# num_nodes = 1 # default due to not MPI decorator yet
# replicated = False # default due to not replicated tag yet
# distributed = False # default due to not distributed tag yet
if __debug__:
# Log the task submission values for debugging purposes.
if logger.isEnabledFor(logging.DEBUG):
values_str = ' '.join(str(v) for v in values)
types_str = ' '.join(str(t) for t in compss_types)
direct_str = ' '.join(str(d) for d in compss_directions)
streams_str = ' '.join(str(s) for s in compss_streams)
prefixes_str = ' '.join(str(p) for p in compss_prefixes)
names_str = ' '.join(x for x in names)
ct_str = ' '.join(str(x) for x in content_types)
logger.debug("Processing task:")
logger.debug("\t- App id: " + str(app_id))
logger.debug("\t- Path: " + path)
logger.debug("\t- Function name: " + f.__name__)
logger.debug("\t- On failure behavior: " + on_failure)
logger.debug("\t- Task time out: " + str(time_out))
logger.debug("\t- Signature: " + signature)
logger.debug("\t- Priority: " + str(has_priority))
logger.debug("\t- Has target: " + str(has_target))
logger.debug("\t- Num nodes: " + str(num_nodes))
logger.debug("\t- Replicated: " + str(replicated))
logger.debug("\t- Distributed: " + str(distributed))
logger.debug("\t- Values: " + values_str)
logger.debug("\t- Names: " + names_str)
logger.debug("\t- COMPSs types: " + types_str)
logger.debug("\t- COMPSs directions: " + direct_str)
logger.debug("\t- COMPSs streams: " + streams_str)
logger.debug("\t- COMPSs prefixes: " + prefixes_str)
logger.debug("\t- Content Types: " + ct_str)
# Check that there is the same amount of values as their types, as well
# as their directions, streams and prefixes.
assert (len(values) == len(compss_types) == len(compss_directions) ==
len(compss_streams) == len(compss_prefixes) == len(content_types))
# Submit task to the runtime (call to the C extension):
# Parameters:
# 0 - <Integer> - application id (by default always 0 due to it is
# not currently needed for the signature)
# 1 - <String> - path of the module where the task is
#
# 2 - <String> - behavior if the task fails
#
# 3 - <String> - function name of the task (to be called from the
# worker)
# 4 - <String> - priority flag (true|false)
#
# 5 - <String> - has target (true|false). If the task is within an
# object or not.
# 6 - [<String>] - task parameters (basic types or file paths for
# objects)
# 7 - [<Integer>] - parameters types (number corresponding to the type
# of each parameter)
# 8 - [<Integer>] - parameters directions (number corresponding to the
# direction of each parameter)
# 9 - [<Integer>] - parameters streams (number corresponding to the
# stream of each parameter)
# 10 - [<String>] - parameters prefixes (sting corresponding to the
# prefix of each parameter)
compss.process_task(app_id,
signature,
on_failure,
time_out,
has_priority,
num_nodes,
replicated,
distributed,
has_target,
num_returns,
values,
names,
compss_types,
compss_directions,
compss_streams,
compss_prefixes,
content_types)
# Return the future object/s corresponding to the task
# This object will substitute the user expected return from the task and
# will be used later for synchronization or as a task parameter (then the
# runtime will take care of the dependency.
return fo | 3527b5eb51c1ed5b5a9000e48ff940a63f1610db | 3,656,033 |
def company(anon, obj, field, val):
"""
Generates a random company name
"""
return anon.faker.company(field=field) | 95580147817a37542f75e2c728941a159cd30bd3 | 3,656,034 |
def delete_schedule():
    """
    On a GET request, returns the page for deleting a schedule.
    On a POST request, deletes the selected schedule
    (the delete request comes from the main page (func index); the function has no template of its own).
    """
if not check_admin_status():
        flash(f'You do not have permission to view this page!', 'error')
        app.logger.warning(f"An employee with insufficient access level tried to delete a schedule: {get_user_info()}")
return redirect(url_for('index'))
schedule_id = request.args.get('schedule_id')
ScheduleCleaning.query.filter_by(id=schedule_id).delete()
db.session.commit()
return redirect(url_for('index')) | 1e73c757956d4bd78f3a093e2a2ddfde894aeac5 | 3,656,035 |
import json
def dict_from_JSON(JSON_file: str) -> dict:
"""
Takes a WDL-mapped json file and creates a dict containing the bindings.
:param JSON_file: A required JSON file containing WDL variable bindings.
"""
json_dict = {}
# TODO: Add context support for variables within multiple wdl files
with open(JSON_file) as data_file:
data = json.load(data_file)
for d in data:
if isinstance(data[d], str):
json_dict[d] = f'"{data[d]}"'
else:
json_dict[d] = data[d]
return json_dict | 98deccce943e5506233f1e20d73c4d03eda16858 | 3,656,036 |
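A small usage sketch for dict_from_JSON above (the file contents are illustrative):
import json, tempfile
with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as tmp:
    json.dump({"workflow.sample": "NA12878", "workflow.threads": 4}, tmp)
print(dict_from_JSON(tmp.name))
# -> {'workflow.sample': '"NA12878"', 'workflow.threads': 4}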
def show(id):
    """Renders the page for a specific politician."""
p = Politico.query.get_or_404(id)
    # Apply the month, year and pagination filters
mes, ano, tipo, page = (request.args.get("mes"),
request.args.get("ano", 2020, type=int),
request.args.get("tipo"),
request.args.get("page", 1, type=int))
form = form_filtro_despesas(parlamentar=p, ano=ano)
if form.validate_on_submit():
        # Drop filters like `mes=""` (all months)
        # and keep only the ones actually set, such as `mes=1`, etc.
        # Then redirect to apply the filters
params = {k: v for k, v in form.data.items() if v}
        # Remove the csrf_token before redirecting
params.pop("csrf_token")
return redirect(url_for("pages.show", id=id, **params))
pagination = p.despesas(ano, mes).paginate(page, 40,
error_out=True)
total_gasto = Reembolso.total_gasto(p, ano=ano, mes=mes)
return render_template("pages/show.html",
parlamentar=p,
pagination=pagination,
total_gasto=total_gasto,
form=form) | b06ce6275819d32e9dff1945e97425ee696c1313 | 3,656,037 |
def map_datapoint(data_point: DATAPOINT_TYPE) -> SFX_OUTPUT_TYPE:
"""
Create dict value to send to SFX.
:param data_point: Dict with values to send
:type data_point: dict
:return: SignalFx data
:rtype: dict
"""
return {
"metric": data_point["metric"],
"value": data_point["value"],
"dimensions": dict(data_point["dimensions"], **default_dimensions) if "dimensions" in data_point else default_dimensions,
} | cf5d7eb1bded092adb2b002ee93ad168e696230a | 3,656,038 |
def write_obs(mdict, obslist, flag=0):
    """
    Format the observations in `mdict` for one epoch as a RINEX observation record.
    """
# Print epoch
epoch = mdict['epoch']
res = epoch.strftime("> %Y %m %d %H %M %S.") + '{0:06d}0'.format(int(epoch.microsecond))
# Epoch flag
res += " {0:2d}".format(flag)
# Num sats
res += " {0:2d}".format(len(mdict)-1)
res += '\n'
# For each satellite, print obs
for sat in mdict:
if sat == 'epoch':
continue
res += sat
obstypes = obslist[sat[0]]
for o in obstypes:
try:
meas = mdict[sat][o]
except KeyError:
meas = 0.0
# BeiDou satellites can have long ranges if GEO satellites are used
if meas > 40e6:
meas = 0.0
res += '{0:14.3f}00'.format(meas)
res += '\n'
return res | 5a91b02fce07f455f4442fe6fbf76d3609f5a74e | 3,656,039 |
from typing import Optional
from typing import Union
import fsspec
def open_view(
path: str,
*,
filesystem: Optional[Union[fsspec.AbstractFileSystem, str]] = None,
synchronizer: Optional[sync.Sync] = None,
) -> view.View:
"""Open an existing view.
Args:
path: View storage directory.
filesystem: The file system used to access the view.
synchronizer: The synchronizer used to synchronize the view.
Returns:
The opened view.
Example:
>>> view = open_view("/home/user/myview")
"""
return view.View.from_config(path,
filesystem=filesystem,
synchronizer=synchronizer) | b12471f59ef78e444a43c0e766cb6b4237e65338 | 3,656,040 |
from openbabel import pybel
def smi2xyz(smi, forcefield="mmff94", steps=50):
"""
Example:
utils.smi2xyz("CNC(C(C)(C)F)C(C)(F)F")
returns:
C 1.17813 0.06150 -0.07575
N 0.63662 0.20405 1.27030
C -0.86241 0.13667 1.33270
C -1.46928 -1.21234 0.80597
C -0.94997 -2.44123 1.55282
C -2.99527 -1.22252 0.74860
F -1.08861 -1.36389 -0.50896
C -1.34380 0.44926 2.78365
C -0.84421 1.76433 3.34474
F -2.70109 0.48371 2.84063
F -0.94986 -0.53971 3.63106
H 0.78344 0.82865 -0.74701
H 0.99920 -0.92873 -0.50038
H 2.26559 0.18049 -0.03746
H 1.03185 -0.51750 1.87094
H -1.24335 0.93908 0.68721
H -1.29943 -2.47273 2.58759
H -1.27996 -3.36049 1.05992
H 0.14418 -2.47324 1.55471
H -3.35862 -0.36599 0.16994
H -3.34471 -2.11983 0.22567
H -3.46364 -1.21709 1.73400
H -1.20223 2.60547 2.74528
H -1.22978 1.89248 4.36213
H 0.24662 1.79173 3.40731
"""
mol = pybel.readstring("smi", smi)
    mol.addh()  # add hydrogens; without this call pybel outputs an xyz string with no hydrogens
mol.make3D(forcefield=forcefield, steps=steps)
# possible forcefields: ['uff', 'mmff94', 'ghemical']
mol.localopt()
return _to_pyscf_atom(mol) | 083bbc1a242a3f5f247fc6f7066e099dab654b7a | 3,656,041 |
from typing import List, Optional, Tuple
import cv2
import numpy as np
def pgm_to_pointcloud(
depth_image: np.ndarray, color_image: Optional[np.ndarray],
intrinsics: Tuple[float, float, float, float],
distortion: List[float]) -> Tuple[np.ndarray, Optional[np.ndarray]]:
"""Fast conversion of opencv images to pointcloud.
Takes ~7 ms per 1280x720 RGBD on my corp laptop (hirak).
Args:
depth_image: OpenCV image.
color_image: Corresponding color image, if colors for each point is desired.
intrinsics: fx, fy, cx, cy.
    distortion: Standard distortion params k1, k2, p1, p2, [k3, [k4, k5, k6]].
Returns:
points: Nx3 array of points in space.
colors: Nx3 array of colors, each row an RGB. None if color_image is None.
"""
# The code below is optimized for speed, further optimizations may also be
# possible.
x_axis, y_axis = np.mgrid[0:depth_image.shape[1], 0:depth_image.shape[0]]
valid = ~np.isnan(depth_image)
x_axis = x_axis.T[valid]
y_axis = y_axis.T[valid]
depth = depth_image[valid] * _DEPTH_SCALE
x_and_y = np.vstack([x_axis, y_axis]).astype(float)
fx, fy, cx, cy = intrinsics
camera_matrix = np.array([[fx, 0, cx], [0, fy, cy], [0, 0, 1]])
x_and_y = cv2.undistortPoints(x_and_y, camera_matrix, np.array(distortion))
x_and_y = x_and_y.T.reshape(2, -1)
points = np.vstack([x_and_y * depth, depth]).T
colors = None
if color_image is not None:
colors = color_image[valid]
if len(colors.shape) > 1 and colors.shape[1] == 3:
# OpenCV uses BGR. Point cloud libraries like to use RGB.
colors[:, [0, 2]] = colors[:, [2, 0]]
else:
colors = np.vstack([colors, colors, colors]).T
return points, colors | 574d514c216f0db1f90bf277dc78a5b5dcc2535a | 3,656,042 |
import numpy as np
def matching_poss(poss_1, poss_2):
"""Count how many rows the possibilities have in common.
Arguments:
poss_1 {np.array} -- possibilities 1
poss_2 {np.array} -- possibilities 2
Returns:
int -- the count/matches
"""
matches = 0
for row_2 in poss_2:
for row_1 in poss_1:
if np.array_equal(row_1, row_2):
matches += 1
return matches | 096214d8e2115afd21cbd76b28cafa54574cdfb1 | 3,656,043 |
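A tiny usage sketch for matching_poss above (the arrays are illustrative):
poss_1 = np.array([[1, 0], [0, 1], [1, 1]])
poss_2 = np.array([[0, 1], [1, 1], [0, 0]])
print(matching_poss(poss_1, poss_2))  # -> 2 (rows [0, 1] and [1, 1] appear in both)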
from collections import Counter
def unk_emb_stats(sentences, emb):
"""Compute some statistics about unknown tokens in sentences
such as "how many sentences contain an unknown token?".
emb can be gensim KeyedVectors or any other object implementing
__contains__
"""
stats = {
"sents": 0,
"tokens": 0,
"unk_tokens": 0,
"unk_types": 0,
"unk_tokens_lower": 0,
"unk_types_lower": 0,
"sents_with_unk_token": 0,
"sents_with_unk_token_lower": 0}
all_types = set()
for sent in sentences:
stats["sents"] += 1
any_unk_token = False
any_unk_token_lower = False
types = Counter(sent)
for ty, freq in types.items():
all_types.add(ty)
stats["tokens"] += freq
unk = ty not in emb
if unk:
any_unk_token = True
stats["unk_types"] += 1
stats["unk_tokens"] += freq
if unk and ty.lower() not in emb:
any_unk_token_lower = True
stats["unk_types_lower"] += 1
stats["unk_tokens_lower"] += freq
if any_unk_token:
stats["sents_with_unk_token"] += 1
if any_unk_token_lower:
stats["sents_with_unk_token_lower"] += 1
stats["types"] = len(all_types)
return stats | 221b88e2124f3b8da2976a337476a11a7276a470 | 3,656,044 |
import os
def basename(path: str) -> str:
"""Returns the basename removing path and extension."""
return os.path.splitext(os.path.basename(path))[0] | 63e8e0220d1c2a9fc5b30d4bff2b609517d8cd18 | 3,656,045 |
from typing import List
async def search_dcu(
ldap_conn: LDAPConnection, dcu_id: str = None, uid: str = None, fullname: str = None
) -> List[DCUUser]:
"""
    Search DCU AD for a user
    Args:
        ldap_conn: LDAP connection to use for searching
        uid: Username to search for
        dcu_id: DCU student ID number
        fullname: User's full name
    Returns:
        A list of users found in AD matching the search criteria
"""
query = "".join(
filter(
None,
[
f"(displayName={fullname})" if fullname else None,
f"(cn={uid})" if uid else None,
f"(id={dcu_id})" if dcu_id else None,
],
)
)
if not query:
return []
res = await ldap_conn.search("o=ad,o=dcu,o=ie", f"(&{query})", attributes=DCU_ATTR)
return [DCUUser.from_ldap(user) for user in res] | 32444b30f1463332f51720eef3167c3495deeaec | 3,656,046 |
def jump(inst_ptr, program, direction):
"""Jump the instruction pointer in the program until matching bracket"""
count = direction
while count != 0:
inst_ptr += direction
char = program[inst_ptr]
if char == '[':
count += 1
elif char == ']':
count -= 1
else:
pass
return inst_ptr | 76c6c4dcf4dbc452e9f2b252522871fcca95c75d | 3,656,047 |
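A usage sketch for jump above, matching brackets in a Brainfuck-style program (the program string is illustrative):
program = "+[->+<]."
print(jump(1, program, 1))   # -> 6: from the '[' at index 1, scan forward to its matching ']'
print(jump(6, program, -1))  # -> 1: from the ']' at index 6, scan backward to its matching '['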
import gecosistema_core
import os
import math
from jinja2 import Environment, FileSystemLoader
def htmlResponse(environ, start_response=None, checkuser=False):
"""
htmlResponse - return a Html Page
"""
if checkuser and not check_user_permissions(environ):
environ["url"] = justpath(environ["SCRIPT_FILENAME"])+"/back.html"
return htmlResponse(environ, start_response)
url = environ["url"] if "url" in environ else normpath(environ["SCRIPT_FILENAME"])
url = forceext(url, "html")
DOCUMENT_ROOT = environ["DOCUMENT_ROOT"] if "DOCUMENT_ROOT" in environ else ""
#HTTP_COOKIE = getCookies(environ)
if not isfile(url):
return httpResponseNotFound(start_response)
workdir = justpath(url)
index_html = justfname(url)
jss = (DOCUMENT_ROOT + "/lib/js",
justpath(url),)
csss = (DOCUMENT_ROOT + "/lib/css",
DOCUMENT_ROOT + "/lib/js",
DOCUMENT_ROOT + "/lib/images",
justpath(url),)
env = Environment(loader=FileSystemLoader(workdir))
t = env.get_template(index_html)
variables = {
"loadjs": loadlibs(jss,"js"),
"loadcss": loadlibs(csss,"css"),
"APPNAME": juststem(workdir),
"os": os,
"math": math,
"gecosistema_core": gecosistema_core,
"environ":environ,
"__file__":url
}
html = t.render(variables).encode("utf-8","replace")
return httpResponseOK(html, start_response) | bc1ee0367b64c7cfc915ab66e9c519ad101f939e | 3,656,048 |
def center_image(IM, method='com', odd_size=True, square=False, axes=(0, 1),
crop='maintain_size', verbose=False, center=_deprecated,
**kwargs):
"""
Center image with the custom value or by several methods provided in
:func:`find_origin()` function.
Parameters
----------
IM : 2D np.array
The image data.
method : tuple or str
either a tuple (float, float), the coordinate of the origin of the
image in the (row, column) format, or a string to specify an automatic
centering method:
``image_center``
the center of the image is used as the origin. The trivial result.
``com``
the origin is found as the center of mass.
``convolution``
the origin is found as the maximum of autoconvolution of the image
projections along each axis.
``gaussian``
the origin is extracted from a fit to a Gaussian function.
This is probably only appropriate if the data resembles a
gaussian.
``slice``
the image is broken into slices, and these slices compared for
symmetry.
odd_size : boolean
if ``True``, the returned image will contain an odd number of columns.
Most of the transform methods require this, so it's best to set this
to ``True`` if the image will subsequently be Abel-transformed.
square : bool
if ``True``, the returned image will have a square shape.
crop : str
determines how the image should be cropped. The options are:
``maintain_size``
return image of the same size. Some regions of the original image
may be lost, and some regions may be filled with zeros.
``valid_region``
return the largest image that can be created without padding.
All of the returned image will correspond to the original image.
However, portions of the original image will be lost.
If you can tolerate clipping the edges of the image, this is
probably the method to choose.
``maintain_data``
the image will be padded with zeros such that none of the original
image will be cropped.
axes : int or tuple
center image with respect to axis ``0`` (vertical), ``1`` (horizontal),
or both axes ``(0, 1)`` (default).
Returns
-------
out : 2D np.array
centered image
"""
if center is not _deprecated:
_deprecate('abel.tools.center.center_image() '
'argument "center" is deprecated, use "method" instead.')
method = center
rows, cols = IM.shape
if odd_size and cols % 2 == 0:
# drop rightside column
IM = IM[:, :-1]
rows, cols = IM.shape
if square and rows != cols:
# make rows == cols, but maintain approx. center
if rows > cols:
diff = rows - cols
trim = diff//2
if trim > 0:
IM = IM[trim: -trim] # remove even number of rows off each end
if diff % 2:
IM = IM[: -1] # remove one additional row
else:
# make rows == cols, check row oddness
if odd_size and rows % 2 == 0:
IM = IM[:-1, :]
rows -= 1
xs = (cols - rows)//2
IM = IM[:, xs:-xs]
rows, cols = IM.shape
# origin is in (row, column) format!
if isinstance(method, string_types):
origin = find_origin(IM, method=method, verbose=verbose, **kwargs)
else:
origin = method
centered_data = set_center(IM, origin=origin, crop=crop, axes=axes,
verbose=verbose)
return centered_data | 7b9793d720228a246df07c08a2aeda861108f92e | 3,656,049 |
import pysam
from cyvcf2 import VCF
import gzip
import os
def check_header(install_path):
"""Method to check the final genomics headers have a header or not
check_header
============
This method is going to go through each of the files that were created by the recipe,
and it will check if the those files have a header or not.
sam/bam/cram, vcf/bcf, gtf/gff/gff3, bed/bedGraph, csv, txt files require a header and if no header is provided
check-recipe will fail.
Other files that don't have header will be given a warning. GGD expects most files to have
a header. Some files are okay not to have headers, but if a header can be added it should be.
For each file, the file header and first 5 lines of the file body will be provided to stdout.
Parameters:
-----------
1) install_path: (str) The path to the directory where the files have been installed into.
Returns:
+++++++
(bool) True or False.
        - True if a header exists or if only a warning was given
- False if a header does not exists and is required
"""
print(
":ggd:check-recipe: Checking that the final files have headers if appropriate\n"
)
installed_files = os.listdir(install_path)
for file_name in [
x for x in installed_files if os.path.isfile(os.path.join(install_path, x))
]:
f_path = os.path.join(install_path, file_name)
## Check for an index file
if file_name.strip().split(".")[-1] in set(
["tbi", "bai", "crai", "fai", "tar", "bz2", "bw", "csi", "gzi"]
):
continue
## Skip fasta or fastq files
if any(x in file_name for x in [".fasta", ".fa", ".fastq", ".fq"]):
continue
## Check for sam/bam/cram files
if any(x in file_name for x in [".sam", ".bam", ".cram"]):
try:
samfile = pysam.AlignmentFile(f_path, check_sq=False)
header = samfile.header
if any(header.lengths):
print(
":ggd:check-recipe: Header found in file {name}\n".format(
name=file_name
)
)
print("Head of file:")
print("---------------------------")
print(str(header).strip())
for i, read in enumerate(samfile):
print(read)
if i >= 4:
break
print("---------------------------\n")
else:
print(
":ggd:check-recipe: !!ERROR!! No header found for file {name}\n".format(
name=file_name
)
)
print(
":ggd:check-recipe: !!ERROR!! A header is required for sam/bam/cram files\n"
)
return False
except (ValueError, IOError, Exception) as e:
print(str(e))
print(
":ggd:check-recipe: !!ERROR!! No header found for file {name}\n".format(
name=file_name
)
)
print(
":ggd:check-recipe: !!ERROR!! A header is required for sam/bam/cram files\n"
)
return False
## Check vcf/bcf files
elif any(x in file_name for x in [".vcf", ".bcf"]):
try:
vcffile = VCF(f_path)
header = str(vcffile.raw_header)
if header:
print(
":ggd:check-recipe: Header found in file {name}\n".format(
name=file_name
)
)
print("Head of file:")
print("---------------------------")
print(str(header).strip())
for i, var in enumerate(vcffile):
print(var)
if i >= 4:
break
print("---------------------------\n")
else:
print(
":ggd:check-recipe: !!ERROR!! No header found for file {name}\n".format(
name=file_name
)
)
print(
":ggd:check-recipe: !!ERROR!! A header is required for vcf/bcf files\n"
)
return False
except IOError as e:
print(str(e))
print(
":ggd:check-recipe: !!ERROR!! No header found for file {name}\n".format(
name=file_name
)
)
print(
":ggd:check-recipe: !!ERROR!! A header is required for vcf/bcf files\n"
)
return False
## Check other files
else:
try:
file_handler = (
gzip.open(f_path) if f_path.endswith(".gz") else open(f_path)
)
header = []
body = []
try:
for line in file_handler:
if type(line) != str:
line = line.strip().decode("utf-8")
if len(line) > 0 and str(line)[0] in set(["#", "!", "^"]):
header.append(str(line).strip())
else:
body.append(str(line).strip())
if len(body) > 4:
break
except UnicodeDecodeError:
print(
":ggd:check-recipe: Cannot decode file contents into unicode.\n"
)
pass
if header:
print(
":ggd:check-recipe: Header found in file {name}\n".format(
name=file_name
)
)
print("Head of file:")
print("---------------------------")
print("\n".join(header))
print("\n".join(body))
print("---------------------------\n")
elif any(
x in file_name
for x in [
".gtf",
".gff",
".gff3",
".bed",
".bedGraph",
".csv",
".txt",
]
):
print(
":ggd:check-recipe: !!ERROR!! No header found for file {name}\n".format(
name=file_name
)
)
print(
":ggd:check-recipe: !!ERROR!! A header is required for this type of file\n"
)
print("First 5 lines of file body:")
print("---------------------------")
print("\n".join(body))
print("---------------------------\n")
return False
else:
print(
":ggd:check-recipe: !!WARNING!! No header found for file {name}\n".format(
name=file_name
)
)
print("First 5 lines of file body:")
print("---------------------------")
print("\n".join(body))
print("---------------------------\n")
print(
":ggd:check-recipe: !!WARNING!! GGD requires that any file that can have a header should. Please either add a header or if the file cannot have a header move forward.\n"
)
print(
":ggd:check-recipe: !!WARNING!! IF you move forward without adding a header when one should be added, this recipe will be rejected until a header is added.\n"
)
except IOError as e:
print(":ggd:check-recipe: !!ERROR!!")
print(str(e))
return False
return True | 6a87251381c7c4af0dd3fb3ac8e1cf252c380254 | 3,656,050 |
def get_mapping(mapping_name):
"""
    Reads in the given mapping and returns a dictionary of letters to keys. If the given mapping is a dictionary,
    does nothing and returns the mapping.
    mapping_name can be a path to different file formats.
"""
# read in mapping
if type(mapping_name) == str:
if mapping_name.split(".")[-1] == "mst":
mapping = create_map_from_reformulation(mapping_name)
elif mapping_name.split(".")[-1] == "txt":
mapping = create_map_from_txt(mapping_name)
return mapping
else:
return mapping_name | 1e4f99d14b242ba4e8760fafecd83cd32932a92c | 3,656,051 |
import numpy as np
def evaluate(model, reward_gen, n_steps=1000000, delta=1):
    """Evaluate the regrets and rewards of a given model based on a given reward
generator
Args:
model (TYPE): Description
n_steps (int, optional): Description
delta (int, optional): Number of steps for feedback delay
reward_gen (TYPE, optional): Description
Returns:
regrets (list): List of regrets for each round. Regret is the maximum
reward minus the selected action's reward for the round
rewards (list): List of rewards for actions taken
"""
regrets = []
rewards = []
last_rewards = []
last_changes = []
last_selected_actions = []
# initialize successs and failures to 0 for all items
successes = np.zeros(model.n_items)
failures = np.zeros(model.n_items)
for step in range(1, n_steps + 1):
reward_vector, item_changed = reward_gen.get_rewards()
# reinitialize the successes and failures if the item has changed
if item_changed:
successes = np.zeros(model.n_items)
failures = np.zeros(model.n_items)
selected_action = model.get_action(item_changed, successes, failures)
regret = (
np.max(reward_gen.reward_probs) - reward_gen.reward_probs[selected_action]
)
regrets.append(regret)
rewards.append(reward_vector[selected_action])
last_rewards.append(reward_vector[selected_action])
last_changes.append(item_changed)
last_selected_actions.append(selected_action)
# record success or failure of action at appropriate index in
#successes or failures
if reward_vector[selected_action] == 1:
successes[selected_action] += 1
else:
failures[selected_action] += 1
# Feedback if delta steps have passed
if step % delta == 0:
model.update(last_selected_actions, last_rewards, last_changes)
last_rewards = []
last_changes = []
last_selected_actions = []
return regrets, rewards | a326e905156f6ac195eeb993878ae651a13a306e | 3,656,052 |
def get_photo_from_response(response: dict):
"""
parse json response and return an Photo
Keyword arguments:
response -- meetup api response in a dict
return -> get or create Photo
"""
photo, create = Photo.objects.get_or_create(meetup_id=response["id"])
# add optional fields
if "highres_link" in response:
photo.highres_link = response["highres_link"]
if "base_url" in response:
photo.base_url = response["base_url"]
if "photo_link" in response:
photo.photo_link = response["photo_link"]
if "thumb_link" in response:
photo.thumb_link = response["thumb_link"]
if "type" in response:
photo.photo_type = response["type"]
photo.save()
return photo | 9f0aeee796c1131424a7f4292a2b712d2bf0158e | 3,656,053 |
from typing import List, Tuple, Union
import holoviews as hv
import hvplot.pandas  # noqa: F401 -- registers the DataFrame.hvplot accessor
import numpy as np
import pandas as pd
from anndata import AnnData
from natsort import natsorted
def composition_plot(adata: AnnData, by: str, condition: str, stacked: bool = True, normalize: bool = True,
condition_sort_by: str = None, cmap: Union[str, List[str], Tuple[str]] = None,
**kwds) -> hv.core.element.Element:
"""
Generate a composition plot, which shows the percentage of observations from every condition within each cluster (by).
Args:
adata: Annotated data matrix.
by: Key for accessing variables of adata.var_names or a field of adata.obs used to group the data.
condition: Key for accessing variables of adata.var_names or a field of adata.obs used to compute counts within a group.
stacked: Whether bars are stacked.
normalize: Normalize counts within each group to sum to one.
condition_sort_by: Sort condition within each group by max, mean, natsorted, or None.
cmap: Color map name (hv.plotting.list_cmaps()) or a list of hex colors. See http://holoviews.org/user_guide/Styling_Plots.html for more information.
"""
adata_raw = __get_raw(adata, False)
keys = [by, condition]
adata_df = __get_df(adata, adata_raw, keys)
for column in adata_df:
if not pd.api.types.is_categorical_dtype(adata_df[column]):
adata_df[column] = adata_df[column].astype(str).astype('category')
cmap = __get_category_cmap(adata_raw, adata_df, condition) if cmap is None else __fix_cmap(adata_df, condition,
cmap)
keywords = dict(stacked=stacked, group_label=condition)
keywords.update(kwds)
invert = keywords.get('invert', False)
if not invert and 'rot' not in keywords:
keywords['rot'] = 90
dummy_df = pd.get_dummies(adata_df[condition])
df = pd.concat([adata_df, dummy_df], axis=1)
df = df.groupby(by).agg(np.sum)
if normalize:
df = df.T.div(df.sum(axis=1)).T
if not (pd.api.types.is_categorical_dtype(df.index) and df.index.dtype.ordered):
df = df.loc[natsorted(df.index)]
secondary = dummy_df.columns.values
if condition_sort_by == 'max' or condition_sort_by == 'mean':
secondary_sort = df.values.max(axis=0) if condition_sort_by == 'max' else df.values.mean(axis=0)
index = np.flip(np.argsort(secondary_sort))
secondary = secondary[index]
elif condition_sort_by == 'natsorted':
secondary = natsorted(secondary)
secondary = list(secondary)
p = df.hvplot.bar(by, secondary, cmap=cmap, **keywords)
p.df = df
return p | f2e588c0ce6d195201754885bbd90aae83b49ba7 | 3,656,054 |
def region_of_province(province_in: str) -> str:
"""
Return the corresponding key in ITALY_MAP whose value contains province_in
:param province_in: str
:return: str
"""
region = None
for r in ITALY_MAP:
for p in ITALY_MAP[r]:
if province_in == p:
region = r
return region | 1aa29235d569929a0cfbbc4258d45ba4f0171f3c | 3,656,055 |
def filter_stopwords(words:list)->iter:
"""
Filter the stop words
"""
words = filter(is_not_stopword, words)
return words | a5516886be0ce5c8671ef259baf38b04d61c511f | 3,656,056 |
import numpy as np
def numpy_jaccard(box_a, box_b):
    """Compute the pairwise IoU between two sets of rectangles.
Args:
box_a: (tensor) bounding boxes, Shape: [A, 4].
box_b: (tensor) bounding boxes, Shape: [B, 4].
Return:
ious: (tensor) Shape: [A, B]
"""
A = box_a.shape[0]
B = box_b.shape[0]
box_a_x1y1 = np.reshape(box_a[:, 2:], (A, 1, 2))
box_a_x1y1 = np.tile(box_a_x1y1, (1, B, 1))
box_b_x1y1 = np.reshape(box_b[:, 2:], (1, B, 2))
box_b_x1y1 = np.tile(box_b_x1y1, (A, 1, 1))
box_a_x0y0 = np.reshape(box_a[:, :2], (A, 1, 2))
box_a_x0y0 = np.tile(box_a_x0y0, (1, B, 1))
box_b_x0y0 = np.reshape(box_b[:, :2], (1, B, 2))
box_b_x0y0 = np.tile(box_b_x0y0, (A, 1, 1))
max_xy = np.minimum(box_a_x1y1, box_b_x1y1)
min_xy = np.maximum(box_a_x0y0, box_b_x0y0)
inter = np.clip((max_xy - min_xy), 0.0, np.inf)
inter = inter[:, :, 0] * inter[:, :, 1]
area_a = ((box_a[:, 2]-box_a[:, 0]) * (box_a[:, 3]-box_a[:, 1]))
area_a = np.reshape(area_a, (A, 1))
area_a = np.tile(area_a, (1, B))
area_b = ((box_b[:, 2]-box_b[:, 0]) * (box_b[:, 3]-box_b[:, 1]))
area_b = np.reshape(area_b, (1, B))
area_b = np.tile(area_b, (A, 1))
union = area_a + area_b - inter
return inter / union | 1c0aed3c354a9253c5f9278109cd13365941846c | 3,656,057 |
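An illustrative check for numpy_jaccard above: identical boxes give IoU 1.0 and half-overlapping boxes give 1/3 (box format assumed to be [x0, y0, x1, y1]).
box_a = np.array([[0.0, 0.0, 2.0, 2.0]])
box_b = np.array([[0.0, 0.0, 2.0, 2.0], [1.0, 0.0, 3.0, 2.0]])
print(numpy_jaccard(box_a, box_b))  # -> [[1.0, 0.3333...]]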
import uuid
def test_get_rule(client_rule_factory, client_response_factory, registered_rule):
"""Check request data that client uses to get a rule.
1. Create a subclass of the abstract client.
2. Implement send request so that it checks the request parameters.
3. Invoke the get_rule method.
4. Check the rule, returned by the method call.
"""
rule_id = str(uuid.uuid4())
class _Client(AbstractClient):
def _send_request(self, url, method="GET", json=None):
assert url == "rule/{0}".format(rule_id), "Wrong url"
assert method == "GET", "Wrong method"
assert json is None, "Data has been specified"
response_json = {"rule_id": rule_id}
response_json.update(self._rule_factory.serialize_rule(rule=registered_rule))
return response_json
client = _Client(
configuration_url="/",
rule_factory=client_rule_factory,
response_factory=client_response_factory,
)
obtained_rule = client.get_rule(rule_id=rule_id)
assert obtained_rule.rule_id == rule_id, "Rule ID has not been set" | 62b8368072cebf0591137357167980a6d710a1f0 | 3,656,058 |
import colorsys
def summaryhsl(all_summaries, summary):
"""
Choose a color for the given system summary to distinguish it from other types of systems.
Returns hue, saturation, and luminance for the start of the range, and how much the hue can be randomly varied while staying distinguishable.
"""
lowest_att = min(att for att, ms in all_summaries)
highest_att = max(att for att, ms in all_summaries)
att_range = highest_att - lowest_att + 1
attractors, monotonic_species = summary
lowest_ms = min(ms for att, ms in all_summaries if att == attractors)
highest_ms = max(ms for att, ms in all_summaries if att == attractors)
ms_range = highest_ms - lowest_ms + 1
bin_width = 1 / (ms_range + 1) / att_range
hue = ((highest_att - attractors) / att_range) + (highest_ms - monotonic_species) * bin_width
variability_squeeze = (2 if att_range > 1 else 1) * (2 if ms_range > 1 else 1)
return hue, 1, colorsys.ONE_THIRD, bin_width / variability_squeeze | 1e874aaa359a5d8bb566809fc2be212df2890885 | 3,656,059 |
def _get_cached_values(instance, translated_model, language_code, use_fallback=False):
"""
Fetch an cached field.
"""
if not appsettings.PARLER_ENABLE_CACHING or not instance.pk or instance._state.adding:
return None
key = get_translation_cache_key(translated_model, instance.pk, language_code)
values = cache.get(key)
if not values:
return None
# Check for a stored fallback marker
if values.get('__FALLBACK__', False):
# Internal trick, already set the fallback marker, so no query will be performed.
instance._translations_cache[translated_model][language_code] = MISSING
# Allow to return the fallback language instead.
if use_fallback:
lang_dict = get_language_settings(language_code)
# iterate over list of fallback languages, which should be already
# in proper order
for fallback_lang in lang_dict['fallbacks']:
if fallback_lang != language_code:
return _get_cached_values(
instance, translated_model, fallback_lang,
use_fallback=False
)
return None
values['master'] = instance
values['language_code'] = language_code
return values | e650eabbfde8b877519b9456dba9021dfa0f78e6 | 3,656,060 |
def tensor_index_by_list(data, list_index):
"""Tensor getitem by list of int and bool"""
data_shape = F.shape(data)
indexes_types = hyper_map(F.typeof, list_index)
if const_utils.judge_indexes_types(indexes_types, mstype.int_type + (mstype.bool_,)):
sub_tuple_index = const_utils.transform_sequence_index(list_index, data_shape[0], const_utils.TENSOR_GETITEM)
if not sub_tuple_index:
data_rank = len(data_shape)
if data_rank == 1:
return const_utils.make_tensor([], data.dtype, ())
return const_utils.make_tensor([], data.dtype, data_shape[1:])
tensor_index = const_utils.make_tensor(sub_tuple_index, mstype.int64)
return F.gather(data, tensor_index, 0)
tuple_index_new = ()
for index in list_index:
tuple_index_new += (index,)
return tensor_index_by_tuple(data, tuple_index_new) | 99702ca58ebd7f316d83687804f09ac0639e3f17 | 3,656,061 |
def sample_ingridient(user, name='Salt'):
"""Create and return a sample ingridient"""
return Ingridient.objects.create(user=user, name=name) | 8904f11164a78959eb8073b80fa349155c1ae185 | 3,656,062 |
def remove_duplicates_from_list(params_list):
"""
Common function to remove duplicates from a list
Author: [email protected]
:param params_list:
:return:
"""
if params_list:
return list(dict.fromkeys(params_list))
return list() | 885b2e048ec672bd2d24fabe25066bc2df3ea8a8 | 3,656,063 |
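A quick usage sketch for the helper above; dict.fromkeys preserves first-seen order, so the original ordering survives deduplication:
print(remove_duplicates_from_list([3, 1, 3, 2, 1]))  # -> [3, 1, 2]
print(remove_duplicates_from_list([]))               # -> []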
import os
import logging
import sys
def _exec_task(fn, task, d, quieterr):
"""Execute a BB 'task'
Execution of a task involves a bit more setup than executing a function,
running it with its own local metadata, and with some useful variables set.
"""
if not d.getVarFlag(task, 'task', False):
event.fire(TaskInvalid(task, d), d)
logger.error("No such task: %s" % task)
return 1
logger.debug(1, "Executing task %s", task)
localdata = _task_data(fn, task, d)
tempdir = localdata.getVar('T', True)
if not tempdir:
bb.fatal("T variable not set, unable to build")
# Change nice level if we're asked to
nice = localdata.getVar("BB_TASK_NICE_LEVEL", True)
if nice:
curnice = os.nice(0)
nice = int(nice) - curnice
newnice = os.nice(nice)
logger.debug(1, "Renice to %s " % newnice)
ionice = localdata.getVar("BB_TASK_IONICE_LEVEL", True)
if ionice:
try:
cls, prio = ionice.split(".", 1)
bb.utils.ioprio_set(os.getpid(), int(cls), int(prio))
except:
bb.warn("Invalid ionice level %s" % ionice)
bb.utils.mkdirhier(tempdir)
# Determine the logfile to generate
logfmt = localdata.getVar('BB_LOGFMT', True) or 'log.{task}.{pid}'
logbase = logfmt.format(task=task, pid=os.getpid())
# Document the order of the tasks...
logorder = os.path.join(tempdir, 'log.task_order')
try:
with open(logorder, 'a') as logorderfile:
logorderfile.write('{0} ({1}): {2}\n'.format(task, os.getpid(), logbase))
except OSError:
logger.exception("Opening log file '%s'", logorder)
pass
# Setup the courtesy link to the logfn
loglink = os.path.join(tempdir, 'log.{0}'.format(task))
logfn = os.path.join(tempdir, logbase)
if loglink:
bb.utils.remove(loglink)
try:
os.symlink(logbase, loglink)
except OSError:
pass
prefuncs = localdata.getVarFlag(task, 'prefuncs', expand=True)
postfuncs = localdata.getVarFlag(task, 'postfuncs', expand=True)
class ErrorCheckHandler(logging.Handler):
def __init__(self):
self.triggered = False
logging.Handler.__init__(self, logging.ERROR)
def emit(self, record):
if getattr(record, 'forcelog', False):
self.triggered = False
else:
self.triggered = True
# Handle logfiles
si = open('/dev/null', 'r')
try:
bb.utils.mkdirhier(os.path.dirname(logfn))
logfile = open(logfn, 'w')
except OSError:
logger.exception("Opening log file '%s'", logfn)
pass
# Dup the existing fds so we dont lose them
osi = [os.dup(sys.stdin.fileno()), sys.stdin.fileno()]
oso = [os.dup(sys.stdout.fileno()), sys.stdout.fileno()]
ose = [os.dup(sys.stderr.fileno()), sys.stderr.fileno()]
# Replace those fds with our own
os.dup2(si.fileno(), osi[1])
os.dup2(logfile.fileno(), oso[1])
os.dup2(logfile.fileno(), ose[1])
# Ensure Python logging goes to the logfile
handler = logging.StreamHandler(logfile)
handler.setFormatter(logformatter)
# Always enable full debug output into task logfiles
handler.setLevel(logging.DEBUG - 2)
bblogger.addHandler(handler)
errchk = ErrorCheckHandler()
bblogger.addHandler(errchk)
localdata.setVar('BB_LOGFILE', logfn)
localdata.setVar('BB_RUNTASK', task)
flags = localdata.getVarFlags(task)
event.fire(TaskStarted(task, logfn, flags, localdata), localdata)
try:
for func in (prefuncs or '').split():
exec_func(func, localdata)
exec_func(task, localdata)
for func in (postfuncs or '').split():
exec_func(func, localdata)
except FuncFailed as exc:
if quieterr:
event.fire(TaskFailedSilent(task, logfn, localdata), localdata)
else:
errprinted = errchk.triggered
logger.error(str(exc))
event.fire(TaskFailed(task, logfn, localdata, errprinted), localdata)
return 1
finally:
sys.stdout.flush()
sys.stderr.flush()
bblogger.removeHandler(handler)
# Restore the backup fds
os.dup2(osi[0], osi[1])
os.dup2(oso[0], oso[1])
os.dup2(ose[0], ose[1])
# Close the backup fds
os.close(osi[0])
os.close(oso[0])
os.close(ose[0])
si.close()
logfile.close()
if os.path.exists(logfn) and os.path.getsize(logfn) == 0:
logger.debug(2, "Zero size logfn %s, removing", logfn)
bb.utils.remove(logfn)
bb.utils.remove(loglink)
event.fire(TaskSucceeded(task, logfn, localdata), localdata)
if not localdata.getVarFlag(task, 'nostamp', False) and not localdata.getVarFlag(task, 'selfstamp', False):
make_stamp(task, localdata)
return 0 | 064a217bdb967db5d15634864aa62fcd09068adf | 3,656,064 |
def mediaRecognitionApi():
"""
Retrieve the resource id, name, author
and time index of a sampled media.
"""
#TODO: Improve recognition
if 'file' not in request.files:
abort(400, "No file.")
file = request.files['file']
if file.filename == '':
abort(400, "No selected file")
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
filepath = pth.join(app.config['UPLOAD_FOLDER'], filename)
file.save(filepath)
asynctask = recognizeMedia.delay(filepath)
return jsonify({"uuid": asynctask.task_id}), 202
abort(400, "Bad request") | a4a4a6aaffe83f2d15f5ad32b826f135385c7ef3 | 3,656,065 |
from typing import Sequence
import chex
def _scale_and_shift(
x: chex.Array,
params: Sequence[chex.Array],
has_scale: bool,
has_shift: bool,
) -> chex.Array:
"""Example of a scale and shift function."""
if has_scale and has_shift:
scale, shift = params
return x * scale + shift
elif has_scale:
assert len(params) == 1
return x * params[0]
elif has_shift:
assert len(params) == 1
return x + params[0]
else:
raise ValueError("You must have either `has_scale` or `has_shift` set "
"to True.") | 68c7128ff7c1788cd77e3737adff293f488e190e | 3,656,066 |
import math
def get_distance_metres(aLocation1, aLocation2):
"""
Returns the ground distance in metres between two LocationGlobal objects
:param aLocation1: starting location
:param aLocation2: ending location
:return:
"""
dlat = aLocation2.lat - aLocation1.lat
dlong = aLocation2.lon - aLocation1.lon
dlong_c = dlong*math.cos(math.radians(aLocation1.lat))
return math.sqrt((dlat * dlat) + (dlong_c * dlong_c)) * 1.113195e5 | 5f1428c099f79ba8b41177f87e6a3bffed13e00b | 3,656,067 |
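# Hypothetical usage of get_distance_metres above; LocationGlobal is stood in by a
# namedtuple with .lat/.lon attributes since dronekit is not imported in this snippet.
from collections import namedtuple
Location = namedtuple("Location", ["lat", "lon"])
home = Location(lat=52.0, lon=4.0)
target = Location(lat=52.001, lon=4.001)
print(round(get_distance_metres(home, target), 1))  # ~130.7 metres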
import numpy as np
import scipy
import scipy.sparse
from scipy.sparse import csc_matrix
import networkx as nx
from sklearn.decomposition import NMF
# vcorrcoef and spatial_comp_plot are helpers defined elsewhere in the original module.
def merge_components(a,c,corr_img_all_r,U,V,normalize_factor,num_list,patch_size,merge_corr_thr=0.6,merge_overlap_thr=0.6,plot_en=False):
""" want to merge components whose correlation images are highly overlapped,
and update a and c after merge with region constrain
Parameters:
-----------
a: np.ndarray
matrix of spatial components (d x K)
c: np.ndarray
matrix of temporal components (T x K)
corr_img_all_r: np.ndarray
corr image
U, V: low rank decomposition of Y
normalize_factor: std of Y
num_list: indices of components
patch_size: dimensions for data
merge_corr_thr: scalar between 0 and 1
temporal correlation threshold for truncating corr image (corr(Y,c)) (default 0.6)
merge_overlap_thr: scalar between 0 and 1
overlap ratio threshold for two corr images (default 0.6)
Returns:
--------
a_pri: np.ndarray
matrix of merged spatial components (d x K')
c_pri: np.ndarray
matrix of merged temporal components (T x K')
corr_pri: np.ndarray
matrix of correlation images for the merged components (d x K')
flag: merge or not
"""
f = np.ones([c.shape[0],1]);
############ calculate overlap area ###########
a = csc_matrix(a);
a_corr = scipy.sparse.triu(a.T.dot(a),k=1);
cor = csc_matrix((corr_img_all_r>merge_corr_thr)*1);
temp = cor.sum(axis=0);
cor_corr = scipy.sparse.triu(cor.T.dot(cor),k=1);
cri = np.asarray((cor_corr/(temp.T)) > merge_overlap_thr)*np.asarray((cor_corr/temp) > merge_overlap_thr)*((a_corr>0).toarray());
a = a.toarray();
connect_comps = np.where(cri > 0);
if len(connect_comps[0]) > 0:
flag = 1;
a_pri = a.copy();
c_pri = c.copy();
G = nx.Graph();
G.add_edges_from(list(zip(connect_comps[0], connect_comps[1])))
comps=list(nx.connected_components(G))
merge_idx = np.unique(np.concatenate([connect_comps[0], connect_comps[1]],axis=0));
a_pri = np.delete(a_pri, merge_idx, axis=1);
c_pri = np.delete(c_pri, merge_idx, axis=1);
corr_pri = np.delete(corr_img_all_r, merge_idx, axis=1);
num_pri = np.delete(num_list,merge_idx);
for comp in comps:
comp=list(comp);
print("merge" + str(num_list[comp]+1));
a_zero = np.zeros([a.shape[0],1]);
a_temp = a[:,comp];
if plot_en:
spatial_comp_plot(a_temp, corr_img_all_r[:,comp].reshape(patch_size[0],patch_size[1],-1,order="F"),num_list[comp],ini=False);
mask_temp = np.where(a_temp.sum(axis=1,keepdims=True) > 0)[0];
a_temp = a_temp[mask_temp,:];
y_temp = np.matmul(a_temp, c[:,comp].T);
a_temp = a_temp.mean(axis=1,keepdims=True);
c_temp = c[:,comp].mean(axis=1,keepdims=True);
model = NMF(n_components=1, init='custom')
a_temp = model.fit_transform(y_temp, W=a_temp, H = (c_temp.T));
a_zero[mask_temp] = a_temp;
c_temp = model.components_.T;
corr_temp = vcorrcoef(U/normalize_factor, V.T, c_temp);
a_pri = np.hstack((a_pri,a_zero));
c_pri = np.hstack((c_pri,c_temp));
corr_pri = np.hstack((corr_pri,corr_temp));
num_pri = np.hstack((num_pri,num_list[comp[0]]));
return flag, a_pri, c_pri, corr_pri, num_pri
else:
flag = 0;
return flag | e2e15c208ae71ba20cc84d8c0501485c04e41a90 | 3,656,068 |
def RenderSubpassStartInputAttachmentsVector(builder, numElems):
"""This method is deprecated. Please switch to Start."""
return StartInputAttachmentsVector(builder, numElems) | 484ba9746f278cfcc7d50a282605ebc9a3a4fb2b | 3,656,069 |
def GetCLInfo(cl_info_str):
"""Gets CL's repo_name and revision."""
return cl_info_str.split('/') | d077216b2804c249a7d0ffdbff7f992dde106501 | 3,656,070 |
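# Hypothetical usage of GetCLInfo above: the input is expected to be "<repo_name>/<revision>".
repo_name, revision = GetCLInfo('chromium/abc123')
assert (repo_name, revision) == ('chromium', 'abc123')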
from copy import deepcopy
def acyclic_run(pipeline):
"""
@summary: 逆转反向边
@return:
"""
deformed_flows = {'{}.{}'.format(flow[PWE.source], flow[PWE.target]): flow_id
for flow_id, flow in pipeline[PWE.flows].items()}
reversed_flows = {}
while True:
no_circle = validate_graph_without_circle(pipeline)
if no_circle['result']:
break
source = no_circle['error_data'][-2]
target = no_circle['error_data'][-1]
circle_flow_key = '{}.{}'.format(source, target)
flow_id = deformed_flows[circle_flow_key]
reversed_flows[flow_id] = deepcopy(pipeline[PWE.flows][flow_id])
pipeline[PWE.flows][flow_id].update({
PWE.source: target,
PWE.target: source
})
source_node = pipeline['all_nodes'][source]
delete_flow_id_from_node_io(source_node, flow_id, PWE.outgoing)
add_flow_id_to_node_io(source_node, flow_id, PWE.incoming)
target_node = pipeline['all_nodes'][target]
delete_flow_id_from_node_io(target_node, flow_id, PWE.incoming)
add_flow_id_to_node_io(target_node, flow_id, PWE.outgoing)
return reversed_flows | 535edb2a7ccd1c0995fe46bff8a931175c353e51 | 3,656,071 |
def TextAreaFieldWidget(field, request): # pylint: disable=invalid-name
"""IFieldWidget factory for TextWidget."""
return FieldWidget(field, TextAreaWidget(request)) | 0d2431b1274e34978a6869efed0014982aaaa2e2 | 3,656,072 |
import os
from flask import make_response
def _cgroup_limit(cpu, memory_size, pid):
"""Modify 'cgroup' files to set resource limits.
Each pod(worker) will have cgroup folders on the host cgroup filesystem,
like '/sys/fs/cgroup/<resource_type>/kubepods/<qos_class>/pod<pod_id>/',
to limit memory and cpu resources that can be used in pod.
For more information about cgroup, please see [1], about sharing PID
namespaces in kubernetes, please see also [2].
Return None if successful otherwise a Flask.Response object.
[1]https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/6/html/resource_management_guide/sec-creating_cgroups
[2]https://github.com/kubernetes/kubernetes/pull/51634
"""
hostname = os.getenv('HOSTNAME')
pod_id = os.getenv('POD_UID')
qos_class = None
if os.getenv('QOS_CLASS') == 'BestEffort':
qos_class = 'besteffort'
elif os.getenv('QOS_CLASS') == 'Burstable':
qos_class = 'burstable'
elif os.getenv('QOS_CLASS') == 'Guaranteed':
qos_class = ''
if not pod_id or qos_class is None:
return make_response("Failed to get current worker information", 500)
memory_base_path = os.path.join('/qinling_cgroup', 'memory', 'kubepods',
qos_class, 'pod%s' % pod_id)
cpu_base_path = os.path.join('/qinling_cgroup', 'cpu', 'kubepods',
qos_class, 'pod%s' % pod_id)
memory_path = os.path.join(memory_base_path, hostname)
cpu_path = os.path.join(cpu_base_path, hostname)
if os.path.isdir(memory_base_path):
if not os.path.isdir(memory_path):
os.makedirs(memory_path)
if os.path.isdir(cpu_base_path):
if not os.path.isdir(cpu_path):
os.makedirs(cpu_path)
try:
# set cpu and memory resource limits
with open('%s/memory.limit_in_bytes' % memory_path, 'w') as f:
f.write('%d' % int(memory_size))
with open('%s/cpu.cfs_period_us' % cpu_path, 'w') as f:
f.write('%d' % PERIOD)
with open('%s/cpu.cfs_quota_us' % cpu_path, 'w') as f:
f.write('%d' % ((int(cpu)*PERIOD/1000)))
# add pid to 'tasks' files
with open('%s/tasks' % memory_path, 'w') as f:
f.write('%d' % pid)
with open('%s/tasks' % cpu_path, 'w') as f:
f.write('%d' % pid)
except Exception as e:
return make_response("Failed to modify cgroup files: %s"
% str(e), 500) | 24d0ad0ea9afaa1b0d0c1de9114175014fe7666a | 3,656,073 |
import numpy as np
# Note: `c` (coefficient table) and `es_wexler` come from the original module and are not shown here.
def s_wexler(T_K):
"""
Calculates slope of saturation vapor pressure curve over water at each temperature
based on Wexler 1976, with coefficients from Hardy 1998 (ITS-90).
Args:
T_K (np.ndarray (dimension<=2), float, list of floats) : Air or Dewpoint Temperatures [K]
Returns:
s : np.ndarray of slopes [Pa / deg C]
"""
powers = np.arange(-3, 4).reshape((1, 1, 7))
pow_coeffs = powers.copy() + 1
T_K = np.atleast_3d(T_K).astype(dtype=np.float64)
temps = np.repeat(T_K, 8, axis=-1)
temps[..., :-1] = pow_coeffs * c.gs[..., :-1] * np.power(temps[..., :-1], powers)
temps[..., -1] = -1. * c.gs[..., -1] * temps[..., -1] ** -1
s = np.squeeze(temps.sum(axis=-1)) * es_wexler(T_K)
return s | b27b713b6c609115fa36f458c7f72358c001fbd5 | 3,656,074 |
from importlib import import_module
from django.conf import settings
def get_additional_bases():
"""
Looks for additional view bases in settings.REST_EASY_VIEW_BASES.
:return:
"""
resolved_bases = []
for base in getattr(settings, 'REST_EASY_VIEW_BASES', []):
mod, cls = base.rsplit('.', 1)
resolved_bases.append(getattr(import_module(mod), cls))
return resolved_bases | 485c2f0d4778399ff534f40e681706419c3c923a | 3,656,075 |
from pathlib import Path
import warnings
import geopandas as gpd
import numpy as np
import rasterio
import rasterio.features
def load_mask_from_shp(shp_file: Path, metad: dict) -> np.ndarray:
"""
Load a mask containing geometries from a shapefile,
using a reference dataset
Parameters
----------
shp_file : str
shapefile containing a polygon
metad : dict
rasterio-style metadata dictionary
Returns
-------
mask_im : numpy.ndarray
mask image
Notes
-----
1) Pixels outside of the polygon are assigned
as nodata in the mask
2) Exception is raised if no Polygon geometry exists
in the shapefile
"""
sf = gpd.read_file(shp_file).to_crs(metad["crs"])
# extract non-empty polygons from the shapefile
geoms = [
g for g in sf.geometry if g.type.lower() == "polygon" and g.is_empty is False
]
nshapes = len(geoms)
if nshapes == 0:
raise Exception("input shapefile does not have any 'Polygon' geometry")
if nshapes > 1:
warnings.warn(
f"{nshapes} Polygons found in shapefile. It is recommended only to have one",
UserWarning,
stacklevel=1,
)
mask_im = rasterio.features.geometry_mask(
geoms,
out_shape=(metad["height"], metad["width"]),
transform=metad["transform"],
all_touched=False,
invert=True,
)
return mask_im | fd8178919b2afec71f69a8a7a00e1b2f224d2509 | 3,656,076 |
import corner
import matplotlib.pyplot as plt
def est_corner_plot(estimation, settings=None, show=True, save=None):
"""Wrapper to corner plot of `corner <https://corner.readthedocs.io/en/latest/>`_ module;
visualisation of the parameter posterior distribution by all 2-dimensional and
1-dimensional marginals.
Parameters
----------
estimation : memocell.estimation.Estimation
A memocell estimation object.
settings : dict of dict, optional
Optional labels for parameters.
show : bool, optional
Plot is shown if `show=True`.
save : None or str, optional
Provide a path to save the plot.
Returns
-------
fig : matplotlib.figure.Figure
axes : list or array of matplotlib.axes
"""
# if not given, create some default settings
if settings==None:
settings = dict()
for theta_id in estimation.net.net_theta_symbolic:
param = estimation.net.net_rates_identifier[theta_id]
settings[param] = {'label': param}
# get plotting information from estimation instance
samples, labels = estimation._samples_corner_parameters(settings)
# use corner package for this plot
fig = corner.corner(samples, labels=labels)
# save/show figure
if save!=None:
plt.savefig(save, bbox_inches='tight')
if show:
        plt.show(block=False)
return fig, fig.axes | 87f9eda0dc3bf61f66d4ee28f693dad4ef383f24 | 3,656,077 |
import numpy as np
def voigt_peak_err(peak, A, dA, alphaD, dalphaD):
"""
Gives the error on the peak of the Voigt profile. \
It assumes no correlation between the parameters and that they are \
normally distributed.
:param peak: Peak of the Voigt profile.
:type peak: array
    :param A: Area under the Voigt profile.
    :type A: array
    :param dA: Error on the area `A`.
    :type dA: array
    :param alphaD: HWHM of the Gaussian core.
    :type alphaD: array
    :param dalphaD: Error on the HWHM `alphaD`.
    :type dalphaD: array
    :return: Error on the peak of the Voigt profile.
    :rtype: array
"""
dpeak = abs(peak)*np.sqrt(np.power(dalphaD/alphaD, 2.) + np.power(dA/A, 2.))
return dpeak | 52d3fbb7fabe5dfe2e5ab67bcd498d5434f7afc7 | 3,656,078 |
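# Hypothetical usage of voigt_peak_err above with made-up fit values and uncertainties.
import numpy as np
peak = np.array([10.0])
A, dA = np.array([5.0]), np.array([0.5])
alphaD, dalphaD = np.array([2.0]), np.array([0.1])
print(voigt_peak_err(peak, A, dA, alphaD, dalphaD))  # ~[1.118]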
import zipfile
import os
def zip_recursive(destination, source_dir, rootfiles):
"""
Recursively zips source_dir into destination.
rootfiles should contain a list of files in the top level directory that
are to be included. Any top level files not in rootfiles will be omitted
from the zip file.
"""
zipped = zipfile.ZipFile(destination, 'w', zipfile.ZIP_DEFLATED)
for root, dirs, files in os.walk(source_dir):
inRoot = False
if root == source_dir:
inRoot = True
if inRoot:
for d in dirs:
try:
rootfiles.index(d)
except ValueError:
dirs.remove(d)
for f in files[:]:
if inRoot:
try:
rootfiles.index(f)
except ValueError:
continue
fullpath = os.path.join(root, f)
zipped.write(fullpath)
zipped.close()
return destination | 267dedb78495e02bbeb2ffaddcfe2278ab72be67 | 3,656,079 |
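# Hypothetical usage of zip_recursive above: archive only README.md and the src/
# tree from ./project into project.zip (the paths are made up for illustration).
zip_recursive('project.zip', 'project', rootfiles=['README.md', 'src'])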
import typing
def discord_api_call(method: str, params: typing.Dict, func, data, token: str) -> typing.Any:
""" Calls Discord API. """
    # This code is from my other repo -> https://github.com/kirillzhosul/python-discord-token-grabber
# Calling.
return func(
f"https://discord.com/api/{method}",
params=params,
headers={
"Authorization": f"{token}",
"Content-Type": "application/json"
},
data=data
) | 84ea201c88dd4260bbc80dbd45654c01cb5a36ee | 3,656,080 |
import logging
def get_startup(config: Config) -> Startup:
"""Extracts and validates startup parameters from the application config
file for the active profile
"""
db_init_schema = config.extract_config_value(
('postgres', 'startup', 'init_schema'),
lambda x: x is not None and isinstance(x, bool),
lambda x: x,
'bool'
)
db_wipe_schema = config.extract_config_value(
('postgres', 'startup', 'wipe_schema'),
lambda x: x is not None and isinstance(x, bool),
lambda x: x,
'bool'
)
if db_wipe_schema and not db_init_schema:
logging.getLogger(__name__).warning(
"Configuration is set to wipe database schema, but not"
" re-initialize it afterward: despite configuration, schema will be"
" re-initialized"
)
db_init_schema = True
return Startup(
init_schema=db_init_schema,
wipe_schema=db_wipe_schema
) | daa3809ed4f8be6c991796c8bbc11ee7b1434ee5 | 3,656,081 |
def new_request(request):
"""Implements view that allows users to create new requests"""
user = request.user
if user.user_type == 'ADM':
return redirect('/admin')
if request.method == "POST":
request_type = request.POST.get('requestType')
if request_type == 'SC' and user.user_object.type == 'PR':
schedule = request.POST.getlist('schedule')
start_time = request.POST.get('start_time')
# Create schedule model
monday_start = tuesday_start = wednesday_start = None
thursday_start = friday_start = saturday_start = sunday_start = None
for day in schedule:
if day == 'MO':
monday_start = start_time
elif day == 'TU':
tuesday_start = start_time
elif day == 'WE':
wednesday_start = start_time
elif day == 'TH':
thursday_start = start_time
elif day == 'FR':
friday_start = start_time
elif day == 'SA':
saturday_start = start_time
elif day == 'SU':
sunday_start = start_time
schedule_model = Schedule.objects.get_or_create(monday_start=monday_start,
tuesday_start=tuesday_start,
wednesday_start=wednesday_start,
thursday_start=thursday_start,
friday_start=friday_start,
saturday_start=saturday_start,
sunday_start=sunday_start)[0]
request_change = None
else:
schedule_model = None
request_change = request.POST.get('request_change')
request = Request.objects.get_or_create(user_id=user, schedule_id=schedule_model,
request_change=request_change,
current_request_review_id=None,
request_type=request_type)[0]
request_review = RequestReview.objects.get_or_create(request_id=request,
status='P')[0]
request.current_request_review_id = request_review
request_review.save()
request.save()
# create new notification
notification = Notification.objects.get_or_create(notification_type='R', is_dismissed=False,
request=request)[0]
notification.save()
# sending emails for this request:
email_vendor.email_admin_new_request(request)
email_vendor.email_user_new_request(request)
return redirect('/requests')
else:
# GET Request
return render(request, 'applications/request_new.html') | d90f72d5f299282709ed8a925569512a81d60591 | 3,656,082 |
def get_slice_test(eval_kwargs, test_kwargs, test_dataloader, robustness_testing_datasets):
"""
    Args:
        eval_kwargs (dict): extra keyword arguments passed to the dataloader factory.
        test_kwargs (dict): test configuration; an optional 'sampler' entry is reused.
        test_dataloader (Dataloader): factory used to build the slice dataloader.
        robustness_testing_datasets (dict): robustness datasets keyed by name (e.g. 'slice').
    Returns:
        The slice test dataloader, or None if no 'slice' dataset is available.
"""
slice_test = None
if 'slice' in robustness_testing_datasets:
slice_kwargs = {'dataset': robustness_testing_datasets['slice']}
if 'sampler' in test_kwargs:
slice_kwargs['sampler'] = test_kwargs['sampler']
slice_kwargs.update(eval_kwargs)
slice_test = test_dataloader(**slice_kwargs)
return slice_test | b995ff26fd743f106115c5d5958dd0654e0d4645 | 3,656,083 |
def transform_config(cfg, split_1='search:', split_2='known_papers:'):
"""Ugly function to make cfg.yml less ugly."""
before_search, after_search = cfg.split(split_1, 1)
search_default, papers_default = after_search.split(split_2, 1)
search, paper_comment = '', ''
for line in search_default.splitlines():
line = line.strip()
if line:
if line.startswith('-'):
search += ' '
elif line.startswith('# List of paper ids'):
paper_comment = line
continue
search += ' ' + line + '\n'
ok = papers_default
if '-' in papers_default:
ok = ' ['
for line in papers_default.splitlines():
line = line.strip()
if '-' in line:
ok += line.split('- ')[1] + ', '
ok = ok[:-2] + ']'
return f"{before_search}{split_1}\n{search}{paper_comment}\n{split_2}{ok}" | 78d079b6b06c8426be2b65307782129c414a42c4 | 3,656,084 |
import pandas as pd
def filter_coords(raw_lasso, filter_mtx):
"""Filter the raw data corresponding to the new coordinates."""
filter_mtx_use = filter_mtx.copy()
filter_mtx_use["y"] = filter_mtx_use.index
lasso_data = pd.melt(filter_mtx_use, id_vars=["y"], value_name="MIDCounts")
lasso_data = lasso_data[lasso_data["MIDCounts"] != 0][["x", "y"]]
new_lasso = pd.merge(raw_lasso, lasso_data, on=["x", "y"], how="inner")
return new_lasso | fce1159db2a2bdb75acbe9b7ccb236af8bade627 | 3,656,085 |
import numpy as np
from sklearn.metrics import accuracy_score, f1_score
def compute_threshold(predictions_list, dev_labels, f1=True):
"""
Determine the best threshold to use for classification.
Inputs:
predictions_list: prediction found by running the model
dev_labels: ground truth label to be compared with predictions_list
f1: True is using F1 score, False if using accuracy score
Returns:
best_threshold: threshold that yields the best accuracy
"""
predictions_list = predictions_list.reshape(-1, 1)
dev_labels = dev_labels.reshape(-1, 1)
both = np.column_stack((predictions_list, dev_labels))
both = both[both[:, 0].argsort()]
predictions_list = both[:, 0].ravel()
dev_labels = both[:, 1].ravel()
accuracies = np.zeros(np.shape(predictions_list))
for i in range(np.shape(predictions_list)[0]):
score = predictions_list[i]
predictions = (predictions_list >= score) * 2 - 1
accuracy = accuracy_score(predictions, dev_labels)
if f1:
accuracy = f1_score(dev_labels, predictions)
accuracies[i] = accuracy
indices = np.argmax(accuracies)
best_threshold = np.mean(predictions_list[indices])
return best_threshold | 230824c1454978cbe7c5f50ee43fba7b16754922 | 3,656,086 |
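# Hypothetical usage of compute_threshold above; labels are assumed to be in {-1, +1}
# because predictions are formed as (score >= threshold) * 2 - 1.
import numpy as np
scores = np.array([0.1, 0.4, 0.35, 0.8])
labels = np.array([-1, -1, 1, 1])
print(compute_threshold(scores, labels, f1=False))  # 0.35 for this toy data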
import torch
def color2position(C, min=None, max=None):
"""
Converts the input points set into colors
Parameters
----------
C : Tensor
the input color tensor
min : float (optional)
the minimum value for the points set. If None it will be set to -1 (default is None)
max : float (optional)
the maximum value for the points set. If None it will be set to +1 (default is None)
Returns
-------
Tensor
the points set tensor
"""
if min is None:
min = -1
if max is None:
max = 1
return torch.add(torch.mul(C, max-min), min) | 809d8cfd6f24e6abb6d65d5b576cc0b0ccbc3fdf | 3,656,087 |
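# Hypothetical usage of color2position above: map RGB values in [0, 1] to
# coordinates in the default range [-1, 1].
import torch
C = torch.tensor([[0.0, 0.5, 1.0]])
P = color2position(C)  # tensor([[-1.0, 0.0, 1.0]])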
from rdflib import URIRef
def is_empty_parsed_graph(graph):
"""
Checks if graph parsed from web page only contains an "empty" statement, that was not embedded in page
namely (<subjectURI>, <http://www.w3.org/ns/md#item>, <http://www.w3.org/1999/02/22-rdf-syntax-ns#nil>)
:param graph: an rdflib.Graph
:return: True if graph contains no "real" RDF, False otherwise
"""
if len(graph) > 1:
return False
for po in graph.predicate_objects(None):
if po == (URIRef(u'http://www.w3.org/ns/md#item'),
URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#nil')):
return True
return False | bf66271bc23f078669bc478a133b67c715fd8fdf | 3,656,088 |
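# Hypothetical usage of is_empty_parsed_graph above with a hand-built rdflib graph
# containing only the "empty" microdata statement.
from rdflib import Graph, URIRef
g = Graph()
g.add((URIRef('http://example.org/page'),
       URIRef('http://www.w3.org/ns/md#item'),
       URIRef('http://www.w3.org/1999/02/22-rdf-syntax-ns#nil')))
assert is_empty_parsed_graph(g) is True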
import numpy as np
def fillinNaN(var,neighbors):
    """
    Fill masked grid cells by copying values from neighbouring cells,
    iterating over the given neighbour shifts until no mask remains.
    """
for ii in range(var.shape[0]):
a = var[ii,:,:]
count = 0
while np.any(a.mask):
a_copy = a.copy()
for hor_shift,vert_shift in neighbors:
if not np.any(a.mask): break
a_shifted=np.roll(a_copy,shift=hor_shift,axis=1)
a_shifted=np.roll(a_shifted,shift=vert_shift,axis=0)
idx=~a_shifted.mask*a.mask
#print count, idx[idx==True].shape
a[idx]=a_shifted[idx]
count+=1
var[ii,:,:] = a
return var | a8ffc34dac72cbd4ecdbbc9ad02270a457b0b8d9 | 3,656,089 |
def parse_plist_from_bytes(data):
    """
    Convert a binary encoded plist to a dictionary.
    :param data: plist data
    :return: dictionary
    """
    try:
        # Prefer the standard library parser (plistlib.FMT_BINARY, Python 3.4+).
        from plistlib import loads, FMT_BINARY
        return loads(data, fmt=FMT_BINARY)
    except ImportError:
        # Fall back to the third-party bplistlib package.
        from bplistlib import loads
        return loads(data, binary=True) | b9f96ef749af88bdb950d8f3f36b584f6766661d | 3,656,090
from affine import Affine
def projection_standardizer(emb):
"""Returns an affine transformation to translate an embedding to the centroid
of the given set of points."""
return Affine.translation(*(-emb.mean(axis=0)[:2])) | 65686636caeac72a16198ac6c7f603836eaedc53 | 3,656,091 |
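# Hypothetical usage of projection_standardizer above: the returned Affine transform
# maps the embedding centroid to the origin (`aff * (x, y)` transforms a point).
import numpy as np
emb = np.array([[2.0, 4.0], [4.0, 8.0]])
aff = projection_standardizer(emb)
print(aff * (3.0, 6.0))  # (0.0, 0.0)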
import numpy as np
def forward_imputation(X_features, X_time):
"""
Fill X_features missing values with values, which are the same as its last measurement.
:param X_features: time series features for all samples
:param X_time: times, when observations were measured
:return: X_features, filled with last measurements instead of zeros (missing observations)
"""
time_length = [np.where(times == 0)[0][1] if np.where(times == 0)[0][0] == 0 else np.where(times == 0)[0][0] for times in X_time]
# impute times series features
for i, sample in enumerate(X_features):
for j, ts in enumerate(sample.T): # note the transposed matrix
first_observation = True
current_value = -1
for k, observation in enumerate(ts[:time_length[i]]):
if X_features[i, k, j] == 0 and first_observation:
continue
elif X_features[i, k, j] != 0:
current_value = X_features[i, k, j]
first_observation = False
elif X_features[i, k, j] == 0 and not first_observation:
X_features[i, k, j] = current_value
return X_features | ea0a41bfef02752338dc5384ce7fcd447c95f8c7 | 3,656,092 |
from collections import defaultdict
from itertools import izip  # Python 2; on Python 3 use the built-in zip instead
import numpy as np
import pandas as pd
def calc_mlevel(ctxstr, cgmap, gtftree, pmtsize=1000):
"""
Compute the mean methylation level of promoter/gene/exon/intron/IGN in each gene
"""
inv_ctxs = {'X': 'CG', 'Y': 'CHG', 'Z': 'CHH'}
ign = defaultdict(list)
mtable = defaultdict(lambda: defaultdict(lambda: defaultdict(float)))
counter = defaultdict(lambda: defaultdict(int))
for chr in set(ctxstr) & set(cgmap) & set(gtftree):
mask = [1]*len(cgmap[chr])
for (gene_id, strand) in gtftree[chr]:
feature_mlevels = defaultdict(lambda: defaultdict(list))
gstart = min(gtftree[chr][(gene_id, strand)])[0]
gend = max(gtftree[chr][(gene_id, strand)])[1]
mask[gstart:gend] = [0]*(gend - gstart)
if strand == '+':
for (pos, (tag, mlevel)) in enumerate(izip(ctxstr[chr][gstart-pmtsize:gstart], cgmap[chr][gstart-pmtsize:gstart])):
tag = tag.upper()
if tag in inv_ctxs and mlevel != '-':
feature_mlevels[inv_ctxs[tag]]['pmt'].append(mlevel)
elif strand == '-':
for (pos, (tag, mlevel)) in enumerate(izip(ctxstr[chr][gend:gend+pmtsize], cgmap[chr][gend:gend+pmtsize])):
tag = tag.upper()
if tag in inv_ctxs and mlevel != '-':
feature_mlevels[inv_ctxs[tag]]['pmt'].append(mlevel)
for (pos, (tag, mlevel)) in enumerate(izip(ctxstr[chr][gstart:gend], cgmap[chr][gstart:gend])):
tag = tag.upper()
inexon = False
if tag in inv_ctxs and mlevel != '-':
feature_mlevels[inv_ctxs[tag]]['gene'].append(mlevel)
for exon in gtftree[chr][(gene_id, strand)]:
if exon[0] <= pos+gstart < exon[1]:
feature_mlevels[inv_ctxs[tag]]['exon'].append(mlevel)
inexon = True
break
if not inexon:
feature_mlevels[inv_ctxs[tag]]['intron'].append(mlevel)
for ctx in ['CG', 'CHG', 'CHH']:
for feature in feature_mlevels[ctx]:
counter[ctx][feature] += len(feature_mlevels[ctx][feature])
mtable[ctx][gene_id][feature] = np.mean(feature_mlevels[ctx][feature])
for (pos, (tag, mlevel)) in enumerate(izip(ctxstr[chr], cgmap[chr])):
tag = tag.upper()
if (tag in inv_ctxs) and (mask[pos] == 1) and (mlevel != '-'):
ign[inv_ctxs[tag]].append(mlevel)
for ctx in ign:
ign[ctx] = np.mean(ign[ctx])
cg_table = pd.DataFrame(mtable['CG']).T
cg_table = cg_table[['pmt', 'gene', 'exon', 'intron']]
chg_table = pd.DataFrame(mtable['CHG']).T
chg_table = chg_table[['pmt', 'gene', 'exon', 'intron']]
chh_table = pd.DataFrame(mtable['CHH']).T
chh_table = chh_table[['pmt', 'gene', 'exon', 'intron']]
return ign, cg_table, chg_table, chh_table | 59b3a36e09e6ea0dd3608da0cf04f14f4d487182 | 3,656,093 |
from inspect import isclass
from pkg_resources import iter_entry_points
def _get_service(plugin):
"""
Return a service (ie an instance of a plugin class).
:param plugin: any of: the name of a plugin entry point; a plugin class; an
instantiated plugin object.
:return: the service object
"""
if isinstance(plugin, basestring):
try:
(plugin,) = iter_entry_points(
group=PLUGINS_ENTRY_POINT_GROUP,
name=plugin
)
except ValueError:
raise PluginNotFoundException(plugin)
return plugin.load()()
elif isinstance(plugin, _pca_Plugin):
return plugin
elif isclass(plugin) and issubclass(plugin, _pca_Plugin):
return plugin()
else:
raise TypeError("Expected a plugin name, class or instance", plugin) | a3433521b40861926d9ac3efa6a693d926a7fc94 | 3,656,094 |
def taiut1(tai1, tai2, dta):
"""
Wrapper for ERFA function ``eraTaiut1``.
Parameters
----------
tai1 : double array
tai2 : double array
dta : double array
Returns
-------
ut11 : double array
ut12 : double array
Notes
-----
The ERFA documentation is below.
- - - - - - - - - -
e r a T a i u t 1
- - - - - - - - - -
Time scale transformation: International Atomic Time, TAI, to
Universal Time, UT1.
Given:
tai1,tai2 double TAI as a 2-part Julian Date
dta double UT1-TAI in seconds
Returned:
ut11,ut12 double UT1 as a 2-part Julian Date
Returned (function value):
int status: 0 = OK
Notes:
1) tai1+tai2 is Julian Date, apportioned in any convenient way
between the two arguments, for example where tai1 is the Julian
Day Number and tai2 is the fraction of a day. The returned
UT11,UT12 follow suit.
2) The argument dta, i.e. UT1-TAI, is an observed quantity, and is
available from IERS tabulations.
Reference:
Explanatory Supplement to the Astronomical Almanac,
P. Kenneth Seidelmann (ed), University Science Books (1992)
Copyright (C) 2013-2017, NumFOCUS Foundation.
Derived, with permission, from the SOFA library. See notes at end of file.
"""
ut11, ut12, c_retval = ufunc.taiut1(tai1, tai2, dta)
check_errwarn(c_retval, 'taiut1')
return ut11, ut12 | c7f9490a5af86de98c89af37cb6ca1bcb92d107a | 3,656,095 |
from SPARQLWrapper import SPARQLWrapper, JSON
def fetch_ppn(ppn):
"""
"""
ENDPOINT_URL = 'http://openvirtuoso.kbresearch.nl/sparql'
sparql = SPARQLWrapper(ENDPOINT_URL)
sqlquery = """
SELECT ?collatie WHERE {{
kbc:{ppn} dcterms:extent ?formaat, ?collatie .
FILTER (?formaat != ?collatie ) .
FILTER regex(?formaat, "^[0-9]{{1,2}}°", "i") .
}}
""".format(ppn=ppn)
sparql.setQuery(sqlquery)
sparql.setReturnFormat(JSON)
results = sparql.query().convert()
# {'head': {'link': [], 'vars': ['collatie']}, 'results': {'bindings': [{'collatie': {'value': '*`SUP`8`LO` A-S`SUP`8`LO` (S8 blank)', 'type': 'literal'}}], 'distinct': False, 'ordered': True}}
result = results['results']['bindings'][0]['collatie']['value']
return result | fd974eccc7f4c099320c50a20b678d36cea7b899 | 3,656,096 |
def prepare_link_title(
item: feedparser.FeedParserDict) -> feedparser.FeedParserDict:
"""
    For an RSS item, return the item with its link and title cleaned up
    (Google redirect prefix and <b> tags removed).
:param item:
:return:
"""
result = None
if item:
assert item.title, 'Not found title in item'
assert item.link, 'Not found link in item'
link = item.link.replace('https://www.google.com/url?rct=j&sa=t&url=',
'')
ge_ind = link.find('&ct=ga')
if ge_ind > -1:
link = link[0:ge_ind]
title = item.title.replace('<b>', '').replace('</b>', '')
item.link = link
item.title = title
result = item
return result | 445eccd9855484b65b726a4ee12a3dfa9a9de375 | 3,656,097 |
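# Hypothetical usage of prepare_link_title above with a minimal feedparser-style item
# (FeedParserDict exposes its keys as attributes).
import feedparser
item = feedparser.FeedParserDict(
    title='<b>Some headline</b>',
    link='https://www.google.com/url?rct=j&sa=t&url=https://example.com/post&ct=ga&cd=x',
)
cleaned = prepare_link_title(item)
print(cleaned.link)   # https://example.com/post
print(cleaned.title)  # Some headline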
def api_docs_redirect():
""" Redirect to API docs """
return redirect('/api/v1', code=302) | d7ed10aa264d1403325f0b044b4c7b8b20b5989f | 3,656,098 |
from typing import List
def print_topics(model, vectorizer, top_n: int=10)-> List:
"""Print the top n words found by each topic model.
Args:
model: Sklearn LatentDirichletAllocation model
vectorizer: sklearn CountVectorizer
top_n (int): Number of words you wish to return
Source: https://towardsdatascience.com/end-to-end-topic-modeling-in-python-latent-dirichlet-allocation-lda-35ce4ed6b3e0
"""
for idx, topic in enumerate(model.components_):
print(f"Topic {idx}:")
print([(vectorizer.get_feature_names()[i], topic[i])
for i in topic.argsort()[:-top_n - 1:-1]])
return [vectorizer.get_feature_names()[i] for i in topic.argsort()[:-top_n-1:-1]] | c0477c19c6806c2eaacb4165d332291dc0ba341b | 3,656,099 |
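# Hypothetical usage of print_topics above with a tiny corpus. Note that
# CountVectorizer.get_feature_names() was removed in scikit-learn 1.2; with newer
# versions the function above would need get_feature_names_out() instead.
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.decomposition import LatentDirichletAllocation
docs = ["cats and dogs", "dogs chase cats", "stocks and bonds", "bonds beat stocks"]
vectorizer = CountVectorizer(stop_words="english")
X = vectorizer.fit_transform(docs)
lda = LatentDirichletAllocation(n_components=2, random_state=0).fit(X)
top_words = print_topics(lda, vectorizer, top_n=3)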