content (stringlengths 35–762k) | sha1 (stringlengths 40) | id (int64 0–3.66M)
---|---|---|
import pickle
import os
import os.path as osp
def getAllTraj():
""" get all trajectories from C.TRAJ_DIR """
    def loadPickle(f):
        with open(osp.join(C.TRAJ_DIR, f), 'rb') as fd:
return pickle.load(fd)
return list(map(loadPickle, os.listdir(C.TRAJ_DIR))) | b7d5bfda197445723d024800ec276d7e2050a987 | 3,652,700 |
def _get_count(_khoros_object, _user_id, _object_type):
"""This function returns the count of a specific user object (e.g. ``albums``, ``followers``, etc.) for a user.
:param _khoros_object: The core :py:class:`khoros.Khoros` object
:type _khoros_object: class[khoros.Khoros]
:param _user_id: The User ID associated with the user
:type _user_id: int, str
:param _object_type: The type of object for which to get the count (e.g. ``albums``, ``followers``, etc.)
:returns: The user object count as an integer
:raises: :py:exc:`khoros.errors.exceptions.GETRequestError`
"""
_api_response = query_users_table_by_id(_khoros_object, f'{_object_type}.count(*)', _user_id)
return int(_api_response['data']['items'][0][_object_type]['count']) | 5e0cb02a74a819984ab271fcaad469a60f4bdf43 | 3,652,701 |
import numpy as np
def antiSymmetrizeSignal(y, symmetryStep):
    """
    Discard the symmetric part of a signal by
    taking the difference of the signal at x[n] and x[n + symmetryStep]
    get your corresponding x data as x[0:len(y)//2]
Parameters
----------
y : array_like
        numpy array or list of data values to anti-symmetrize
symmetryStep : scalar
expected symmetry of the signal at x[n] occurs at x[n+symmetryStep]
Returns
----------
y_symmetrized : ndarray
numpy array of dimension size(y)/2 of the antisymmetrized data
"""
y = np.array(y)
    s = np.zeros(len(y) // 2)
for idx in range(0, len(s)):
# (positive field - negative field)/2
s[idx] = (y[idx] - y[idx+symmetryStep])/2.-(y[0] - y[0+symmetryStep])/2.
return s | 0936d5fc3883d3ce6ee2f6c77fb9b4bc59177426 | 3,652,702 |
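A minimal usage sketch for antiSymmetrizeSignal above, assuming the data layout its inline comment implies (a positive-field sweep followed by the matching negative-field sweep, so y[n + symmetryStep] is the symmetry partner of y[n]); the signal here is synthetic.

import numpy as np

t = np.linspace(0.0, 1.0, 50)
pos = t ** 3 + 0.2    # odd part plus an even offset to be discarded
neg = -t ** 3 + 0.2
y = np.concatenate([pos, neg])
s = antiSymmetrizeSignal(y, symmetryStep=50)
print(np.allclose(s, t ** 3))  # True: the antisymmetric part is recovered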
import numpy as np
def lms_to_rgb(img):
"""
rgb_matrix = np.array(
[[0.0809444479, -0.130504409, 0.116721066],
[0.113614708, -0.0102485335, 0.0540193266],
[-0.000365296938, -0.00412161469, 0.693511405]
]
)
"""
rgb_matrix = np.array(
[[ 2.85831110e+00, -1.62870796e+00, -2.48186967e-02],
[-2.10434776e-01, 1.15841493e+00, 3.20463334e-04],
[-4.18895045e-02, -1.18154333e-01, 1.06888657e+00]]
)
return np.tensordot(img, rgb_matrix, axes=([2], [1])) | 76ce7a5f73712a6d9f241d66b3af8a54752b141d | 3,652,703 |
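The tensordot above contracts the image's channel axis with the columns of rgb_matrix, so each output pixel is rgb_matrix @ lms_pixel. A quick shape check on synthetic data:

import numpy as np

img = np.random.rand(8, 8, 3)  # synthetic channels-last LMS image
out = lms_to_rgb(img)
print(out.shape)  # (8, 8, 3)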
import time
def timeit(func):
"""
Decorator that returns the total runtime of a function
@param func: function to be timed
    @return: time taken in seconds (the wrapped function's own return value is discarded)
"""
def wrapper(*args, **kwargs) -> float:
start = time.time()
func(*args, **kwargs)
total_time = time.time() - start
return total_time
return wrapper | 68723a74c96c2d004eed9533f9023d77833c509b | 3,652,704 |
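A short usage sketch of the decorator above; note that the wrapped call returns the elapsed time, not the function's own result.

@timeit
def slow_sum(n):
    return sum(range(n))

elapsed = slow_sum(1_000_000)  # seconds taken, not the sum
print(f"took {elapsed:.4f}s")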
def merge_count(data1, data2):
"""Auxiliary method to merge the lengths."""
return data1 + data2 | 8c0280b043b7d21a411ac14d3571acc50327fdbc | 3,652,705 |
import numpy as np
def orthogonal_procrustes(fixed, moving):
"""
Implements point based registration via the Orthogonal Procrustes method.
Based on Arun's method:
Least-Squares Fitting of two, 3-D Point Sets, Arun, 1987,
`10.1109/TPAMI.1987.4767965 <http://dx.doi.org/10.1109/TPAMI.1987.4767965>`_.
Also see `this <http://eecs.vanderbilt.edu/people/mikefitzpatrick/papers/2009_Medim_Fitzpatrick_TRE_FRE_uncorrelated_as_published.pdf>`_
and `this <http://tango.andrew.cmu.edu/~gustavor/42431-intro-bioimaging/readings/ch8.pdf>`_.
:param fixed: point set, N x 3 ndarray
:param moving: point set, N x 3 ndarray of corresponding points
:returns: 3x3 rotation ndarray, 3x1 translation ndarray, FRE
:raises: ValueError
"""
validate_procrustes_inputs(fixed, moving)
# This is what we are calculating
R = np.eye(3)
T = np.zeros((3, 1))
# Arun equation 4
p = np.ndarray.mean(moving, 0)
# Arun equation 6
p_prime = np.ndarray.mean(fixed, 0)
# Arun equation 7
q = moving - p
# Arun equation 8
q_prime = fixed - p_prime
# Arun equation 11
H = np.matmul(q.transpose(), q_prime)
# Arun equation 12
# Note: numpy factors h = u * np.diag(s) * v
svd = np.linalg.svd(H)
# Replace Arun Equation 13 with Fitzpatrick, chapter 8, page 470,
# to avoid reflections, see issue #19
X = _fitzpatricks_X(svd)
# Arun step 5, after equation 13.
det_X = np.linalg.det(X)
    if det_X < 0 and np.all(np.logical_not(np.isclose(svd[1], np.zeros((3, 1))))):
# Don't yet know how to generate test data.
# If you hit this line, please report it, and save your data.
raise ValueError("Registration fails as determinant < 0"
" and no singular values are close enough to zero")
if det_X < 0 and np.any(np.isclose(svd[1], np.zeros((3, 1)))):
# Implement 2a in section VI in Arun paper.
v_prime = svd[2].transpose()
v_prime[0][2] *= -1
v_prime[1][2] *= -1
v_prime[2][2] *= -1
X = np.matmul(v_prime, svd[0].transpose())
# Compute output
R = X
tmp = p_prime.transpose() - np.matmul(R, p.transpose())
T[0][0] = tmp[0]
T[1][0] = tmp[1]
T[2][0] = tmp[2]
fre = compute_fre(fixed, moving, R, T)
return R, T, fre | 5818c67e478ad9dd59ae5a1ba0c847d60234f222 | 3,652,706 |
def make_model(arch_params, patch_size):
""" Returns the model.
Used to select the model.
"""
return RDN(arch_params, patch_size) | 6cf91ea68bcf58d4aa143a606bd774761f37acb0 | 3,652,707 |
import numpy as np
def calc_error(frame, gap, method_name):
"""Calculate the error between the ground truth and the GAP prediction"""
frame.single_point(method_name=method_name, n_cores=1)
pred = frame.copy()
pred.run_gap(gap=gap, n_cores=1)
error = np.abs(pred.energy - frame.energy)
logger.info(f'|E_GAP - E_0| = {np.round(error, 3)} eV')
return error | 9a3eb0b115c394703cb7446852982fa1468607ad | 3,652,708 |
def find_model(sender, model_name):
"""
Register new model to ORM
"""
MC = get_mc()
model = MC.get((MC.c.model_name==model_name) & (MC.c.uuid!=''))
if model:
model_inst = model.get_instance()
orm.set_model(model_name, model_inst.table_name, appname=__name__, model_path='')
return orm.__models__.get(model_name) | 4c78f135b502119fffb6b2ccf5f09335e739a97a | 3,652,709 |
def list_subtitles(videos, languages, pool_class=ProviderPool, **kwargs):
"""List subtitles.
The `videos` must pass the `languages` check of :func:`check_video`.
All other parameters are passed onwards to the provided `pool_class` constructor.
:param videos: videos to list subtitles for.
:type videos: set of :class:`~subliminal.video.Video`
:param languages: languages to search for.
:type languages: set of :class:`~babelfish.language.Language`
:param pool_class: class to use as provider pool.
    :type pool_class: :class:`ProviderPool`, :class:`AsyncProviderPool` or similar
:return: found subtitles per video.
:rtype: dict of :class:`~subliminal.video.Video` to list of :class:`~subliminal.subtitle.Subtitle`
"""
listed_subtitles = defaultdict(list)
# check videos
checked_videos = []
for video in videos:
if not check_video(video, languages=languages):
logger.info('Skipping video %r', video)
continue
checked_videos.append(video)
# return immediately if no video passed the checks
if not checked_videos:
return listed_subtitles
# list subtitles
with pool_class(**kwargs) as pool:
for video in checked_videos:
logger.info('Listing subtitles for %r', video)
subtitles = pool.list_subtitles(video, languages - video.subtitle_languages)
listed_subtitles[video].extend(subtitles)
logger.info('Found %d subtitle(s)', len(subtitles))
return listed_subtitles | f5d9fa450f0df5c71c320d972e54c2502bbfd37d | 3,652,710 |
def public_incursion_no_expires(url, request):
""" Mock endpoint for incursion.
Public endpoint without cache
"""
return httmock.response(
status_code=200,
content=[
{
"type": "Incursion",
"state": "mobilizing",
"staging_solar_system_id": 30003893,
"constellation_id": 20000568,
"infested_solar_systems": [
30003888,
],
"has_boss": True,
"faction_id": 500019,
"influence": 1
}
]
) | 31d008b6479d8e2a5e4bc9f2d7b4af8cc4a40b03 | 3,652,711 |
def bad_topics():
""" Manage Inappropriate topics """
req = request.vars
view_info = {}
view_info['errors'] = []
tot_del = 0
if req.form_submitted:
for item in req:
if item[:9] == 'inapp_id_':
inapp_id = int(req[item])
db(db.zf_topic_inappropriate.id==inapp_id).update(read_flag=True)
tot_del += 1
topics = db((db.zf_topic_inappropriate.read_flag==False) & (db.zf_topic.id==db.zf_topic_inappropriate.topic_id)).select(db.zf_topic_inappropriate.ALL, db.zf_topic.title, orderby=~db.zf_topic_inappropriate.creation_date)
view_info.update({'removed': tot_del})
return dict(request=request, topics=topics, view_info=view_info)
else:
topics = db((db.zf_topic_inappropriate.read_flag==False) & (db.zf_topic.id==db.zf_topic_inappropriate.topic_id)).select(db.zf_topic_inappropriate.ALL, db.zf_topic.title, orderby=~db.zf_topic_inappropriate.creation_date)
return dict(request=request, topics=topics, view_info=view_info) | 64c40b98a77c5934bd0593c9f5c4f31370980e8a | 3,652,712 |
def get_min_id_for_repo_mirror_config():
"""
Gets the minimum id for a repository mirroring.
"""
return RepoMirrorConfig.select(fn.Min(RepoMirrorConfig.id)).scalar() | 21a99988a1805f61ede9d689494b59b61c0391d8 | 3,652,713 |
def check_series(
Z,
enforce_univariate=False,
allow_empty=False,
allow_numpy=True,
enforce_index_type=None,
):
"""Validate input data.
Parameters
----------
Z : pd.Series, pd.DataFrame
Univariate or multivariate time series
enforce_univariate : bool, optional (default=False)
If True, multivariate Z will raise an error.
    allow_empty : bool, optional (default=False)
        If False, an empty Z will raise an error.
enforce_index_type : type, optional (default=None)
type of time index
Returns
-------
y : pd.Series, pd.DataFrame
Validated time series
Raises
------
ValueError, TypeError
If Z is an invalid input
"""
# Check if pandas series or numpy array
if not allow_numpy:
valid_data_types = tuple(
filter(lambda x: x is not np.ndarray, VALID_DATA_TYPES)
)
else:
valid_data_types = VALID_DATA_TYPES
if not isinstance(Z, valid_data_types):
raise TypeError(
f"Data must be a one of {valid_data_types}, but found type: {type(Z)}"
)
if enforce_univariate:
_check_is_univariate(Z)
# check time index
check_time_index(
Z.index, allow_empty=allow_empty, enforce_index_type=enforce_index_type
)
return Z | 5831c75953b8953ec54982712c1e4d3cccb22cc8 | 3,652,714 |
def B(s):
"""string to byte-string in
Python 2 (including old versions that don't support b"")
and Python 3"""
if type(s)==type(u""): return s.encode('utf-8') # Python 3
return s | b741bf4a64bd866283ca789745f373db360f4016 | 3,652,715 |
def gather_tiling_strategy(data, axis):
"""Custom tiling strategy for gather op"""
strategy = list()
base = 0
for priority_value, pos in enumerate(range(len(data.shape) - 1, axis, -1)):
priority_value = priority_value + base
strategy.append(ct_util.create_constraint_on_tensor(tensor=data,
values=priority_value,
constraints=ct_util.TileConstraint.SET_PRIORITY,
tensor_pos=pos)[0])
return strategy | afceb113c9b6c25f40f4f885ccaf08860427291f | 3,652,716 |
from typing import Optional
import requests
import time
def fetch(
uri: str, auth: Optional[str] = None, endpoint: Optional[str] = None, **data
) -> OAuthResponse:
"""Perform post given URI with auth and provided data."""
req = requests.Request("POST", uri, data=data, auth=auth)
prepared = req.prepare()
timeout = time.time() + app.config["OAUTH_FETCH_TOTAL_TIMEOUT"]
retry = 0
result = _error(
errors.SERVER_ERROR, "An unknown error occurred talking to provider."
)
for i in range(app.config["OAUTH_FETCH_TOTAL_RETRIES"]):
prefix = "attempt #%d %s" % (i + 1, uri)
# TODO: Add jitter to backoff and/or retry after?
backoff = (2**i - 1) * app.config["OAUTH_FETCH_BACKOFF_FACTOR"]
remaining_timeout = timeout - time.time()
if (retry or backoff) > remaining_timeout:
app.logger.debug("Abort %s no timeout remaining.", prefix)
break
elif (retry or backoff) > 0:
app.logger.debug("Retry %s [sleep %.3f]", prefix, retry or backoff)
time.sleep(retry or backoff)
result, status, retry = _fetch(prepared, remaining_timeout, endpoint)
labels = {"endpoint": endpoint, "status": stats.status(status)}
stats.ClientRetryHistogram.labels(**labels).observe(i)
if status is not None and "error" in result:
error = result["error"]
error = app.config["OAUTH_FETCH_ERROR_TYPES"].get(error, error)
if error not in errors.DESCRIPTIONS:
error = "invalid_error"
stats.ClientErrorCounter.labels(error=error, **labels).inc()
if status is None:
pass # We didn't even get a response, so try again.
elif status not in app.config["OAUTH_FETCH_RETRY_STATUS_CODES"]:
break
elif "error" not in result:
break # No error reported so might as well return it.
app.logger.debug(
"Result %s [status %s] [retry after %s]", prefix, status, retry
)
# TODO: consider returning retry after time so it can be used.
return result | c9fc1cf96fa0f0037b50ec30a68d17ea05d892d9 | 3,652,717 |
def redscreen(main_filename, back_filename):
"""
Implements the notion of "redscreening". That is,
the image in the main_filename has its "sufficiently"
    red pixels replaced with pixels from the corresponding x,y
location in the image in the file back_filename.
Returns the resulting "redscreened" image.
"""
image = SimpleImage(main_filename)
back = SimpleImage(back_filename)
for pixel in image:
average = (pixel.red + pixel.green + pixel.blue) // 3
# See if this pixel is "sufficiently" red
if pixel.red >= average * INTENSITY_THRESHOLD:
# If so, we get the corresponding pixel from the
# back image and overwrite the pixel in
# the main image with that from the back image.
x = pixel.x
y = pixel.y
image.set_pixel(x, y, back.get_pixel(x, y))
return image | 96824872ceb488497fbd56a662b8fb5098bf2341 | 3,652,718 |
def density_speed_conversion(N, frac_per_car=0.025, min_val=0.2):
"""
Fraction to multiply speed by if there are N nearby vehicles
"""
z = 1.0 - (frac_per_car * N)
# z = 1.0 - 0.04 * N
return max(z, min_val) | 95285a11be84df5ec1b6c16c5f24b3831b1c0348 | 3,652,719 |
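A quick check of the scaling: the multiplier decays linearly with the vehicle count N and is floored at min_val.

for n in (0, 10, 40):
    print(n, density_speed_conversion(n))
# 0 1.0
# 10 0.75
# 40 0.2   (1.0 - 0.025 * 40 = 0.0, clipped to the 0.2 floor)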
import logging
def connect_to_amqp(sysconfig):
"""
Connect to an AMQP Server, and return the connection and Exchange.
:param sysconfig: The slickqaweb.model.systemConfiguration.amqpSystemConfiguration.AMQPSystemConfiguration instance
to use as the source of information of how to connect.
:return: (connection, exchange) on success, exception on error
"""
assert isinstance(sysconfig, AMQPSystemConfiguration)
configuration = dict()
configuration['AMQP'] = dict()
if hasattr(sysconfig, 'hostname') and sysconfig.hostname is not None:
configuration['AMQP']['hostname'] = sysconfig.hostname
else:
raise AMQPConnectionError(message="No hostname defined for AMQP connection.")
if hasattr(sysconfig, 'port') and sysconfig.port is not None:
configuration['AMQP']['port'] = sysconfig.port
if hasattr(sysconfig, 'username') and sysconfig.username is not None:
configuration['AMQP']['username'] = sysconfig.username
if hasattr(sysconfig, 'password') and sysconfig.password is not None:
configuration['AMQP']['password'] = sysconfig.password
if hasattr(sysconfig, 'virtualHost') and sysconfig.virtualHost is not None:
configuration['AMQP']['virtual host'] = sysconfig.virtualHost
if hasattr(sysconfig, 'exchangeName') and sysconfig.exchangeName is not None:
configuration['AMQP']['exchange'] = sysconfig.exchangeName
else:
raise AMQPConnectionError(message="No exchange defined for AMQP connection.")
logger = logging.getLogger("slickqaweb.amqpcon.connect_to_amqp")
url = str.format("amqp://{hostname}:{port}", **dict(list(configuration['AMQP'].items())))
if 'virtual host' in configuration['AMQP'] and configuration['AMQP']['virtual host'] != '':
url = str.format("{}/{}", url, configuration['AMQP']['virtual host'])
logger.debug("AMQPConnection configured with url %s", url)
exchange = Exchange(configuration['AMQP'].get('exchange', "amqp.topic"), type='topic', durable=True)
logger.debug("AMQPConnection is using exchange %s", exchange)
connection = None
if 'username' in configuration['AMQP'] and 'password' in configuration['AMQP']:
username = configuration['AMQP']['username']
password = configuration['AMQP']['password']
logger.debug("Using username %s and password %s to connect to AMQP Broker", username, password)
connection = Connection(url, userid=username, password=password)
else:
connection = Connection(url)
# typically connection connect on demand, but we want to flush out errors before proceeding
connection.connect()
exchange = exchange(connection)
exchange.declare()
return (connection, exchange) | 82a14bfd757caa1666c172d4aba64a2053ad5810 | 3,652,720 |
import numpy as np
def int_to_datetime(int_date, ds=None):
"""Convert integer date indices to datetimes."""
if ds is None:
return TIME_ZERO + int_date * np.timedelta64(1, 'D')
if not hasattr(ds, 'original_time'):
raise ValueError('Dataset with no original_time cannot be used to '
'convert ints to datetimes.')
first_int = ds.time.values[0]
delta_int = _get_delta(ds.time)
first_date = ds.original_time.values[0]
delta_date = _get_delta(ds.original_time)
return first_date + ((int_date - first_int) / delta_int) * delta_date | 2e081ff019628800fb5c44eec4fa73333d755dde | 3,652,721 |
def show_plugin(name, path, user):
"""
Show a plugin in a wordpress install and check if it is installed
name
Wordpress plugin name
path
path to wordpress install location
user
user to run the command as
CLI Example:
.. code-block:: bash
salt '*' wordpress.show_plugin HyperDB /var/www/html apache
"""
ret = {"name": name}
resp = __salt__["cmd.shell"](
("wp --path={0} plugin status {1}").format(path, name), runas=user
).split("\n")
for line in resp:
if "Status" in line:
ret["status"] = line.split(" ")[-1].lower()
elif "Version" in line:
ret["version"] = line.split(" ")[-1].lower()
return ret | fded4735eda73dc19dd51dc13a1141345505b3b9 | 3,652,722 |
def get_receiver_type(rinex_fname):
"""
Return the receiver type (header line REC # / TYPE / VERS) found
in *rinex_fname*.
"""
with open(rinex_fname) as fid:
for line in fid:
if line.rstrip().endswith('END OF HEADER'):
break
elif line.rstrip().endswith('REC # / TYPE / VERS'):
return line[20:40].strip()
raise ValueError('receiver type not found in header of RINEX file '
'{}'.format(rinex_fname)) | 7391f7a100455b8ff5ab01790f62518a3c4a079b | 3,652,723 |
def is_cyclone_phrase(phrase):
"""Returns whether all the space-delimited words in phrases are cyclone words
A phrase is a cyclone phrase if and only if all of its component words are
cyclone words, so we first split the phrase into words using .split(), and then
check if all of the words are cyclone words.
"""
return all([is_cyclone_word(word) for word in phrase.split()]) | 8014490ea2391b1acec1ba641ba89277065f2dd9 | 3,652,724 |
import numpy as np
def wl_to_wavenumber(wl, angular=False):
"""Given wavelength in meters, convert to wavenumber in 1/cm. The wave
number represents the number of wavelengths in one cm. If angular is true,
will calculate angular wavenumber. """
if angular:
wnum = (2*np.pi)/(wl*100)
else:
wnum = 1/(wl*100)
return wnum | ca34e3abc5f9ed0d555c836819c9f3c8d3ab9e4b | 3,652,725 |
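A quick numerical check of the conversion above, using green light at 500 nm:

print(wl_to_wavenumber(500e-9))                # 20000.0 (1/cm)
print(wl_to_wavenumber(500e-9, angular=True))  # ~125663.7 (rad/cm)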
def easeOutCubic(n):
"""A cubic tween function that begins fast and then decelerates.
Args:
n (float): The time progress, starting at 0.0 and ending at 1.0.
Returns:
(float) The line progress, starting at 0.0 and ending at 1.0. Suitable for passing to getPointOnLine().
"""
_checkRange(n)
n = n - 1
return n**3 + 1 | 20aea25b2ee937618df2b674178f2a767c373da7 | 3,652,726 |
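A sketch of how the tween might drive interpolation, as the docstring suggests; getPointOnLine here is a hypothetical linear-interpolation helper (not part of the original module), and easeOutCubic itself relies on the module's _checkRange.

def getPointOnLine(x1, y1, x2, y2, n):
    # hypothetical helper: linearly interpolate between two points
    return (x1 + (x2 - x1) * n, y1 + (y2 - y1) * n)

for step in range(5):
    n = step / 4
    print(getPointOnLine(0, 0, 100, 100, easeOutCubic(n)))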
import torch
import sys
import copy
import numpy as np
def _evaluate_batch_beam(model, params, dico, batch, lengths, positions, langs, src_lens, trg_lens, \
gen_type, alpha, beta, gamma, dec_len, iter_mult, selected_pos, beam_size, length_penalty):
"""Run on one example"""
n_iter = dec_len * iter_mult
# log probabilities of previously present tokens at each position
vocab_size = len(dico)
mask_tok = dico.word2id[MASK_WORD]
total_topbeam_scores = np.array([0.] * beam_size)
batch = batch.repeat((1, beam_size))
lengths = lengths.repeat((beam_size))
positions = positions.repeat((1, beam_size))
langs = langs.repeat((1, beam_size))
for dec_iter in range(n_iter):
# predict the token depending on selected_pos
pred_mask = torch.zeros_like(batch).byte()
if gen_type == "src2trg":
pred_mask[src_lens[0] + selected_pos[dec_iter%dec_len] + 1, :] = 1
elif gen_type == "trg2src":
pred_mask[selected_pos[dec_iter%dec_len] + 1, :] = 1
# NOTE(Alex): shouldn't there be some masking here?
tensor = model('fwd', x=batch, lengths=lengths, positions=positions, langs=langs, causal=False)
# beam_size x |V|
scores_pred = model('predict_wo_targets', tensor=tensor, pred_mask=pred_mask)
        # get top_beam scores and tokens; need to take log softmax so scores are on same scale
log_probs_pred = torch.log_softmax(scores_pred, dim=-1)
# beam_size x beam_size
topbeam_scores, topbeam_toks = log_probs_pred.topk(beam_size, dim=-1)
### exception for first
if dec_iter == 0:
total_topbeam_scores = total_topbeam_scores + np.diagonal(topbeam_scores.cpu().numpy())# + selected_pos_scores
for i_beam, topbeam_tok in enumerate(torch.diagonal(topbeam_toks)):
# substitute that token in
if gen_type == "src2trg":
batch[src_lens[0] + selected_pos[dec_iter%dec_len] + 1][i_beam] = topbeam_tok
elif gen_type == "trg2src":
batch[selected_pos[dec_iter%dec_len] + 1][i_beam] = topbeam_tok
else:
sys.exit("something is wrong")
continue
### all iterations except first
# compute updated beam scores
topbeam_scores = topbeam_scores.cpu().numpy()
new_total_topbeam_scores = np.expand_dims(total_topbeam_scores, 1) + topbeam_scores
# sort and take beam_size highest
new_topbeam_tok_flat = new_total_topbeam_scores.reshape(-1).argsort()[-beam_size:]
# create clones of the tokens and scores so far so we don't overwrite when updating
batch_clone = batch.clone()
total_topbeam_scores_clone = copy.deepcopy(total_topbeam_scores)
# iterate over the highest scoring beams
for i_beam, topbeam_tok_flat in enumerate(new_topbeam_tok_flat):
topbeam_tok_row = int(np.floor(topbeam_tok_flat / beam_size))
topbeam_tok_col = int(topbeam_tok_flat % beam_size)
topbeam_tok = topbeam_toks[topbeam_tok_row][topbeam_tok_col]
batch[:,i_beam] = batch_clone[:, topbeam_tok_row]
total_topbeam_scores[i_beam] = total_topbeam_scores_clone[topbeam_tok_row] + \
topbeam_scores[topbeam_tok_row][topbeam_tok_col]
# substitute that token in
if gen_type == "src2trg":
batch[src_lens[0] + selected_pos[dec_iter%dec_len] + 1][i_beam] = topbeam_tok
elif gen_type == "trg2src":
batch[selected_pos[dec_iter%dec_len] + 1][i_beam] = topbeam_tok
else:
sys.exit("something is wrong")
return batch, total_topbeam_scores / (dec_len ** length_penalty) | 0ea481615566e98f9d4d2769b8b2fe99a2392beb | 3,652,727 |
import re
def _fix_entries(entries):
"""recursive function to collapse entries into correct format"""
cur_chapter_re, chapter_entry = None, None
new_entries = []
for entry in entries:
title, doxy_path, subentries = entry
if subentries is not None:
new_subentries = _fix_entries(subentries)
new_entries.append([title, doxy_path, new_subentries])
elif cur_chapter_re and cur_chapter_re.match(title):
chapter_entry[2].append(entry)
else:
new_entries.append(entry)
chapter_match = CHAPTER_RE.match(title)
if chapter_match:
cur_chapter_re = re.compile(
chapter_match.group('num') + r'\.\d+:')
chapter_entry = entry
chapter_entry[-1] = []
else:
cur_chapter_re, chapter_entry = None, None
return new_entries | 1f8ac466533c17c1ad4e7cf2d27f2e7ff098ae79 | 3,652,728 |
def _check_bulk_delete(attempted_pairs, result):
"""
Checks if the RCv3 bulk delete command was successful.
"""
response, body = result
if response.code == 204: # All done!
return body
errors = []
non_members = pset()
for error in body["errors"]:
match = _SERVER_NOT_A_MEMBER_PATTERN.match(error)
if match is not None:
pair = match.groupdict()
non_members = non_members.add(
(normalize_lb_id(pair["lb_id"]), pair["server_id"]))
continue
match = _LB_INACTIVE_PATTERN.match(error)
if match is not None:
errors.append(LBInactive(match.group("lb_id")))
continue
match = _LB_DOESNT_EXIST_PATTERN.match(error)
if match is not None:
del_lb_id = normalize_lb_id(match.group("lb_id"))
# consider all pairs with this LB to be removed
removed = [(lb_id, node_id) for lb_id, node_id in attempted_pairs
if lb_id == del_lb_id]
non_members |= pset(removed)
continue
match = _SERVER_DOES_NOT_EXIST.match(error)
if match is not None:
del_server_id = match.group("server_id")
# consider all pairs with this server to be removed
removed = [(lb_id, node_id) for lb_id, node_id in attempted_pairs
if node_id == del_server_id]
non_members |= pset(removed)
else:
raise UnknownBulkResponse(body)
if errors:
raise BulkErrors(errors)
elif non_members:
to_retry = pset(attempted_pairs) - non_members
return bulk_delete(to_retry) if to_retry else None
else:
raise UnknownBulkResponse(body) | 2bf99d74a23d3522a2a53a711fbb2c8d43748eb1 | 3,652,729 |
def get_frontend_ui_base_url(config: "CFG") -> str:
"""
Return ui base url
"""
return as_url_folder(urljoin(get_root_frontend_url(config), FRONTEND_UI_SUBPATH)) | 4c34a1830431e28ec084853be6d93f1e487865a9 | 3,652,730 |
def read_image(name):
"""
Reads image into a training example. Might be good to threshold it.
"""
im = Image.open(name)
pix = im.load()
example = []
for x in range(16):
for y in range(16):
example.append(pix[x, y])
return example | be510bfee0a24e331d1b9bfb197b02edaafd0d70 | 3,652,731 |
def l3tc_underlay_lag_config_unconfig(config, dut1, dut2, po_name, members_dut1, members_dut2):
"""
:param config:
:param dut1:
:param dut2:
:param po_name:
:param members_dut1:
:param members_dut2:
:return:
"""
st.banner("{}Configuring LAG between Spine and Leaf node.".format('Un' if config != 'yes' else ''))
result = True
if config == 'yes':
# configure po and add members
[out, exceptions] = \
utils.exec_all(fast_start, [[poapi.config_portchannel, dut1, dut2, po_name,
members_dut1, members_dut2, "add"]])
st.log([out, exceptions])
else:
# del po and delete members
[out, exceptions] = \
utils.exec_all(fast_start, [[poapi.config_portchannel, dut1, dut2, po_name,
members_dut1, members_dut2, "del"]])
st.log([out, exceptions])
return result | de4c8775b178380e5d9c90ee3c74082d6553d97f | 3,652,732 |
def _xrdcp_copyjob(wb, copy_job: CopyFile, xrd_cp_args: XrdCpArgs, printout: str = '') -> int:
"""xrdcp based task that process a copyfile and it's arguments"""
if not copy_job: return
overwrite = xrd_cp_args.overwrite
batch = xrd_cp_args.batch
sources = xrd_cp_args.sources
chunks = xrd_cp_args.chunks
chunksize = xrd_cp_args.chunksize
makedir = xrd_cp_args.makedir
tpc = xrd_cp_args.tpc
posc = xrd_cp_args.posc
# hashtype = xrd_cp_args.hashtype
streams = xrd_cp_args.streams
cksum = xrd_cp_args.cksum
timeout = xrd_cp_args.timeout
rate = xrd_cp_args.rate
cmdline = f'{copy_job.src} {copy_job.dst}'
return retf_print(_xrdcp_sysproc(cmdline, timeout)) | ce4475329a6f75d1819874492f26ceef7113a0f2 | 3,652,733 |
import logging
def merge_preclusters_ld(preclusters):
"""
Bundle together preclusters that share one LD snp
* [ Cluster ]
Returntype: [ Cluster ]
"""
clusters = list(preclusters)
for cluster in clusters:
chrom = cluster.gwas_snps[0].snp.chrom
start = min(gwas_snp.snp.pos for gwas_snp in cluster.gwas_snps)
end = max(gwas_snp.snp.pos for gwas_snp in cluster.gwas_snps)
# A dictionary that maps from snp to merged clusters
snp_owner = dict()
for cluster in preclusters:
for ld_snp in cluster.ld_snps:
# If this SNP has been seen in a different cluster
if ld_snp in snp_owner and snp_owner[ld_snp] is not cluster:
# Set other_cluster to that different cluster
other_cluster = snp_owner[ld_snp]
merged_cluster = merge_clusters(cluster, other_cluster)
# Remove the two previous clusters and replace them with
# the merged cluster
clusters.remove(cluster)
clusters.remove(other_cluster)
clusters.append(merged_cluster)
# Set the new cluster as the owner of these SNPs.
for snp in merged_cluster.ld_snps:
snp_owner[snp] = merged_cluster
for snp in cluster.ld_snps:
snp_owner[snp] = merged_cluster
# Skip the rest of this cluster.
break
else:
snp_owner[ld_snp] = cluster
for cluster in clusters:
chrom = cluster.gwas_snps[0].snp.chrom
start = min(gwas_snp.snp.pos for gwas_snp in cluster.gwas_snps)
end = max(gwas_snp.snp.pos for gwas_snp in cluster.gwas_snps)
logging.info("\tFound %i clusters from the GWAS peaks" % (len(clusters)))
return clusters | fd5409c1fc8463a2c795b2cc2685c1cf1a77f4ad | 3,652,734 |
def cross_recurrence_matrix( xps, yps ):
"""Cross reccurence matrix.
Args:
xps (numpy.array):
yps (numpy.array):
Returns:
numpy.array : A 2D numpy array.
"""
return recurrence_matrix( xps, yps ) | 017fa50fdd3c68e4bf1703635365d84c3508d0b3 | 3,652,735 |
import numpy
def define_panels(x, y, N=40):
"""
Discretizes the geometry into panels using 'cosine' method.
Parameters
----------
x: 1D array of floats
x-coordinate of the points defining the geometry.
y: 1D array of floats
y-coordinate of the points defining the geometry.
N: integer, optional
Number of panels;
default: 40.
Returns
-------
panels: 1D Numpy array of Panel objects.
The list of panels.
"""
R = (x.max()-x.min())/2.0 # circle radius
x_center = (x.max()+x.min())/2.0 # x-coordinate of circle center
theta = numpy.linspace(0.0, 2.0*numpy.pi, N+1) # array of angles
x_circle = x_center + R*numpy.cos(theta) # x-coordinates of circle
x_ends = numpy.copy(x_circle) # x-coordinate of panels end-points
y_ends = numpy.empty_like(x_ends) # y-coordinate of panels end-points
# extend coordinates to consider closed surface
x, y = numpy.append(x, x[0]), numpy.append(y, y[0])
# compute y-coordinate of end-points by projection
I = 0
for i in range(N):
while I < len(x)-1:
if (x[I] <= x_ends[i] <= x[I+1]) or (x[I+1] <= x_ends[i] <= x[I]):
break
else:
I += 1
a = (y[I+1]-y[I])/(x[I+1]-x[I])
b = y[I+1] - a*x[I+1]
y_ends[i] = a*x_ends[i] + b
y_ends[N] = y_ends[0]
# create panels
panels = numpy.empty(N, dtype=object)
for i in range(N):
panels[i] = Panel(x_ends[i], y_ends[i], x_ends[i+1], y_ends[i+1])
return panels | e34ae13a7cdddc8be69e5cbba84b964bd11e6ec3 | 3,652,736 |
def compile_for_llvm(function_name, def_string, optimization_level=-1,
globals_dict=None):
"""Compiles function_name, defined in def_string to be run through LLVM.
Compiles and runs def_string in a temporary namespace, pulls the
function named 'function_name' out of that namespace, optimizes it
at level 'optimization_level', -1 for the default optimization,
and marks it to be JITted and run through LLVM.
"""
namespace = {}
if globals_dict is None:
globals_dict = globals()
exec def_string in globals_dict, namespace
func = namespace[function_name]
if optimization_level is not None:
if optimization_level >= DEFAULT_OPT_LEVEL:
func.__code__.co_optimization = optimization_level
func.__code__.co_use_jit = True
return func | 61494afcde311e63138f75fb8bf59244d5c6d4e0 | 3,652,737 |
def setup_svm_classifier(training_data, y_training, testing_data, features, method="count", ngrams=(1,1)):
"""
Setup SVM classifier model using own implementation
Parameters
----------
training_data: Pandas dataframe
The dataframe containing the training data for the classifier
testing_data: Pandas dataframe
The dataframe containing the testing data for the classifier
y_training: Pandas dataframe
The dataframe containing the y training data for the classifier
features: String or list of strings if using multiple features
        Names of columns of df that are used for training the classifier
method: String
Can be either "count" or "tfidf" for specifying method of feature weighting
ngrams: tuple (min_n, max_n), with min_n, max_n integer values
range for ngrams used for vectorization
Returns
-------
model: SVM Classifier (scratch implementation)
Trained SVM Classifier from own implementation
vec: sklearn CountVectorizer or TfidfVectorizer
CountVectorizer or TfidfVectorizer fit and transformed for training data
x_testing: Pandas dataframe
The dataframe containing the test data for the SVM classifier
"""
# generate x and y training data
if method == "count":
vec, x_training, x_testing = define_features_vectorizer(features, training_data, testing_data,ngramrange=ngrams)
elif method == "tfidf":
vec, x_training, x_testing = define_features_tfidf(features, training_data, testing_data,ngramrange=ngrams)
else:
print("Method has to be either count or tfidf")
return 1
# train classifier
model = SVMClassifier_scratch()
model.fit(x_training, y_training)
return model, vec, x_testing | 111937690db2c170852b57cdbfc3135c628ac26c | 3,652,738 |
def buildHeaderString(keys):
"""
Use authentication keys to build a literal header string that will be
passed to the API with every call.
"""
headers = {
# Request headers
'participant-key': keys["participantKey"],
'Content-Type': 'application/json',
'Ocp-Apim-Subscription-Key': keys["subscriptionKey"]
}
return headers | 4505fb679dec9727a62dd328f92f832ab45c417b | 3,652,739 |
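A usage sketch of the function above, assuming a keys dict with the two expected entries; the URL is a placeholder.

import requests

keys = {"participantKey": "...", "subscriptionKey": "..."}  # placeholder credentials
headers = buildHeaderString(keys)
resp = requests.get("https://example.com/api/resource", headers=headers)  # hypothetical endpoint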
from typing import List
from typing import cast
def build_goods_query(
good_ids: List[str], currency_id: str, is_searching_for_sellers: bool
) -> Query:
"""
Build buyer or seller search query.
Specifically, build the search query
- to look for sellers if the agent is a buyer, or
- to look for buyers if the agent is a seller.
In particular, if the agent is a buyer and the demanded good ids are {'tac_good_0', 'tac_good_2', 'tac_good_3'}, the resulting constraint expression is:
tac_good_0 >= 1 OR tac_good_2 >= 1 OR tac_good_3 >= 1
That is, the OEF will return all the sellers that have at least one of the good in the query
(assuming that the sellers are registered with the data model specified).
:param good_ids: the list of good ids to put in the query
:param currency_id: the currency used for pricing and transacting.
:param is_searching_for_sellers: Boolean indicating whether the query is for sellers (supply) or buyers (demand).
:return: the query
"""
data_model = _build_goods_datamodel(
good_ids=good_ids, is_supply=is_searching_for_sellers
)
constraints = [Constraint(good_id, ConstraintType(">=", 1)) for good_id in good_ids]
constraints.append(Constraint("currency_id", ConstraintType("==", currency_id)))
constraint_expr = cast(List[ConstraintExpr], constraints)
if len(good_ids) > 1:
constraint_expr = [Or(constraint_expr)]
query = Query(constraint_expr, model=data_model)
return query | 97cccadc265743d743f3e2e757e0c81ff110072b | 3,652,740 |
import numpy as np
import pandas as pd
def make_piecewise_const(num_segments):
"""Makes a piecewise constant semi-sinusoid curve with num_segments segments."""
true_values = np.sin(np.arange(0, np.pi, step=0.001))
seg_idx = np.arange(true_values.shape[0]) // (true_values.shape[0] / num_segments)
return pd.Series(true_values).groupby(seg_idx).mean().tolist() | d6004488ae0109b730cb73dc9e58e65caaed8798 | 3,652,741 |
from fractions import Fraction
def convert_rational_from_float(number):
"""
converts a float to rational as form of a tuple.
"""
f = Fraction(str(number)) # str act as a round
return f.numerator, f.denominator | f3a00a150795b008ccc8667a3a0437eb2de2e2af | 3,652,742 |
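Going through str(number) makes Fraction parse the printed decimal rather than the exact binary float, so round-looking inputs stay round:

print(convert_rational_from_float(0.25))  # (1, 4)
print(convert_rational_from_float(0.1))   # (1, 10), not the exact binary expansion of 0.1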
def classname(obj):
"""Returns the name of an objects class"""
return obj.__class__.__name__ | 15b03c9ce341bd151187f03e8e95e6299e4756c3 | 3,652,743 |
def train(epoch, model, dataloader, optimizer, criterion, device, writer, cfg):
"""
training the model.
Args:
epoch (int): number of training steps.
model (class): model of training.
dataloader (dict): dict of dataset iterator. Keys are tasknames, values are corresponding dataloaders.
optimizer (Callable): optimizer of training.
criterion (Callable): loss criterion of training.
device (torch.device): device of training.
writer (class): output to tensorboard.
cfg: configutation of training.
Return:
losses[-1] : the loss of training
"""
model.train()
metric = PRMetric()
losses = []
for batch_idx, (x, y) in enumerate(dataloader, 1):
for key, value in x.items():
x[key] = value.to(device)
y = y.to(device)
optimizer.zero_grad()
y_pred = model(x)
if cfg.model_name == 'capsule':
loss = model.loss(y_pred, y)
else:
loss = criterion(y_pred, y)
loss.backward()
optimizer.step()
metric.update(y_true=y, y_pred=y_pred)
losses.append(loss.item())
data_total = len(dataloader.dataset)
data_cal = data_total if batch_idx == len(dataloader) else batch_idx * len(y)
if (cfg.train_log and batch_idx % cfg.log_interval == 0) or batch_idx == len(dataloader):
            # p, r, f1 are all macro-averaged; with micro-averaging all three are equal, which is defined as acc
acc, p, r, f1 = metric.compute()
logger.info(f'Train Epoch {epoch}: [{data_cal}/{data_total} ({100. * data_cal / data_total:.0f}%)]\t'
f'Loss: {loss.item():.6f}')
logger.info(f'Train Epoch {epoch}: Acc: {100. * acc:.2f}%\t'
f'macro metrics: [p: {p:.4f}, r:{r:.4f}, f1:{f1:.4f}]')
if cfg.show_plot and not cfg.only_comparison_plot:
if cfg.plot_utils == 'matplot':
plt.plot(losses)
plt.title(f'epoch {epoch} train loss')
plt.show()
if cfg.plot_utils == 'tensorboard':
for i in range(len(losses)):
writer.add_scalar(f'epoch_{epoch}_training_loss', losses[i], i)
return losses[-1] | 41de6aa37b41c837d9921e673414a70cc798478b | 3,652,744 |
def custom_timeseries_widget_for_behavior(node, **kwargs):
"""Use a custom TimeSeries widget for behavior data"""
if node.name == 'Velocity':
return SeparateTracesPlotlyWidget(node)
else:
return show_timeseries(node) | 34b296ab98b0eb6f9e2ddd080d5919a0a7158adc | 3,652,745 |
def db_tween_factory(handler, registry):
"""A database tween, doing automatic session management."""
def db_tween(request):
response = None
try:
response = handler(request)
finally:
session = getattr(request, "_db_session", None)
if session is not None:
# always rollback/close the read-only session
try:
session.rollback()
except DatabaseError:
registry.raven_client.captureException()
finally:
registry.db.release_session(session)
return response
return db_tween | 5e5150855db08931af8ba82e3f44e51b6caf54f3 | 3,652,746 |
import time
def calibrate_profiler(n, timer=time.time):
"""
Calibration routine to returns the fudge factor. The fudge factor
is the amount of time it takes to call and return from the
profiler handler. The profiler can't measure this time, so it
will be attributed to the user code unless it's subtracted off.
"""
starttime = timer()
p = Profiler(fudge=0.0)
for i in range(n):
a_very_long_function_name()
p.stop()
stoptime = timer()
simpletime = p.get_time('a_very_long_function_name')
realtime = stoptime - starttime
profiletime = simpletime + p.overhead
losttime = realtime - profiletime
return losttime/(2*n) # 2 profile events per function call | ee1f0af52f5530542503be4f277c90f249f83fb5 | 3,652,747 |
def getbias(x, bias):
"""Bias in Ken Perlin’s bias and gain functions."""
return x / ((1.0 / bias - 2.0) * (1.0 - x) + 1.0 + 1e-6) | 0bc551e660e133e0416f5e426e5c7c302ac3fbbe | 3,652,748 |
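A property worth noting: at x = 0.5 the function returns (approximately) the bias value itself, which is how Perlin characterised it.

print(getbias(0.5, 0.25))  # ~0.25
print(getbias(0.5, 0.75))  # ~0.75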
from typing import Dict
from typing import Optional
def get_exif_flash_fired(exif_data: Dict) -> Optional[bool]:
"""
Parses the "flash" value from exif do determine if it was fired.
Possible values:
+-------------------------------------------------------+------+----------+-------+
| Status | Hex | Binary | Fired |
+-------------------------------------------------------+------+----------+-------+
| No Flash | 0x0 | 00000000 | No |
| Fired | 0x1 | 00000001 | Yes |
| "Fired, Return not detected" | 0x5 | 00000101 | Yes |
| "Fired, Return detected" | 0x7 | 00000111 | Yes |
| "On, Did not fire" | 0x8 | 00001000 | No |
| "On, Fired" | 0x9 | 00001001 | Yes |
| "On, Return not detected" | 0xd | 00001011 | Yes |
| "On, Return detected" | 0xf | 00001111 | Yes |
| "Off, Did not fire" | 0x10 | 00010000 | No |
| "Off, Did not fire, Return not detected" | 0x14 | 00010100 | No |
| "Auto, Did not fire" | 0x18 | 00011000 | No |
| "Auto, Fired" | 0x19 | 00011001 | Yes |
| "Auto, Fired, Return not detected" | 0x1d | 00011101 | Yes |
| "Auto, Fired, Return detected" | 0x1f | 00011111 | Yes |
| No flash function | 0x20 | 00100000 | No |
| "Off, No flash function" | 0x30 | 00110000 | No |
| "Fired, Red-eye reduction" | 0x41 | 01000001 | Yes |
| "Fired, Red-eye reduction, Return not detected" | 0x45 | 01000101 | Yes |
| "Fired, Red-eye reduction, Return detected" | 0x47 | 01000111 | Yes |
| "On, Red-eye reduction" | 0x49 | 01001001 | Yes |
| "On, Red-eye reduction, Return not detected" | 0x4d | 01001101 | Yes |
| "On, Red-eye reduction, Return detected" | 0x4f | 01001111 | Yes |
| "Off, Red-eye reduction" | 0x50 | 01010000 | No |
| "Auto, Did not fire, Red-eye reduction" | 0x58 | 01011000 | No |
| "Auto, Fired, Red-eye reduction" | 0x59 | 01011001 | Yes |
| "Auto, Fired, Red-eye reduction, Return not detected" | 0x5d | 01011101 | Yes |
| "Auto, Fired, Red-eye reduction, Return detected" | 0x5f | 01011111 | Yes |
+-------------------------------------------------------+------+----------+-------+
:param exif_data:
:return: If the flash was fired, or None if the exif information is not present
"""
if 'Flash' not in exif_data:
return None
return bool((int(exif_data['Flash']) & 1) > 0) | 82b4fc095d60426622202243f141614b9632340f | 3,652,749 |
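A quick sanity check against the table above; the fired flag is simply bit 0 of the value.

print(get_exif_flash_fired({'Flash': 0x19}))  # True  ("Auto, Fired")
print(get_exif_flash_fired({'Flash': 0x10}))  # False ("Off, Did not fire")
print(get_exif_flash_fired({}))               # None  (no Flash tag present)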
import shlex
import os
def gyp_generator_flags():
"""Parses and returns GYP_GENERATOR_FLAGS env var as a dictionary."""
return dict(arg.split('=', 1)
for arg in shlex.split(os.environ.get('GYP_GENERATOR_FLAGS', ''))) | 51777f7b9ad87291dc176ce4673cb8a9cd5864f9 | 3,652,750 |
import requests
import json
def get_geoJson(addr):
"""
Queries the Google Maps API for specified address, returns
a dict of the formatted address, the state/territory name, and
a float-ified version of the latitude and longitude.
"""
res = requests.get(queryurl.format(addr=addr, gmapkey=gmapkey))
dictr = {}
if res.json()["status"] == "ZERO_RESULTS" or not res.ok:
dictr["res"] = res
else:
print(json.dumps(res.json(), indent=4))
rresj = res.json()["results"][0]
dictr["formatted_address"] = rresj["formatted_address"]
dictr["latlong"] = rresj["geometry"]["location"]
for el in rresj["address_components"]:
if el["types"][0] == "administrative_area_level_1":
dictr["state"] = el["short_name"]
return dictr | 500c2aa18c8b3b305c912b91efcc9f51121ca7b3 | 3,652,751 |
def display_data_in_new_tab(message, args, pipeline_data):
""" Displays the current message data in a new tab """
window = sublime.active_window()
tab = window.new_file()
tab.set_scratch(True)
edit_token = message['edit_token']
tab.insert(edit_token, 0, message['data'])
return tab | a64b7ac4138b921a53adb96b9933f1825048b955 | 3,652,752 |
def _cost( q,p, xt_measure, connec, params ) :
"""
Returns a total cost, sum of a small regularization term and the data attachment.
.. math ::
        C(q_0, p_0) = 0.01 * H(q_0, p_0) + 1 * A(q_1, x_t)
Needless to say, the weights can be tuned according to the signal-to-noise ratio.
"""
s,r = params # Deformation scale, Attachment scale
q1 = _HamiltonianShooting(q,p,s)[0] # Geodesic shooting from q0 to q1
# To compute a data attachment cost, we need the set of vertices 'q1' into a measure.
q1_measure = Curve._vertices_to_measure( q1, connec )
attach_info = _data_attachment( q1_measure, xt_measure, r )
return [ .01* _Hqp(q, p, s) + 1* attach_info[0] , attach_info[1] ] | 193d23a11d9704867d0a89846a6a7187de1e953a | 3,652,753 |
def get_full_lang_code(lang=None):
""" Get the full language code
Args:
lang (str, optional): A BCP-47 language code, or None for default
Returns:
str: A full language code, such as "en-us" or "de-de"
"""
if not lang:
lang = __active_lang
return lang or "en-us" | 1e0e49797dc5ed3f1fd148ac4ca1ca073231268c | 3,652,754 |
def acquire_images(cam, nodemap, nodemap_tldevice):
"""
This function acquires and saves 10 images from a device.
:param cam: Camera to acquire images from.
:param nodemap: Device nodemap.
:param nodemap_tldevice: Transport layer device nodemap.
:type cam: CameraPtr
:type nodemap: INodeMap
:type nodemap_tldevice: INodeMap
:return: True if successful, False otherwise.
:rtype: bool
"""
print '*** IMAGE ACQUISITION ***\n'
try:
result = True
# Set acquisition mode to continuous
#
# *** NOTES ***
# Because the example acquires and saves 10 images, setting acquisition
# mode to continuous lets the example finish. If set to single frame
# or multiframe (at a lower number of images), the example would just
# hang. This would happen because the example has been written to
# acquire 10 images while the camera would have been programmed to
# retrieve less than that.
#
# Setting the value of an enumeration node is slightly more complicated
# than other node types. Two nodes must be retrieved: first, the
# enumeration node is retrieved from the nodemap; and second, the entry
# node is retrieved from the enumeration node. The integer value of the
# entry node is then set as the new value of the enumeration node.
#
# Notice that both the enumeration and the entry nodes are checked for
# availability and readability/writability. Enumeration nodes are
# generally readable and writable whereas their entry nodes are only
# ever readable.
#
# Retrieve enumeration node from nodemap
# In order to access the node entries, they have to be casted to a pointer type (CEnumerationPtr here)
node_acquisition_mode = PySpin.CEnumerationPtr(nodemap.GetNode('AcquisitionMode'))
if not PySpin.IsAvailable(node_acquisition_mode) or not PySpin.IsWritable(node_acquisition_mode):
print 'Unable to set acquisition mode to continuous (enum retrieval). Aborting...'
return False
# Retrieve entry node from enumeration node
node_acquisition_mode_continuous = node_acquisition_mode.GetEntryByName('Continuous')
if not PySpin.IsAvailable(node_acquisition_mode_continuous) or not PySpin.IsReadable(node_acquisition_mode_continuous):
print 'Unable to set acquisition mode to continuous (entry retrieval). Aborting...'
return False
# Retrieve integer value from entry node
acquisition_mode_continuous = node_acquisition_mode_continuous.GetValue()
# Set integer value from entry node as new value of enumeration node
node_acquisition_mode.SetIntValue(acquisition_mode_continuous)
print 'Acquisition mode set to continuous...'
# Begin acquiring images
#
# *** NOTES ***
# What happens when the camera begins acquiring images depends on the
# acquisition mode. Single frame captures only a single image, multi
        # frame captures a set number of images, and continuous captures a
# continuous stream of images. Because the example calls for the
# retrieval of 10 images, continuous mode has been set.
#
# *** LATER ***
# Image acquisition must be ended when no more images are needed.
cam.BeginAcquisition()
print 'Acquiring images...'
# Retrieve device serial number for filename
#
# *** NOTES ***
# The device serial number is retrieved in order to keep cameras from
# overwriting one another. Grabbing image IDs could also accomplish
# this.
device_serial_number = ''
node_device_serial_number = PySpin.CStringPtr(nodemap_tldevice.GetNode('DeviceSerialNumber'))
if PySpin.IsAvailable(node_device_serial_number) and PySpin.IsReadable(node_device_serial_number):
device_serial_number = node_device_serial_number.GetValue()
print 'Device serial number retrieved as %s...' % device_serial_number
# Retrieve, convert, and save images
for i in range(NUM_IMAGES):
try:
# Retrieve next received image
#
# *** NOTES ***
# Capturing an image houses images on the camera buffer. Trying
# to capture an image that does not exist will hang the camera.
#
# *** LATER ***
# Once an image from the buffer is saved and/or no longer
# needed, the image must be released in order to keep the
# buffer from filling up.
image_result = cam.GetNextImage(1000)
# Ensure image completion
#
# *** NOTES ***
# Images can easily be checked for completion. This should be
# done whenever a complete image is expected or required.
# Further, check image status for a little more insight into
# why an image is incomplete.
if image_result.IsIncomplete():
print 'Image incomplete with image status %d ...' % image_result.GetImageStatus()
else:
# Print image information; height and width recorded in pixels
#
# *** NOTES ***
# Images have quite a bit of available metadata including
# things such as CRC, image status, and offset values, to
# name a few.
width = image_result.GetWidth()
height = image_result.GetHeight()
print 'Grabbed Image %d, width = %d, height = %d' % (i, width, height)
# Convert image to mono 8
#
# *** NOTES ***
# Images can be converted between pixel formats by using
# the appropriate enumeration value. Unlike the original
# image, the converted one does not need to be released as
# it does not affect the camera buffer.
#
# When converting images, color processing algorithm is an
# optional parameter.
image_converted = image_result.Convert(PySpin.PixelFormat_Mono8, PySpin.HQ_LINEAR)
# Create a unique filename
if device_serial_number:
filename = 'Acquisition-%s-%d.jpg' % (device_serial_number, i)
else: # if serial number is empty
filename = 'Acquisition-%d.jpg' % i
# Save image
#
# *** NOTES ***
# The standard practice of the examples is to use device
# serial numbers to keep images of one device from
# overwriting those of another.
image_converted.Save(filename)
print 'Image saved at %s' % filename
# Release image
#
# *** NOTES ***
# Images retrieved directly from the camera (i.e. non-converted
# images) need to be released in order to keep from filling the
# buffer.
image_result.Release()
print ''
except PySpin.SpinnakerException as ex:
print 'Error: %s' % ex
return False
# End acquisition
#
# *** NOTES ***
# Ending acquisition appropriately helps ensure that devices clean up
# properly and do not need to be power-cycled to maintain integrity.
cam.EndAcquisition()
except PySpin.SpinnakerException as ex:
print 'Error: %s' % ex
return False
return result | dd3454b3ddbff27dd73750c630ff5e63737fa50c | 3,652,755 |
import importlib
def apply_operations(source: dict, graph: BaseGraph) -> BaseGraph:
"""
Apply operations as defined in the YAML.
Parameters
----------
source: dict
The source from the YAML
graph: kgx.graph.base_graph.BaseGraph
The graph corresponding to the source
Returns
-------
kgx.graph.base_graph.BaseGraph
The graph corresponding to the source
"""
operations = source['operations']
for operation in operations:
op_name = operation['name']
op_args = operation['args']
module_name = '.'.join(op_name.split('.')[0:-1])
function_name = op_name.split('.')[-1]
f = getattr(importlib.import_module(module_name), function_name)
log.info(f"Applying operation {op_name} with args: {op_args}")
f(graph, **op_args)
return graph | d78410d27da574efc30d08555eaefde0c77cb513 | 3,652,756 |
def tt_logdotexp(A, b):
"""Construct a Theano graph for a numerically stable log-scale dot product.
The result is more or less equivalent to `tt.log(tt.exp(A).dot(tt.exp(b)))`
"""
A_bcast = A.dimshuffle(list(range(A.ndim)) + ["x"])
sqz = False
shape_b = ["x"] + list(range(b.ndim))
if len(shape_b) < 3:
shape_b += ["x"]
sqz = True
b_bcast = b.dimshuffle(shape_b)
res = tt_logsumexp(A_bcast + b_bcast, axis=1)
return res.squeeze() if sqz else res | f543557a0b24159ede7d8cc0c8ed5df3ed2123f4 | 3,652,757 |
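The identity exploited above can be checked directly in NumPy, independent of the Theano graph machinery: each entry of exp(A).dot(exp(b)) is a sum of exp(A_ij + b_j), so its log is a logsumexp over j.

import numpy as np
from scipy.special import logsumexp

A = np.log(np.random.rand(3, 4))
b = np.log(np.random.rand(4))
naive = np.log(np.exp(A).dot(np.exp(b)))
stable = logsumexp(A + b[None, :], axis=1)
print(np.allclose(naive, stable))  # True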
import numpy as np
from six import string_types  # assumption: string_types likely comes from six in the original module
def _check_like(val, _np_types, _native_types, check_str=None):  # pylint: disable=too-many-return-statements
"""
    Checks the following:
- if val is instance of _np_types or _native_types
- if val is a list or ndarray of _np_types or _native_types
- if val is a string or list of strings that can be parsed by check_str
Does not check:
- if val is an ndarray of strings that can be parsed by check_str
"""
_all_types = _np_types + _native_types
if isinstance(val, _all_types):
return True
elif isinstance(val, string_types):
return check_str and check_str(val)
elif isinstance(val, (list, tuple)):
for v in val:
if isinstance(v, string_types):
if check_str and check_str(v):
continue
if not isinstance(v, _all_types):
return False
return True
elif hasattr(val, 'dtype'):
if val.dtype == np.object:
return all(isinstance(v, _native_types) for v in val)
else:
return val.dtype.type in _np_types
else:
return False | ab7875d329c09a491178b721c112b64142d2e566 | 3,652,758 |
import numpy as np
def rotation_matrix(x, y, theta):
    """ Rotate the point (x, y) about the origin (0, 0) by angle theta.
theta must be in radians
"""
return [np.cos(theta) * x - np.sin(theta) * y, np.sin(theta) * x + np.cos(theta) * y] | 53f646429f7a4b719b197cacbc71442ebef719d4 | 3,652,759 |
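For example, rotating the point (1, 0) by 90 degrees about the origin:

import numpy as np

print(rotation_matrix(1, 0, np.pi / 2))  # [~0.0, 1.0]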
from typing import List
def create_players(num_human: int, num_random: int, smart_players: List[int]) \
-> List[Player]:
"""Return a new list of Player objects.
<num_human> is the number of human player, <num_random> is the number of
random players, and <smart_players> is a list of difficulty levels for each
SmartPlayer that is to be created.
The list should contain <num_human> HumanPlayer objects first, then
<num_random> RandomPlayer objects, then the same number of SmartPlayer
objects as the length of <smart_players>. The difficulty levels in
<smart_players> should be applied to each SmartPlayer object, in order.
"""
goal = generate_goals(num_random + num_human + len(smart_players))
final = []
for x in range(num_human):
final.append(HumanPlayer(x, goal[x]))
for y in range(num_random):
final.append(RandomPlayer(num_human + y, goal[num_human + y]))
for z in range(len(smart_players)):
final.append(SmartPlayer(num_human + num_random + z,
goal[num_human + num_random + z],
smart_players[z]))
return final | 10a7e840992417d46c79d794e66e1de5de16dd95 | 3,652,760 |
import os
import stat
def file_info(path):
"""
Return file information on `path`. Example output:
{
'filename': 'passwd',
'dir': '/etc/',
'path': '/etc/passwd',
'type': 'file',
'size': 2790,
'mode': 33188,
'uid': 0,
'gid': 0,
'device': 64769
}
"""
fname = os.path.basename(path)
fdir = os.path.dirname(path)
fstat = os.lstat(path)
ftype = file_types.get(stat.S_IFMT(fstat.st_mode), "unknown")
return {
"filename": fname,
"dir": fdir,
"path": path,
"type": ftype,
"size": fstat.st_size,
"mode": fstat.st_mode,
"uid": fstat.st_uid,
"gid": fstat.st_gid,
"device": fstat.st_dev,
} | 36d8c92a6cc20f95f73f3a2c6c986222f1e9633e | 3,652,761 |
def extract_test_params(root):
"""VFT parameters, e.g. TEST_PATTERN, TEST_STRATEGY, ..."""
res = {}
'''
xpath = STATIC_TEST + '*'
elems = root.findall(xpath) + root.findall(xpath+'/FIXATION_CHECK*')
#return {e.tag:int(e.text) for e in elems if e.text.isdigit()}
print(xpath)
for e in elems:
print(e.tag)
if e.text.isdigit():
res[e.tag] = int(e.text)
elif len(e.text) > 1:
#print(e.tag, e.text,type(e.text),'$'*100)
res[e.tag] =e.text
else:
for ee in e:
if ee.tag not in ['QUESTIONS_ASKED','SF']:
if ee.text.isdigit():
res[ee.tag] = int(ee.text)
elif len(ee.text) > 1:
res[ee.tag] = ee.text
'''
for p in params:
xpath = STATIC_TEST + p
el = root.findall(xpath)
if not el:
res[p.split('/')[-1]] =''
elif el[0].text.isdigit():
res[el[0].tag] = int(el[0].text)
else:
res[el[0].tag] = el[0].text
for pth in [DISPLAY_NAME,VISIT_DATE,SERIES_DATE_TIME,TEST_NODE+'PUPIL_DIAMETER',TEST_NODE+'PUPIL_DIAMETER_AUTO',TEST_NODE+'EXAM_TIME']:
e=root.find(pth)
if e.text is None:
res[e.tag] = e.text
else:
if e.text.isdigit():
res[e.tag] = int(e.text)
else:
res[e.tag] = e.text
'''
vkind = ['THRESHOLD', 'TOTAL', 'PATTERN']
for vk in vkind:
vs = extract_vft_values(root, vk)
mat = vf2matrix(vs)
res[vk+'_MATRIX'] = [mat]
'''
return res | ebd0e1d86af8d741ff993fc54b6ef4b3a7be6ac4 | 3,652,762 |
from typing import Optional
from typing import List
def csc_list(
city: str,
state: Optional[str] = None,
country: Optional[str] = None,
) -> List[db.Geoname]:
"""
>>> [g.country_code for g in csc_list('sydney')]
['AU', 'CA', 'US', 'US', 'ZA', 'VU', 'US', 'US', 'CA']
>>> [g.name for g in csc_list('sydney', country='australia')]
['Sydney']
>>> [g.timezone for g in csc_list('sydney', state='victoria')][:3]
['Australia/Sydney', 'America/Glace_Bay', 'America/Phoenix']
"""
if state and country:
cinfo = db.country_info(country)
states = [
g for g in db.select_geonames_name(state)
if g.feature_class == 'A' and g.country_code == cinfo.iso
]
cities = [
g for g in db.select_geonames_name(city)
if g.feature_class == 'P' and g.country_code == cinfo.iso
]
city_matches = list(_match(cities, states))
if city_matches:
return [c for (c, _) in city_matches]
#
# Try omitting state. If the country is specified, that alone may be sufficient.
#
if country:
cinfo = db.country_info(country)
cities = [
g for g in db.select_geonames_name(city)
if g.feature_class == 'P' and g.country_code == cinfo.iso
]
if cities:
return cities
#
# Perhaps state is really a city?
#
if state and country:
cinfo = db.country_info(country)
cities = [
g for g in db.select_geonames_name(state)
if g.country_code == cinfo.iso
]
if cities:
return cities
#
# Perhaps the specified country is wrong?
#
if state:
states = [g for g in db.select_geonames_name(state) if g.feature_class == 'A']
cities = [g for g in db.select_geonames_name(city) if g.feature_class == 'P']
city_matches = list(_match(cities, states))
if city_matches:
return [c for (c, _) in city_matches]
#
# Perhaps city itself is unique?
#
cities = [g for g in db.select_geonames_name(city) if g.feature_class == 'P']
if cities:
return cities
return list(db.select_geonames_name(city)) | 6c27a16c22a40d095bd3e3fad7660bbee867751e | 3,652,763 |
from typing import Iterable
from typing import Tuple
def calculate_frame_score(current_frame_hsv: Iterable[cupy.ndarray],
last_frame_hsv: Iterable[cupy.ndarray]) -> Tuple[float]:
"""Calculates score between two adjacent frames in the HSV colourspace. Frames should be
split, e.g. cv2.split(cv2.cvtColor(frame_data, cv2.COLOR_BGR2HSV)).
Arguments:
        current_frame_hsv: Current frame.
last_frame_hsv: Previous frame.
Returns:
Tuple containing the average pixel change for each component as well as the average
across all components, e.g. (avg_h, avg_s, avg_v, avg_all).
"""
current_frame_hsv = [x.astype(cupy.int32) for x in current_frame_hsv]
last_frame_hsv = [x.astype(cupy.int32) for x in last_frame_hsv]
delta_hsv = [0, 0, 0, 0]
for i in range(3):
num_pixels = current_frame_hsv[i].shape[0] * current_frame_hsv[i].shape[1]
delta_hsv[i] = cupy.sum(
cupy.abs(current_frame_hsv[i] - last_frame_hsv[i])) / float(num_pixels)
delta_hsv[3] = sum(delta_hsv[0:3]) / 3.0
return tuple(delta_hsv) | db5819ab0696364569f79f326ab7e28f0f0371b3 | 3,652,764 |
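A minimal usage sketch (not from the original source; assumes cupy is installed and each frame is already HSV-split into three per-channel arrays):

import cupy
h, w = 4, 4
frame_a = [cupy.random.randint(0, 256, (h, w), dtype=cupy.uint8) for _ in range(3)]
frame_b = [cupy.random.randint(0, 256, (h, w), dtype=cupy.uint8) for _ in range(3)]
avg_h, avg_s, avg_v, avg_all = calculate_frame_score(frame_a, frame_b)
print(avg_all)  # mean absolute per-pixel change across all three channels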
import numpy as np

def huber_loss_function(sq_resi, k=1.345):
"""Robust loss function which penalises outliers, as detailed in Jankowski et al (2018).
Parameters
----------
sq_resi : `float` or `list`
A single or list of the squared residuals.
k : `float`, optional
A constant that defines at which distance the loss function starts to penalize outliers. |br| Default: 1.345.
Returns
-------
rho : `float` or `list`
The modified squared residuals.
"""
single_value = False
if isinstance(sq_resi, float) or isinstance(sq_resi, int):
sq_resi = np.array([sq_resi])
single_value = True
elif isinstance(sq_resi, list):
sq_resi = np.array(sq_resi)
rho = []
residual = np.sqrt(abs(sq_resi))
for j in range(len(residual)):
if residual[j] < k:
rho.append( sq_resi[j]/2 )
else:
rho.append( k * residual[j] - 1./2. * k**2 )
if single_value:
return rho[0]
else:
return rho | bf8d5f3aa042297014b7b93316fe557784c4c5b1 | 3,652,765 |
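A quick sanity check of the two branches (residuals below k stay quadratic, larger ones grow only linearly):

print(huber_loss_function(0.25))         # 0.125: |r| = 0.5 < k, quadratic branch
print(huber_loss_function([0.25, 9.0]))  # [0.125, ~3.13]: |r| = 3 hits the linear branch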
import re

import emoji
import mojimoji
import neologdn

def clean_sentence(sentence: str) -> str:
    """
    Preprocessing applied to text before it is fed to BERT.
    Args:
        sentence (str): raw input text
    Returns:
        str: the cleaned text
    """
sentence = re.sub(r"<[^>]*?>", "", sentence) # タグ除外
sentence = mojimoji.zen_to_han(sentence, kana=False)
sentence = neologdn.normalize(sentence)
sentence = re.sub(
r'[!"#$%&\'\\\\()*+,\-./:;<=>?@\[\]\^\_\`{|}~「」〔〕“”〈〉『』【】&*・()$#@?!`+¥%︰-@]。、♪',
" ",
sentence,
    )  # strip punctuation and symbols
sentence = re.sub(r"https?://[\w/:%#\$&\?\(\)~\.=\+\-]+", "", sentence)
sentence = re.sub(r"[0-90-9a-zA-Za-zA-Z]+", " ", sentence)
sentence = "".join(
[
emoji_dict[c].get("short_name", "") if c in emoji.UNICODE_EMOJI["en"] else c
for c in sentence
]
)
return sentence | bf5f9df5ab04ff96ae7f8199dfbeafae30d764eb | 3,652,766 |
from typing import Union
from enum import Enum

import flask
def assert_user(user_id: int, permission: Union[str, Enum] = None) -> bool:
"""
Assert that a user_id belongs to the requesting user, or that
the requesting user has a given permission.
"""
permission = (
permission.value if isinstance(permission, Enum) else permission
)
return flask.g.user.id == user_id or flask.g.user.has_permission(
permission
) | 6ef54d60a0b62e4ffb1330dba7bffeeac0df03c7 | 3,652,767 |
def single_prob(n, n0, psi, c=2):
"""
Eq. 1.3 in Conlisk et al. (2007), note that this implmentation is
only correct when the variable c = 2
Note: if psi = .5 this is the special HEAP case in which the
function no longer depends on n.
c = number of cells
"""
a = (1 - psi) / psi
F = (get_F(a, n) * get_F((c - 1) * a, n0 - n)) / get_F(c * a, n0)
return float(F) | 05c0c627a05bb683fa3c20cacefa121f5cddba14 | 3,652,768 |
def array_pair_sum_iterative(arr, k):
"""
returns the array of pairs using an iterative method.
complexity: O(n^2)
"""
result = []
for i in range(len(arr)):
for j in range(i + 1, len(arr)):
if arr[i] + arr[j] == k:
result.append([arr[i], arr[j]])
return result | c4f0eb5e290c784a8132472d85023662be291a71 | 3,652,769 |
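Example usage:

print(array_pair_sum_iterative([1, 2, 3, 4], 5))  # [[1, 4], [2, 3]]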
def merge_named_payload(name_to_merge_op):
"""Merging dictionary payload by key.
name_to_merge_op is a dict mapping from field names to merge_ops.
Example:
If name_to_merge_op is
{
'f1': mergeop1,
'f2': mergeop2,
'f3': mergeop3
},
Then two payloads { 'f1': a1, 'f2': b1, 'f3': c1 } and
{ 'f1': a2, 'f2': b2, 'f3': c2 } will be merged into
{
'f1': mergeop1(a1, a2),
'f2': mergeop2(b1, b2),
'f3': mergeop3(c1, c2)
}.
"""
def merge(p1,p2):
p = {}
for name, op in name_to_merge_op.items():
p[name] = op(p1[name], p2[name])
return p
return merge | ee20147b7937dff208da6ea0d025fe466d8e92ed | 3,652,770 |
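Example usage, merging a count field by addition and a max field by max:

import operator
merge = merge_named_payload({'count': operator.add, 'max': max})
print(merge({'count': 2, 'max': 7}, {'count': 3, 'max': 5}))  # {'count': 5, 'max': 7}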
def euclidean_distance(this_set, other_set, bsf_dist):
    """Calculate the squared Euclidean distance between 2 1-D arrays.
    If the running sum exceeds bsf_dist, the calculation is abandoned early
    and bsf_dist is returned (a standard lower-bounding optimisation).
    Args:
        this_set: ndarray
            The array
        other_set: ndarray
            The comparative array.
        bsf_dist:
            The best-so-far (squared) distance.
    Returns:
        output: float
            The accumulated squared Euclidean distance, or bsf_dist if exceeded.
    """
sum_dist = 0
for index in range(0, len(this_set)):
sum_dist += (this_set[index] - other_set[index]) ** 2
if sum_dist > bsf_dist:
return bsf_dist
return sum_dist | 7055c0de77cad987738c9b3ec89b0381002fbfd4 | 3,652,771 |
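A small example showing the early-abandoning behaviour (the second call stops as soon as the running sum exceeds bsf_dist):

a = [0.0, 0.0, 0.0]
b = [1.0, 2.0, 2.0]
print(euclidean_distance(a, b, bsf_dist=100.0))  # 9.0 (full squared distance)
print(euclidean_distance(a, b, bsf_dist=3.0))    # 3.0 (abandoned early)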
from typing import List
from typing import Union
import subprocess
from logging import debug, warning  # assumed source of the log helpers used below

# _prepare_env, _prepare_cmd and _parse_output are assumed to be helpers
# defined elsewhere in this module.
def run_cmd_simple(cmd: str,
variables: dict,
env=None,
args: List[str] = None,
libraries=None) -> Union[dict, str]:
"""
Run cmd with variables written in environment.
:param args: cmd arguments
:param cmd: to run
:param variables: variables
:param env: custom environment
:param libraries: additional libraries used for source compilation
:return: output in json (if can be parsed) or plaintext
"""
env = _prepare_env(variables, env=env)
cmd, cwd = _prepare_cmd(cmd, args, variables, libraries=libraries)
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=env, cwd=cwd)
if p.wait() == 0:
out = p.stdout.read().decode()
debug(out)
return _parse_output(out)
else:
out = p.stdout.read().decode()
warning(out)
raise Exception('Execution failed.') | b16693f291ade54f470e4c7173cf06cca774cdf6 | 3,652,772 |
def host(provider: Provider) -> Host:
"""Create host"""
return provider.host_create(utils.random_string()) | 36e1b6f0ddf8edc055d56cac746271f5d3801111 | 3,652,773 |
import numpy as np

# gamma_K_function and bj_p89 are assumed helpers from the same module.
def bj_struktur_p89(x, n: int = 5, **s):  # brute force
    """Evaluate the b_j terms via bj_p89 for each x and each j = 0..n.
    :param x: evaluation points (1-D array)
    :type x: numpy.ndarray
    :param n: highest index j to evaluate, defaults to 5
    :type n: int, optional
    :return: matrix of shape (x.size, n + 1) with entry [i, j] = bj_p89(K, x[i], j)
    :rtype: numpy.ndarray
    """
gamma, K = gamma_K_function(**s)
b_j = np.empty((x.size, n + 1))
for i, xi in enumerate(x):
for j in range(n + 1):
b_j[i, j] = bj_p89(K, xi, j)
return b_j | d21fc501411ada9f2173da7ca447418e2f51a86f | 3,652,774 |
import logging

import numpy as np

logger = logging.getLogger(__name__)

def _get_pulse_width_and_area(tr, ipick, icross, max_pulse_duration=.08):
"""
Measure the width & area of the arrival pulse on the displacement trace
Start from the displacement peak index (=icross - location of first zero
crossing of velocity)
:param tr: displacement trace
:type tr: obspy.core.trace.Trace or microquake.core.Trace
:param ipick: index of pick in trace
:type ipick: int
:param icross: index of first zero crossing in corresponding velocity trace
:type icross: int
:param max_pulse_duration: max allowed duration (sec) beyond pick to search
for zero crossing of disp pulse
:type max_pulse_duration: float
return pulse_width, pulse_area
:returns: pulse_width, pulse_area: Returns the width and area of the
displacement pulse
:rtype: float, float
"""
fname = '_get_pulse_width_and_area'
data = tr.data
sign = np.sign(data)
nmax = int(max_pulse_duration * tr.stats.sampling_rate)
iend = ipick + nmax
epsilon = 1e-10
if icross >= iend:
i = iend - 1
for i in range(icross, iend):
diff = np.abs(data[i] - data[ipick])
if diff < epsilon or sign[i] != sign[icross]:
break
if i == iend - 1:
logger.info("%s: Unable to locate termination of displacement "
"pulse for tr:%s!" % (fname, tr.get_id()))
return 0, 0
istop = i
pulse_width = float(istop - ipick) * tr.stats.delta
pulse_area = np.trapz(data[ipick:istop], dx=tr.stats.delta)
return pulse_width, pulse_area | 43598f797f2956def740881b33b38d8824ba7ff3 | 3,652,775 |
def load_backend(name, options=None):
"""Load the named backend.
Returns the backend class registered for the name.
If you pass None as the name, this will load the default backend.
    See the documentation for get_default() for more information.
Raises:
UnknownBackend: The name is not recognized.
LoadingError: There was an error loading the backend.
"""
if name is None:
assert options is None
return get_default()
if options is None:
options = {}
if name not in _backends:
raise UnknownBackend(name)
options = _backends[name][1](**options)
key = (name, tuple(sorted(list(options.items()))))
res = _active_backends.get(key, None)
if res is None:
try:
res = _backends[name][0](options)
_active_backends[key] = res
except Exception as e:
raise LoadingError(name) from e
return res | 1df4c1b0c0d9d81e607a5884f3391883ab6ea3c5 | 3,652,776 |
def test() -> ScadObject:
"""
Create something.
"""
result = IDUObject()
result += box(10, 10, 5, center=True).translated((0, 0, -1)).named("Translated big box")
result -= box(4, 4, 4, center=True)
result += box(10, 10, 5)
result *= sphere(7).translated((0, 0, 1))
return (
result.rotated((-45, 0, 0))
.rendered(10)
.commented("Render it now!")
.colored("green", alpha=0.5)
.commented(
"""
This file is autogenerated by r7scad.
It is not supposed to be edited manually.
"""
)
) | 2d8c413a6b60969de60746c4fb356da88a95e06a | 3,652,777 |
from collections import namedtuple

def rollout(policy, env_class, step_fn=default_rollout_step, max_steps=None):
"""Perform rollout using provided policy and env.
:param policy: policy to use when simulating these episodes.
:param env_class: class to instantiate an env object from.
:param step_fn: a function to be called at each step of rollout.
The function can have 2 or 3 parameters, and must return an action:
* 2 parameter definition: policy, observation.
* 3 parameter definition: policy, observation, step_num.
Default value is ``agentos.core.default_rollout_step``.
:param max_steps: cap on number of steps per episode.
:return: the trajectory that was followed during this rollout.
A trajectory is a named tuple that contains the initial observation (a
scalar) as well as the following arrays: actions, observations,
rewards, dones, contexts. The ith entry of each array corresponds to
the action taken at the ith step of the rollout, and the respective
results returned by the environment after taking that action. To learn
more about the semantics of these, see the documentation and code of
gym.Env.
"""
actions = []
observations = []
rewards = []
dones = []
contexts = []
env = env_class()
obs = env.reset()
init_obs = obs
done = False
step_num = 0
while True:
if done or (max_steps and step_num >= max_steps):
break
if step_fn.__code__.co_argcount == 2:
action = step_fn(policy, obs)
elif step_fn.__code__.co_argcount == 3:
action = step_fn(policy, obs, step_num)
else:
raise TypeError("step_fn must accept 2 or 3 parameters.")
obs, reward, done, ctx = env.step(action)
actions.append(action)
observations.append(obs)
rewards.append(reward)
dones.append(done)
contexts.append(ctx)
step_num += 1
Trajectory = namedtuple(
"Trajectory",
[
"init_obs",
"actions",
"observations",
"rewards",
"dones",
"contexts",
],
)
return Trajectory(
init_obs, actions, observations, rewards, dones, contexts
) | d5ac3246338165d3cfdb5e37ae5a6cbbe5df0408 | 3,652,778 |
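A minimal sketch exercising rollout with a hypothetical two-step environment and a constant-action policy (the real policy and env classes come from the surrounding project):

class _DummyEnv:
    """Emits obs 1, 2 and terminates after two steps."""
    def __init__(self):
        self._t = 0
    def reset(self):
        self._t = 0
        return 0
    def step(self, action):
        self._t += 1
        return self._t, 1.0, self._t >= 2, {}

traj = rollout(policy=None, env_class=_DummyEnv,
               step_fn=lambda policy, obs: 0, max_steps=10)
print(traj.rewards)  # [1.0, 1.0]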
def get_source(location, **kwargs):
"""Factory for StubSource Instance.
Args:
location (str): PathLike object or valid URL
Returns:
obj: Either Local or Remote StubSource Instance
"""
try:
utils.ensure_existing_dir(location)
except NotADirectoryError:
return RemoteStubSource(location, **kwargs)
else:
return LocalStubSource(location, **kwargs) | 6b240d7ad523c2a45ca21c3030a96ec5aebb69c2 | 3,652,779 |
from django.http import HttpResponse
from django.template import loader

def about(request):
"""
Prepare and displays the about view of the web application.
Args:
request: django HttpRequest class
Returns:
A django HttpResponse class
"""
template = loader.get_template('about.html')
return HttpResponse(template.render()) | ecf2a890e49a5fe786024f7d7f524e1396064f48 | 3,652,780 |
from django.template import defaulttags

def url(parser, token):
    """Overrides the built-in url tag. It works identically, except that where
    possible it will use subdomains to refer to a project instead of a full url path.
    For example, if the subdomain is vessel12.domain.com it will refer to a page
    'details' as /details/ instead of /site/vessel12/details/
REQUIREMENTS:
* MIDDLEWARE_CLASSES in settings should contain
'core.middleware.subdomain.SubdomainMiddleware'
* These keys should be in the django settings file:
SUBDOMAIN_IS_PROJECTNAME = True
MAIN_HOST_NAME = <your site's hostname>
* APACHE url rewriting should be in effect to rewrite subdomain to
site/project/. To get you started: the following apache config does this
for the domain 'devcomicframework.org'
(put this in your apache config file)
RewriteEngine on
RewriteCond $1 .*/$
RewriteCond $1 !^/site/.*
RewriteCond %{HTTP_HOST} !^devcomicframework\.org$
RewriteCond %{HTTP_HOST} !^www.devcomicframework\.org$
RewriteCond %{HTTP_HOST} ^([^.]+)\.devcomicframework\.org$
RewriteRule (.*) /site/%1$1 [PT]
TODO: turn on and off this behaviour in settings, maybe explicitly define
base domain to also make it possible to use dots in the base domain.
"""
orgnode = defaulttags.url(parser, token)
return comic_URLNode(
orgnode.view_name, orgnode.args, orgnode.kwargs, orgnode.asvar
) | 191c7598cdf079fe97452d4914534191e7eb1fe4 | 3,652,781 |
import math
import logging

import numpy as np

def getAp(ground_truth, predict, fullEval=False):
"""
Calculate AP at IOU=.50:.05:.95, AP at IOU=.50, AP at IOU=.75
:param ground_truth: {img_id1:{{'position': 4x2 array, 'is_matched': 0 or 1}, {...}, ...}, img_id2:{...}, ...}
:param predict: [{'position':4x2 array, 'img_id': image Id, 'confident': confident}, {...}, ...]
:return: AP, AP at IOU=.50, AP at IOU=.75
"""
is_match = {'is_matched': 0}
ap_050_095 = 0.
ap_050 = 0.
ap_075 = 0.
prec_050_095 = 0.
prec_050 = 0.
prec_075 = 0.
recall_050_095 = 0.
recall_050 = 0.
recall_075 = 0.
if fullEval:
for i in np.arange(0.50, 1.0, 0.05):
for key in ground_truth:
for win_idx in range(len(ground_truth[key])):
ground_truth[key][win_idx].update(is_match) # reset 'is_matched' for all windows
ap, recall, precision = evaluateAP(ground_truth, predict, threshold=i)
if math.isclose(round(i, 2), 0.5):
ap_050 = ap
prec_050 = precision
recall_050 = recall
if math.isclose(round(i, 2), 0.75):
ap_075 = ap
prec_075 = precision
recall_075 = recall
ap_050_095 += ap
prec_050_095 += precision
recall_050_095 += recall
logging.info("threshold:%.2f"%i + " precsion:%.2f"%(precision*100) + " recall:%.2f"%(recall*100))
else:
ap_050, recall_050, prec_050 = evaluateAP(ground_truth, predict, threshold=0.5)
ap_050_095 = ap_050_095 / 10
prec_050_095 = prec_050_095 / 10
recall_050_095 = recall_050_095 / 10
return [ap_050_095, ap_050, ap_075], \
[prec_050_095, prec_050, prec_075], \
[recall_050_095, recall_050, recall_075] | ac44c514166f8e70a6625f4e1ad89b36564ffba4 | 3,652,782 |
def aumenta_fome(ani):
""" aumenta_fome: animal --> animal
Recebe um animal e devolve o mesmo com o valor da fome incrementado por 1
"""
if obter_freq_alimentacao(ani) == 0:
return ani
else:
ani['a'][0] += 1
return ani | 377e3800e12877f1b8cd1cba19fe3a430ade0207 | 3,652,783 |
import warnings
def match_inputs(
bp_tree,
table,
sample_metadata,
feature_metadata=None,
ignore_missing_samples=False,
filter_missing_features=False
):
"""Matches various input sources.
Also "splits up" the feature metadata, first by calling
taxonomy_utils.split_taxonomy() on it and then by splitting the resulting
DataFrame into two separate DataFrames (one for tips and one for internal
nodes).
Parameters
----------
bp_tree: bp.BP
The tree to be visualized.
table: pd.DataFrame
Representation of the feature table. The index should describe feature
IDs; the columns should describe sample IDs. (It's expected that
feature IDs in the table only describe tips in the tree, not internal
nodes.)
sample_metadata: pd.DataFrame
Sample metadata. The index should describe sample IDs; the columns
should describe different sample metadata fields' names.
feature_metadata: pd.DataFrame or None
Feature metadata. If this is passed, the index should describe feature
IDs and the columns should describe different feature metadata fields'
names. (Feature IDs here can describe tips or internal nodes in the
tree.)
ignore_missing_samples: bool
If True, pads missing samples (i.e. samples in the table but not the
metadata) with placeholder metadata. If False, raises a
DataMatchingError if any such samples exist. (Note that in either case,
samples in the metadata but not in the table are filtered out; and if
no samples are shared between the table and metadata, a
DataMatchingError is raised regardless.) This is analogous to the
ignore_missing_samples flag in Emperor.
filter_missing_features: bool
If True, filters features from the table that aren't present as tips in
the tree. If False, raises a DataMatchingError if any such features
exist. (Note that in either case, features in the tree but not in the
table are preserved.)
Returns
-------
(table, sample_metadata, tip_metadata, int_metadata):
(pd.DataFrame, pd.DataFrame, pd.DataFrame / None, pd.DataFrame / None)
Versions of the input table, sample metadata, and feature metadata
filtered such that:
-The table only contains features also present as tips in the tree.
-The sample metadata only contains samples also present in the
table.
-Samples present in the table but not in the sample metadata will
have all of their sample metadata values set to "This sample has
no metadata". (This will only be done if ignore_missing_samples is
True; otherwise, this situation will trigger an error. See below.)
-If feature metadata was not passed, tip_metadata and int_metadata
will both be None. Otherwise, tip_metadata will contain the
entries of the feature metadata where the feature name was present
as a tip in the tree, and int_metadata will contain the entries
of the feature metadata where the feature name was present as
internal node(s) in the tree.
-Also, for sanity's sake, this will call
taxonomy_utils.split_taxonomy() on the feature metadata before
splitting it up into tip and internal node metadata.
Raises
------
DataMatchingError
If any of the following conditions are met:
1. No features are shared between the tree's tips and table.
2. There are features present in the table but not as tips in the
tree, AND filter_missing_features is False.
3. No samples are shared between the sample metadata and table.
4. There are samples present in the table but not in the sample
metadata, AND ignore_missing_samples is False.
5. The feature metadata was passed, but no features present in it
are also present as tips or internal nodes in the tree.
References
----------
This function was based on match_table_and_data() in Qurro's code:
https://github.com/biocore/qurro/blob/b9613534b2125c2e7ee22e79fdff311812f4fefe/qurro/_df_utils.py#L255
"""
# Match table and tree.
# (Ignore None-named tips in the tree, which will be replaced later on
# with "default" names like "EmpressNode0".)
tip_names = set(bp_tree.bp_tree_tips())
tree_and_table_features = table.index.intersection(tip_names)
if len(tree_and_table_features) == 0:
# Error condition 1
raise DataMatchingError(
"No features in the feature table are present as tips in the tree."
)
ff_table = table.copy()
if len(tree_and_table_features) < len(table.index):
if filter_missing_features:
# Filter table to just features that are also present in the tree.
#
# Note that we *don't* filter the tree analogously, because it's ok
# for the tree's nodes to be a superset of the table's features
# (and this is going to be the case in most datasets where the
# features correspond to tips, since internal nodes aren't
# explicitly described in the feature table).
ff_table = table.loc[tree_and_table_features]
# Report to user about any dropped features from table.
dropped_feature_ct = table.shape[0] - ff_table.shape[0]
warnings.warn(
(
"{} feature(s) in the table were not present as tips in "
"the tree. These feature(s) have been removed from the "
"visualization."
).format(
dropped_feature_ct
),
DataMatchingWarning
)
else:
# Error condition 2
raise DataMatchingError(
"The feature table contains features that aren't present as "
"tips in the tree. You can override this error by using the "
"--p-filter-missing-features flag."
)
# Match table (post-feature-filtering, if done) and sample metadata.
table_samples = set(ff_table.columns)
sm_samples = set(sample_metadata.index)
sm_and_table_samples = sm_samples & table_samples
if len(sm_and_table_samples) == 0:
# Error condition 3
raise DataMatchingError(
"No samples in the feature table are present in the sample "
"metadata."
)
padded_metadata = sample_metadata.copy()
if len(sm_and_table_samples) < len(ff_table.columns):
if ignore_missing_samples:
# Works similarly to how Emperor does this: see
# https://github.com/biocore/emperor/blob/659b62a9f02a6423b6258c814d0e83dbfd05220e/emperor/core.py#L350
samples_without_metadata = table_samples - sm_samples
padded_metadata = pd.DataFrame(
index=samples_without_metadata,
columns=sample_metadata.columns,
dtype=str
)
padded_metadata.fillna("This sample has no metadata", inplace=True)
sample_metadata = pd.concat([sample_metadata, padded_metadata])
# Report to user about samples we needed to "pad."
warnings.warn(
(
"{} sample(s) in the table were not present in the "
"sample metadata. These sample(s) have been assigned "
"placeholder metadata."
).format(
len(samples_without_metadata)
),
DataMatchingWarning
)
else:
# Error condition 4
raise DataMatchingError(
"The feature table contains samples that aren't present in "
"the sample metadata. You can override this error by using "
"the --p-ignore-missing-samples flag."
)
# If we've made it this far, then there must be at least *one* sample
# present in both the sample metadata and the table: and by this point the
# metadata's samples should be a superset of the table's samples (since we
# padded the metadata above if there were any samples that *weren't* in the
# table).
#
# All that's left to do is to filter the sample metadata to just the
# samples that are also present in the table.
sf_sample_metadata = sample_metadata.loc[ff_table.columns]
# If desired, we could report here to the user about any dropped samples
# from the metadata by looking at the difference between
# sample_metadata.shape[0] and sf_sample_metadata.shape[0]. However, the
# presence of such "dropped samples" is a common occurrence in 16S studies,
# so we currently don't do that for the sake of avoiding alarm fatigue.
# If the feature metadata was passed, filter it so that it only contains
# features present as tips / internal nodes in the tree
tip_metadata = None
int_metadata = None
if feature_metadata is not None:
# Split up taxonomy column, if present in the feature metadata
ts_feature_metadata = taxonomy_utils.split_taxonomy(feature_metadata)
fm_ids = ts_feature_metadata.index
# Subset tip metadata
fm_and_tip_features = fm_ids.intersection(tip_names)
tip_metadata = ts_feature_metadata.loc[fm_and_tip_features]
# Subset internal node metadata
internal_node_names = set(bp_tree.bp_tree_non_tips())
fm_and_int_features = fm_ids.intersection(internal_node_names)
int_metadata = ts_feature_metadata.loc[fm_and_int_features]
if len(tip_metadata.index) == 0 and len(int_metadata.index) == 0:
# Error condition 5
raise DataMatchingError(
"No features in the feature metadata are present in the tree, "
"either as tips or as internal nodes."
)
return ff_table, sf_sample_metadata, tip_metadata, int_metadata | 92a97fc39c233a0969c24774d74fdd6b304f5442 | 3,652,784 |
import numpy as np

# im_bit_convert is assumed to come from the same image-utility module.
def im_adjust(img, tol=1, bit=8):
    """
    Adjust contrast of the image by clipping to the [tol, 100 - tol] percentiles
    """
limit = np.percentile(img, [tol, 100 - tol])
im_adjusted = im_bit_convert(img, bit=bit, norm=True, limit=limit.tolist())
return im_adjusted | 2bbccc08d4dd6aeed50c6fb505ff801e3201c73a | 3,652,785 |
import math
def FibanocciSphere(samples=1):
    """ Return a Fibonacci sphere with N number of points on the surface.
        This will act as the template for the nanoparticle core.
    Args:
        samples (int): number of points to place on the unit sphere.
    Returns:
        list of (x, y, z) tuples lying on the unit sphere.
    """
points = []
phi = math.pi * (3. - math.sqrt(5.)) # golden angle in radians
for i in range(samples):
y = 1 - (i / float(samples - 1)) * 2 # y goes from 1 to -1
radius = math.sqrt(1 - y * y) # radius at y
theta = phi * i # golden angle increment
x = math.cos(theta) * radius
z = math.sin(theta) * radius
points.append((x, y, z))
return points | ea47b7c2eed34bd826ddff1619adac887439f5e0 | 3,652,786 |
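Every generated point lies on the unit sphere, since x^2 + z^2 = 1 - y^2 by construction:

import math
points = FibanocciSphere(samples=100)
print(len(points))  # 100
print(all(math.isclose(x*x + y*y + z*z, 1.0, abs_tol=1e-9) for x, y, z in points))  # True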
import inspect
def get_code():
"""
returns the code for the activity_selection function
"""
return inspect.getsource(activity_selection) | 3bae49b5feea34813c518a3ec3a62a4cde35445f | 3,652,787 |
import numpy as np

def calc_luminosity(flux, fluxerr, mu):
""" Normalise flux light curves with distance modulus.
Parameters
----------
flux : array
List of floating point flux values.
fluxerr : array
List of floating point flux errors.
mu : float
Distance modulus from luminosity distance.
Returns
-------
fluxout : array
Same shape as input flux.
fluxerrout : array
Same shape as input fluxerr.
"""
d = 10 ** (mu/5 + 1)
dsquared = d**2
norm = 1e18
fluxout = flux * (4 * np.pi * dsquared/norm)
fluxerrout = fluxerr * (4 * np.pi * dsquared/norm)
return fluxout, fluxerrout | 8cfebee024ae73355daf64b96260d45e57115c8f | 3,652,788 |
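Example usage (mu = 40 gives d = 10**(40/5 + 1) = 1e9 in the docstring's units):

import numpy as np
lum, lumerr = calc_luminosity(np.array([1.0, 2.0]), np.array([0.1, 0.2]), mu=40.0)
print(lum, lumerr)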
from typing import Optional
import os
from os.path import getsize, isfile, join
from urllib.parse import ParseResult, urlparse
from urllib.request import urlopen

from click import echo, progressbar  # assumed: echo/progressbar match click's API

# CHUNK_SIZE, PROGRESS_BAR_WIDTH, PROGRESS_BAR_TEMPLATE and
# get_file_name_from_url are assumed to be defined elsewhere in this module.
def download_file(url: str, destination: str, timeout: Optional[int] = None,
silent: Optional[bool] = False) -> str:
"""
Downloads file by given URL to destination dir.
"""
file_name = get_file_name_from_url(url)
file_path = join(destination, file_name)
parsed_url: ParseResult = urlparse(url)
with urlopen(url, timeout=timeout) as resp:
code: int = resp.getcode()
if parsed_url.scheme != 'file' and code != 200:
raise IOError(f'Bad HTTP response code: {code}')
total = int(resp.getheader('Content-Length')) if parsed_url.scheme != 'file' \
else os.path.getsize(parsed_url.path)
if not isfile(file_path) or getsize(file_path) != total:
if not silent:
echo(f'Downloading {file_name}')
with open(file_path, 'wb') as file, \
progressbar(length=total,
width=PROGRESS_BAR_WIDTH,
bar_template=PROGRESS_BAR_TEMPLATE) as progress_bar:
while True:
chunk = resp.read(CHUNK_SIZE)
if not chunk:
break
file.write(chunk)
if not silent:
progress_bar.update(len(chunk))
return file_path | c49e8974b01c101cf0d0a0defe572ca8f65b780a | 3,652,789 |
import numpy as np
import tensorflow as tf  # TF 1.x assumed (tf.contrib, tf.variable_scope)

# `cifar10` is the TensorFlow CIFAR-10 tutorial module; `hk` (hidden units)
# and `max_out`/`avg_pool` are assumed to be defined elsewhere in this script.
def inference(images):
"""Build the CIFAR-10 model.
Args:
images: Images returned from distorted_inputs() or inputs().
Returns:
Logits.
"""
###
# We instantiate all variables using tf.get_variable() instead of
# tf.Variable() in order to share variables across multiple GPU training runs.
# If we only ran this model on a single GPU, we could simplify this function
# by replacing all instances of tf.get_variable() with tf.Variable().
#
# conv1
#xavier = tf.contrib.layers.xavier_initializer_conv2d()
with tf.variable_scope('conv1') as scope:
kernel1 = _variable_with_weight_decay('weights',
shape=[3, 3, 3, 128],
stddev=5e-2,
wd=None)
conv = tf.nn.conv2d(images, kernel1, [1, 2, 2, 1], padding='SAME')
#conv = tf.nn.dropout(conv, 0.9)
biases1 = cifar10._variable_on_cpu('biases', [128], tf.constant_initializer(0.0))
pre_activation = tf.nn.bias_add(conv, biases1)
conv1 = tf.nn.relu(pre_activation, name = scope.name)
cifar10._activation_summary(conv1)
norm1 = tf.contrib.layers.batch_norm(conv1, scale=True, is_training=True, updates_collections=None)
# conv2
with tf.variable_scope('conv2') as scope:
kernel2 = _variable_with_weight_decay('weights',
shape=[5, 5, 128, 128],
stddev=5e-2,
wd=None)
conv = tf.nn.conv2d(norm1, kernel2, [1, 1, 1, 1], padding='SAME')
biases2 = cifar10._variable_on_cpu('biases', [128], tf.constant_initializer(0.1))
pre_activation = tf.nn.bias_add(conv, biases2)
conv2 = tf.nn.relu(pre_activation, name = scope.name)
#conv2 = tf.nn.dropout(conv2, 0.9)
cifar10._activation_summary(conv2)
# concat conv2 with norm1 to increase the number of features, this step does not affect the Differential_Privacy preserving guarantee
current = tf.concat((conv2, norm1), axis=3)
# norm2
norm2 = tf.contrib.layers.batch_norm(current, scale=True, is_training=True, updates_collections=None)
# conv3
with tf.variable_scope('conv3') as scope:
kernel3 = _variable_with_weight_decay('weights',
shape=[5, 5, 256, 256],
stddev=5e-2,
wd=None)
conv = tf.nn.conv2d(norm2, kernel3, [1, 1, 1, 1], padding='SAME')
biases3 = cifar10._variable_on_cpu('biases', [256], tf.constant_initializer(0.1))
pre_activation = tf.nn.bias_add(conv, biases3)
conv3 = tf.nn.relu(pre_activation, name = scope.name)
#conv3 = tf.nn.dropout(conv3, 0.9)
cifar10._activation_summary(conv3)
# norm3
norm3 = tf.contrib.layers.batch_norm(conv3, scale=True, is_training=True, updates_collections=None)
#pool3, row_pooling_sequence, col_pooling_sequence = tf.nn.fractional_max_pool(norm3, pooling_ratio=[1.0, 2.0, 2.0, 1.0])
pool3 = avg_pool(norm3, 2)
# local4
with tf.variable_scope('local4') as scope:
weights1 = cifar10._variable_with_weight_decay('weights', shape=[5 * 5 * 256, hk],
stddev=0.04, wd=None)
biases4 = cifar10._variable_on_cpu('biases', [hk], tf.constant_initializer(0.1))
h_pool2_flat = tf.reshape(pool3, [-1, 5*5*256]);
z2 = tf.add(tf.matmul(h_pool2_flat, weights1), biases4, name=scope.name)
#Applying normalization for the flat connected layer h_fc1#
batch_mean2, batch_var2 = tf.nn.moments(z2,[0])
scale2 = tf.Variable(tf.ones([hk]))
beta2 = tf.Variable(tf.zeros([hk]))
BN_norm = tf.nn.batch_normalization(z2,batch_mean2,batch_var2,beta2,scale2,1e-3)
###
local4 = max_out(BN_norm, hk)
cifar10._activation_summary(local4)
"""print(images.get_shape());
print(norm1.get_shape());
print(norm2.get_shape());
print(pool3.get_shape());
print(local4.get_shape());"""
    # linear layer(WX + b),
    # We don't apply softmax here because
    # tf.nn.sparse_softmax_cross_entropy_with_logits accepts the unscaled logits
    # and performs the softmax internally for efficiency.
    with tf.variable_scope('softmax_linear') as scope:
        weights2 = cifar10._variable_with_weight_decay('weights', [hk, 10],
                                                       stddev=1/(hk*1.0), wd=0.0)
        biases5 = cifar10._variable_on_cpu('biases', [10],
                                           tf.constant_initializer(0.0))
        softmax_linear = tf.add(tf.matmul(local4, weights2), biases5, name=scope.name)
        cifar10._activation_summary(softmax_linear)
    return softmax_linear | 224c6792b4f2b066d8627d222e6f89b469921de3 | 3,652,790
import pandas as pd
from rdkit.ML.Cluster import Butina

# _generate_fingerprints and _get_tanimoto_distance_matrix are assumed to be
# private helpers defined elsewhere in this module.
def cluster_molecules(mols, cutoff=0.6):
"""
Cluster molecules by fingerprint distance using the Butina algorithm.
Parameters
----------
mols : list of rdkit.Chem.rdchem.Mol
List of molecules.
cutoff : float
Distance cutoff Butina clustering.
Returns
-------
pandas.DataFrame
Table with cluster ID - molecule ID pairs.
"""
# Generate fingerprints
fingerprints = _generate_fingerprints(mols)
# Calculate Tanimoto distance matrix
distance_matrix = _get_tanimoto_distance_matrix(fingerprints)
# Now cluster the data with the implemented Butina algorithm
clusters = Butina.ClusterData(distance_matrix, len(fingerprints), cutoff, isDistData=True)
# Sort clusters by size
clusters = sorted(clusters, key=len, reverse=True)
# Get cluster ID - molecule ID pairs
clustered_molecules = []
for cluster_id, molecule_ids in enumerate(clusters, start=1):
for cluster_member_id, molecule_id in enumerate(molecule_ids, start=1):
clustered_molecules.append([cluster_id, cluster_member_id, molecule_id])
clustered_molecules = pd.DataFrame(
clustered_molecules, columns=["cluster_id", "cluster_member_id", "molecule_id"]
)
# Print details on clustering
print("Number of molecules:", len(fingerprints))
print("Threshold: ", cutoff)
print("Number of clusters: ", len(clusters))
print(
"# Clusters with only 1 molecule: ",
len([cluster for cluster in clusters if len(cluster) == 1]),
)
print(
"# Clusters with more than 5 molecules: ",
len([cluster for cluster in clusters if len(cluster) > 5]),
)
print(
"# Clusters with more than 25 molecules: ",
len([cluster for cluster in clusters if len(cluster) > 25]),
)
print(
"# Clusters with more than 100 molecules: ",
len([cluster for cluster in clusters if len(cluster) > 100]),
)
return clustered_molecules | ba98342d10512b4ee08e756644a26bc8585f5abc | 3,652,791 |
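A usage sketch, assuming RDKit is installed and the module's private fingerprint helpers are available:

from rdkit import Chem
mols = [Chem.MolFromSmiles(s) for s in ("CCO", "CCN", "c1ccccc1")]
clusters = cluster_molecules(mols, cutoff=0.6)
print(clusters.head())  # cluster_id / cluster_member_id / molecule_id rows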
import timeit
def exec_benchmarks_empty_inspection(code_to_benchmark, repeats):
"""
Benchmark some code without mlinspect and with mlinspect with varying numbers of inspections
"""
benchmark_results = {
"no mlinspect": timeit.repeat(stmt=code_to_benchmark.benchmark_exec, setup=code_to_benchmark.benchmark_setup,
repeat=repeats, number=1),
"no inspection": benchmark_code_str_with_inspections(code_to_benchmark.benchmark_exec_func_str,
code_to_benchmark.benchmark_setup_func_str, "[]",
repeats),
"one inspection": benchmark_code_str_with_inspections(code_to_benchmark.benchmark_exec_func_str,
code_to_benchmark.benchmark_setup_func_str,
"[EmptyInspection(0)]", repeats),
"two inspections": benchmark_code_str_with_inspections(code_to_benchmark.benchmark_exec_func_str,
code_to_benchmark.benchmark_setup_func_str,
"[EmptyInspection(0), EmptyInspection(1)]", repeats),
"three inspections": benchmark_code_str_with_inspections(code_to_benchmark.benchmark_exec_func_str,
code_to_benchmark.benchmark_setup_func_str,
"[EmptyInspection(0), " +
"EmptyInspection(1), EmptyInspection(2)]", repeats)}
return benchmark_results | c4038b98968c9c44b5cbd0bfc9e92654dae8aca2 | 3,652,792 |
def detect_version():
"""
Try to detect the main package/module version by looking at:
module.__version__
otherwise, return 'dev'
"""
try:
m = __import__(package_name, fromlist=['__version__'])
return getattr(m, '__version__', 'dev')
except ImportError:
pass
return 'dev' | c9cb3a30d84c7e9118df46dcc73ce37278788db5 | 3,652,793 |
def model(p, x):
""" Evaluate the model given an X array """
return p[0] + p[1]*x + p[2]*x**2. + p[3]*x**3. | fe923f6f6aea907d3dc07756813ed848fbcc2ac6 | 3,652,794 |
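Example: the coefficients [1, 0, 2, 0] encode 1 + 2x^2:

print(model([1.0, 0.0, 2.0, 0.0], 3.0))  # 19.0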
def normalize(x:"tensor|np.ndarray") -> "tensor|np.ndarray":
"""Min-max normalization (0-1):
:param x:"tensor|np.ndarray":
:returns: Union[Tensor,np.ndarray] - Return same type as input but scaled between 0 - 1
"""
return (x - x.min())/(x.max()-x.min()) | 6230077008c084bdcbebfc32d25251564c4266f0 | 3,652,795 |
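Example usage:

import numpy as np
print(normalize(np.array([2.0, 4.0, 6.0])))  # [0.  0.5 1. ]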
import warnings
import Bio
def apply_on_multi_fasta(file, function, *args):
"""Apply a function on each sequence in a multiple FASTA file (DEPRECATED).
file - filename of a FASTA format file
function - the function you wish to invoke on each record
*args - any extra arguments you want passed to the function
This function will iterate over each record in a FASTA file as SeqRecord
objects, calling your function with the record (and supplied args) as
arguments.
This function returns a list. For those records where your function
returns a value, this is taken as a sequence and used to construct a
FASTA format string. If your function never has a return value, this
means apply_on_multi_fasta will return an empty list.
"""
warnings.warn("apply_on_multi_fasta is deprecated", Bio.BiopythonDeprecationWarning)
    try:
        f = globals()[function]
    except KeyError:
        raise NotImplementedError("%s not implemented" % function)
handle = open(file, 'r')
records = SeqIO.parse(handle, "fasta")
results = []
for record in records:
        arguments = [record.sequence]
        arguments.extend(args)
result = f(*arguments)
if result:
results.append('>%s\n%s' % (record.name, result))
handle.close()
return results | e204322e512a0f1eb875d7a6434ab6e3356cff10 | 3,652,796 |
import numpy as np

# check_box_convention is assumed to come from the same module.
def resize_bbox(box, image_size, resize_size):
"""
Args:
box: iterable (ints) of length 4 (x0, y0, x1, y1)
image_size: iterable (ints) of length 2 (width, height)
resize_size: iterable (ints) of length 2 (width, height)
Returns:
new_box: iterable (ints) of length 4 (x0, y0, x1, y1)
"""
check_box_convention(np.array(box), 'x0y0x1y1')
box_x0, box_y0, box_x1, box_y1 = map(float, box)
image_w, image_h = map(float, image_size)
new_image_w, new_image_h = map(float, resize_size)
newbox_x0 = box_x0 * new_image_w / image_w
newbox_y0 = box_y0 * new_image_h / image_h
newbox_x1 = box_x1 * new_image_w / image_w
newbox_y1 = box_y1 * new_image_h / image_h
return int(newbox_x0), int(newbox_y0), int(newbox_x1), int(newbox_y1) | 3b6a309e6ccf0e244bb5a51a922bcf96303116ea | 3,652,797 |
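Example: scaling a box from a 100x100 image to 200x50 doubles x and halves y:

print(resize_bbox((10, 20, 40, 80), (100, 100), (200, 50)))  # (20, 10, 80, 40)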
import time
def perf_counter_ms():
"""Returns a millisecond performance counter"""
return time.perf_counter() * 1_000 | 55f1bbbd8d58593d85f2c6bb4ca4f79ad22f233a | 3,652,798 |
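Example usage:

start = perf_counter_ms()
sum(range(1_000_000))
print(f"took {perf_counter_ms() - start:.2f} ms")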
import struct
def make_shutdown_packet():
    """Create a shutdown packet."""
    packet = struct.pack("<B", OP_SHUTDOWN)
    return packet | 6d696d76c9aa783e477f65e5c89106b2fff6db6d | 3,652,799
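Example usage, assuming OP_SHUTDOWN is a one-byte opcode defined elsewhere in the module:

OP_SHUTDOWN = 0x0F  # hypothetical value for illustration
print(make_shutdown_packet())  # b'\x0f'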