content (string, lengths 35–762k) | sha1 (string, length 40) | id (int64, 0–3.66M) |
---|---|---|
def collect_targets_from_attrs(rule_attrs, attrs):
"""Returns a list of targets from the given attributes."""
result = []
for attr_name in attrs:
_collect_target_from_attr(rule_attrs, attr_name, result)
return [target for target in result if is_valid_aspect_target(target)] | 6be1731049f6970004763f5e9ec7d0a3bde76189 | 3,652,600 |
from typing import Tuple
def extract_codes(text: str) -> Tuple[str, ...]:
"""Extract names of warnings from full warning text."""
match = CODES_PAT.search(text)
if not match:
raise ValueError("No warning code found")
return tuple(match.group(1).split(",")) | 6727049c195197ed2407f30093c362a2c6f35cd4 | 3,652,601 |
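A usage sketch for extract_codes; CODES_PAT is not shown in this snippet, so the pattern below is a hypothetical stand-in whose first group captures a comma-separated list of warning codes.
import re
CODES_PAT = re.compile(r"\[([A-Za-z0-9_,-]+)\]")  # hypothetical pattern, not the original
# extract_codes("unused variable [W0612,W0613]") -> ("W0612", "W0613")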
def task_list(request, pk):
"""
View to get task list based on user list for forms
"""
user_model = User.objects.filter(is_staff=False)
task_model = Task.objects.filter(user=pk)
user_detail = User.objects.get(pk=pk)
query = request.GET.get('q')
if query:
task_model = task_model.filter(
Q(title__icontains=query)
)
return render(request, 'home.html',
{"user_model": user_model, 'task_model': task_model, 'user_detail': user_detail}) | dbb6545ca66a367b2b3e89a494ac8a9bbdbbb341 | 3,652,602 |
import json
def load_credentials():
"""
load_credentials
:return: dict
"""
with open("credentials.json", "r", encoding="UTF-8") as stream:
content = json.loads(stream.read())
return content | 2f08fc4e897a7c7eb91de804158ee67cd91635d0 | 3,652,603 |
def get_utm_string_from_sr(spatialreference):
"""
return utm zone string from spatial reference instance
"""
zone_number = spatialreference.GetUTMZone()
if zone_number > 0:
return str(zone_number) + 'N'
elif zone_number < 0:
return str(abs(zone_number)) + 'S'
else:
return str(zone_number) | 50f01758f7ee29f1b994d36cda34b6b36157fd9e | 3,652,604 |
def messages_count(name):
"""
Get message count for queue
curl -X GET -H 'Accept: application/json' http://localhost:8080/queues/C13470112/msgs/count
curl -X GET -H 'Accept: application/json' 83.212.127.232:8080/queues/C13470112/msgs/count
"""
conn = get_conn()
queue = conn.get_queue(name)
count = queue.count()
resp = "Queue "+name+" has "+str(count)+" messages\n"
return Response(response=resp, mimetype="application/json") | 86abcbc6a9bb81f0ce8a6a19941761c042f5a7e9 | 3,652,605 |
def return_intersect(cameraList):
"""
Calculates the intersection of the Camera objects in the *cameraList*.
Function returns an empty Camera if there exists no intersection.
Parameters:
cameraList : *list* of *camera.Camera* objects
A list of cameras from the camera.Camera class, each containing
a *poly* and a *coordsList*.
Returns:
intersectCam : *camera.Camera* object
An object from the camera.Camera class that is the
intersection between all cameras in the cameraList. If there
exists no intersection between any cameras in the camerList,
an empty Camera will be returned.
"""
intersectCam = None
for camera in cameraList:
if intersectCam is None: # Initiates the intersectCam variable
intersectCam = camera
else:
intersectCam = intersectCam.intersect(camera)
return intersectCam | a47613b8d79c4a4535cd5e7e07aa3b26dea019a5 | 3,652,606 |
def get_returned_attr_set_node(tree):
"""
Get the NODE_ATTR_SET containing the attributes which are returned by the module
"""
# TODO: fix HACK, currently we assume the node containing `imports` is the returned attr set
# but this may not always be the case?
imports_node = get_imports_node(tree)
imports_key_node, _ = [e for e in imports_node.elems if isinstance(e, syntax_tree.Node)]
imports_key_value_node = tree.get_parent(imports_key_node)
returned_attr_set_node = tree.get_parent(imports_key_value_node)
return returned_attr_set_node | f929e4255fcf914ce2d8c2db5ccd74dac5c842d7 | 3,652,607 |
from sys import version_info
from datetime import datetime
def choose_time_format_method(expression,format):
"""
:Summary: strftime("%s") is not a valid string formatting method in python,
therefore it works on linux servers but not windows. To handle this, this function
checks for python version and decides what conversion method to use.
the "format" parameter makes sure that that the correct required type is always returned
"""
# if we are running python3.3 or greater
if(version_info >= (3, 3)):
# if the datetime object is offset aware
if(expression.tzinfo != None):
if(format == "str"):
return str(int(expression.timestamp()))
else:
return int(expression.timestamp())
# else if the datetime object is offset naive
else:
if(format == "str"):
return str(int((expression - datetime(1970, 1, 1)).total_seconds()))
else:
return int((expression - datetime(1970, 1, 1)).total_seconds())
# else if we are running python version lower than python3.3 i.e most linux servers
else:
if(format == "str"):
return expression.strftime("%s")
else:
return int(expression.strftime("%s")) | dc1a3c3caba2696e43b0e9f2e0d11058d7570f54 | 3,652,608 |
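A quick sanity check of the offset-naive branch above:
from datetime import datetime
naive = datetime(2020, 1, 1)
seconds = int((naive - datetime(1970, 1, 1)).total_seconds())
# seconds == 1577836800, the Unix timestamp for 2020-01-01T00:00:00 UTC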
def import_measurements(task, subject, gsrn, session):
"""
Imports measurements for a single MeteringPoint, and starts a
start_submit_measurement_pipeline() pipeline for each of the newly
imported measurements.
:param celery.Task task:
:param str subject:
:param str gsrn:
:param sqlalchemy.orm.Session session:
"""
__log_extra = {
'gsrn': gsrn,
'subject': subject,
'pipeline': 'import_measurements',
'task': 'import_measurements',
}
@atomic
def __import_measurements(session):
"""
Import and save to DB as an atomic operation
"""
return importer.import_measurements_for(meteringpoint, session)
# Load MeteringPoint from DB
try:
meteringpoint = MeteringPointQuery(session) \
.is_active() \
.has_gsrn(gsrn) \
.one()
except orm.exc.NoResultFound:
raise
except Exception as e:
raise task.retry(exc=e)
# Import measurements into DB
try:
measurements = __import_measurements()
except Exception as e:
logger.exception('Failed to import measurements from ElOverblik, retrying...', extra=__log_extra)
raise task.retry(exc=e)
# Submit each measurement to ledger in parallel
for measurement in measurements:
task = build_submit_measurement_pipeline(
measurement, meteringpoint, session)
task.apply_async()
# if measurements:
# tasks = [
# build_submit_measurement_pipeline(measurement, meteringpoint, session)
# for measurement in measurements
# ]
#
# group(*tasks).apply_async() | 6f0fc4aec546c5cf7b23bf2471ac625639e9dbbb | 3,652,609 |
import pandas
from typing import List
def add_agg_series_to_df(
df: pandas.DataFrame, grouped_levels: List[str], bottom_levels: List[str]
) -> pandas.DataFrame:
"""
Add aggregate series columns to wide dataframe.
Parameters
----------
df : pandas.DataFrame
Wide dataframe containing bottom level series.
grouped_levels : List[str]
Grouped level, underscore delimited, column names.
bottom_levels : List[str]
Bottom level, underscore delimited, column names.
Returns
-------
pandas.DataFrame
Wide dataframe with all series in hierarchy.
"""
component_cols = _get_bl(grouped_levels, bottom_levels)
# Add series as specified grouping levels
for i, cols in enumerate(component_cols):
df[grouped_levels[i]] = df[cols].sum(axis=1)
return df | 7c3b7b526c394c8a24bf754365dbc809476b7336 | 3,652,610 |
def conv_relu_pool_forward(x, w, b, conv_param, pool_param):
"""
Convenience layer that performs a convolution, a ReLU, and a pool.
Inputs:
- x: Input to the convolutional layer
- w, b, conv_param: Weights and parameters for the convolutional layer
- pool_param: Parameters for the pooling layer
Returns a tuple of:
- out: Output from the pooling layer
- cache: Object to give to the backward pass
"""
convOut, conv_cache = layers.conv_forward(x, w, b, conv_param)
reluOut, relu_cache = layers.relu_forward(convOut)
out, pool_cache = layers.max_pool_forward(reluOut, pool_param)
cache = (conv_cache, relu_cache, pool_cache)
return out, cache | d9a32950d1b56b4843938b339c7233e7fc87c5cc | 3,652,611 |
def avg_pixelwise_var(images_seen: np.int16):
"""
Computes the variance for every pixel p across all images, resulting in a matrix holding
the variance for each pixel p, then calculates the average of that variance across all
pixels. This allows us to compensate for different fov sizes.
Note: images are normalized to [-1,1] before calculations
Params
------
images_seen
A numpy matrix holding numpy versions of all of our images
Returns
-------
The average pixelwise variance across all images, as a float
"""
# Computes the variance
images = (images_seen.astype(np.float32) - 127.5) / 127.5 # Normalize to [-1,1]
variance_matrix = np.var(images, 0)
# Returns the average of that variance
return(np.sum(variance_matrix)/variance_matrix.size) | 4b6196ddd25c0cd3ad0cd7cb1928b99772aa563f | 3,652,612 |
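A minimal check of avg_pixelwise_var: a stack of identical frames has zero variance at every pixel, so the average must be 0.0.
import numpy as np
images = np.tile(np.arange(64, dtype=np.int16).reshape(8, 8), (10, 1, 1))  # 10 identical 8x8 frames
# avg_pixelwise_var(images) -> 0.0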
def get_r2_matrix(ts):
"""
Returns the r^2 linkage-disequilibrium matrix for the specified tree sequence. This is computed
via a straightforward Python algorithm.
"""
n = ts.get_sample_size()
m = ts.get_num_mutations()
A = np.zeros((m, m), dtype=float)
for t1 in ts.trees():
for sA in t1.sites():
assert len(sA.mutations) == 1
mA = sA.mutations[0]
A[sA.id, sA.id] = 1
fA = t1.get_num_samples(mA.node) / n
samples = list(t1.samples(mA.node))
for t2 in ts.trees(tracked_samples=samples):
for sB in t2.sites():
assert len(sB.mutations) == 1
mB = sB.mutations[0]
if sB.position > sA.position:
fB = t2.get_num_samples(mB.node) / n
fAB = t2.get_num_tracked_samples(mB.node) / n
D = fAB - fA * fB
r2 = D * D / (fA * fB * (1 - fA) * (1 - fB))
A[sA.id, sB.id] = r2
A[sB.id, sA.id] = r2
return A | e6a3eca421c40c9b9bbe218e7f6179eda0e07a00 | 3,652,613 |
import os
def load_or_run(filepath, fun, *args, **kwargs):
"""
Load the cached result file if it exists; otherwise run the computation and save the result.
ex)
res = load_or_run('file_loadorsave', funlongtime, ...., force=False)
:param filepath:
:param fun:
:param force:
:return:
"""
force = kwargs.pop('force', False)
compress = kwargs.pop('compress', True)
if not filepath.startswith('/') and not filepath.startswith('~'):
filepath = os.path.join('/tmp/snipy/load_or_run/', filepath)
if not force and os.path.exists(filepath):
# Load the previously saved result
mmap_mode = 'r+' if not compress else None
return loadfile(filepath, mmap_mode=mmap_mode)
res = fun(*args, **kwargs)
savefile(res, filepath, compress=compress)
return res | 45a9f0fe1050201f863a9ba5887fe560eaee8b27 | 3,652,614 |
def omdb_title(
api_key: str,
id_imdb: str = None,
media: str = None,
title: str = None,
season: int = None,
episode: int = None,
year: int = None,
plot: str = None,
cache: bool = True,
) -> dict:
"""
Looks up media by id using the Open Movie Database.
Online docs: http://www.omdbapi.com/#parameters
"""
if (not title and not id_imdb) or (title and id_imdb):
raise MnamerException("either id_imdb or title must be specified")
elif plot and plot not in OMDB_PLOT_TYPES:
raise MnamerException(
"plot must be one of %s" % ",".join(OMDB_PLOT_TYPES)
)
url = "http://www.omdbapi.com"
parameters = {
"apikey": api_key,
"i": id_imdb,
"t": title,
"y": year,
"season": season,
"episode": episode,
"type": media,
"plot": plot,
}
parameters = clean_dict(parameters)
status, content = request_json(url, parameters, cache=cache)
error = content.get("Error") if isinstance(content, dict) else None
if status == 401:
raise MnamerException("invalid API key")
elif status != 200 or not isinstance(content, dict):
raise MnamerNetworkException("OMDb down or unavailable?")
elif error:
raise MnamerNotFoundException(error)
return content | 54efaba216b7de203fe6960f58a8ebb93b980c4c | 3,652,615 |
def get_status(addr):
"""Get the current status of a minecraft server.
addr -- server address
Returns an mcstatus object.
"""
server = MinecraftServer.lookup(addr)
try:
return server.status()
except Exception:
return None | 9e5a346d3cec803005ef0c65d24f929b56dfa68f | 3,652,616 |
def calculate_losses(estimator, input_fn, labels):
"""Get predictions and losses for samples.
The assumptions are 1) the loss is cross-entropy loss, and 2) user have
specified prediction mode to return predictions, e.g.,
when mode == tf.estimator.ModeKeys.PREDICT, the model function returns
tf.estimator.EstimatorSpec(mode=mode, predictions=tf.nn.softmax(logits)).
Args:
estimator: model to make prediction
input_fn: input function to be used in estimator.predict
labels: array of size (n_samples, ), true labels of samples (integer valued)
Returns:
preds: probability vector of each sample
loss: cross entropy loss of each sample
"""
pred = np.array(list(estimator.predict(input_fn=input_fn)))
loss = log_loss(labels, pred)
return pred, loss | 1a25519d661a6de185c39bb9c65a23a3eea71971 | 3,652,617 |
def text_cleaning(value, stopwords=None):
"""Applies the four cleaning funtions to a value.
Turns value into string, makes lowercase, strips trailing and leading spaces, and removes digits, punctuation, and stopwords
Args:
value (str): string to be cleaned
Returns:
str_out (str): string after cleaning
"""
value = str_lower_strip(value)
value = remove_digits(value)
value = remove_punctuation(value)
value = remove_stopwords(value, stopwords)
str_out = value
return str_out | 291f4150601b7537cbb4d10cb53598dcb9a83829 | 3,652,618 |
def calc_pts_lag(npts=20):
"""
Returns Gauss-Laguerre quadrature points rescaled for line scan integration
Parameters
----------
npts : {15, 20, 25}, optional
The number of points to use.
Notes
-----
The scale is set internally as the best rescaling for a line scan
integral; it was checked numerically for the allowed npts.
Acceptable pts/scls/approximate line integral scan error:
(pts, scl ) : ERR
------------------------------------
(15, 0.072144) : 0.002193
(20, 0.051532) : 0.001498
(25, 0.043266) : 0.001209
The previous HG(20) error was ~0.13ish
"""
scl = { 15:0.072144,
20:0.051532,
25:0.043266}[npts]
pts0, wts0 = np.polynomial.laguerre.laggauss(npts)
pts = np.sinh(pts0*scl)
wts = scl*wts0*np.cosh(pts0*scl)*np.exp(pts0)
return pts, wts | dc491bc8dd46f81809a0dc06da8c123357736622 | 3,652,619 |
def APPEND(*ext, **kw):
"""Decorator to call XDWAPI with trailing arguments *ext.
N.B. Decorated function must be of the same name as XDWAPI's one.
"""
def deco(api):
@wraps(api)
def func(*args, **kw):
args = list(args)
if "codepage" in kw:
args.append(kw["codepage"])
args.extend(ext)
return TRY(getattr(DLL, api.__name__), *args)
return func
return deco | c73dc1b192835a0eefa53f660b1af4626a3ab75c | 3,652,620 |
def stations_within_radius(stations, centre, r):
"""Returns a list of all stations (type MonitoringStation) within radius r of a geographic coordinate x."""
stations_inside_radius = []
for station, distance in stations_by_distance(stations, centre):
# Check if distance is inside the required radius
if distance < r:
stations_inside_radius.append(station)
# Return the list
return stations_inside_radius | 8182bdfc0d46ee64e98358c06b3d4787a0f1fa52 | 3,652,621 |
def manage_topseller(request, template_name="manage/marketing/topseller.html"):
"""
"""
inline = manage_topseller_inline(request, as_string=True)
# amount options
amount_options = []
for value in (10, 25, 50, 100):
amount_options.append({
"value": value,
"selected": value == request.session.get("topseller-amount")
})
return render_to_string(template_name, request=request, context={
"topseller_inline": inline,
"amount_options": amount_options,
}) | e9af634b66a7f7631a0bb7633cc445f05efb615a | 3,652,622 |
import numpy as nm
from sfepy.base.base import dict_to_struct, debug
from sfepy.fem.functions import Function
def create_stabil_mat(problem):
"""Using the stabilization material stub make it the true material."""
# Identity map...
ns = {'p' : 'p', 'q' : 'q',
'u' : 'u', 'b' : 'b', 'v' : 'v',
'fluid' : 'fluid', 'omega' : 'omega', 'i1' : 'i1', 'i2' : 'i2'}
variables = problem.get_variables()
# Indices to the state vector.
ii = {}
ii['u'] = variables.get_indx(ns['u'])
ii['us'] = variables.get_indx(ns['u'], stripped=True)
ii['ps'] = variables.get_indx(ns['p'], stripped=True)
stabil_mat = problem.materials['stabil']
stabil = dict_to_struct(stabil_mat.datas['special'], flag=(1,))
# The viscosity.
fluid_mat = problem.materials[ns['fluid']]
viscosity = fluid_mat.function()['viscosity']
# The Friedrich's constant.
c_friedrichs = problem.domain.get_diameter()
sigma = 1e-12 # 1 / dt.
# Element diameter modes.
diameter_modes = {'edge' : 0, 'volume' : 1, 'max' : 2}
def mat_fun(ts, coor, mode=None, region=None, ig=None,
b_norm=1.0):
if mode != 'qp': return
print('|b|_max (mat_fun):', b_norm)
gamma = viscosity + b_norm * c_friedrichs
data = {}
if stabil.gamma is None:
data['gamma'] = stabil.gamma_mul * gamma
else:
data['gamma'] = nm.asarray( stabil.gamma_mul * stabil.gamma,
dtype = nm.float64 )
data['gamma'] = nm.tile(data['gamma'], (coor.shape[0], 1, 1))
if stabil.delta is None:
term = problem.equations['balance'].terms['dw_lin_convect']
for ig in term.iter_groups():
# This sets term.ig - for 1 group only!!!
break
var = variables[ns['u']]
ap, vg = term.get_approximation(var)
delta = 1.0
mode = diameter_modes[stabil.diameter_mode]
cells = region.get_cells( ig )
diameters2 = problem.domain.get_element_diameters( ig, cells, vg,
mode )
val1 = min( 1.0, 1.0 / sigma )
val2 = sigma * c_friedrichs**2
val3 = (b_norm**2) * min( (c_friedrichs**2) / viscosity, 1.0 / sigma )
# print val1, gamma, val2, val3
delta = stabil.delta_mul * val1 * diameters2 / (gamma + val2 + val3)
n_qp = coor.shape[0] / diameters2.shape[0]
data['diameters2'] = nm.repeat(diameters2, n_qp)
data['diameters2'].shape = data['diameters2'].shape + (1, 1)
data['delta'] = nm.repeat(delta, n_qp)
data['delta'].shape = data['delta'].shape + (1, 1)
else:
val = stabil.delta_mul * stabil.delta
data['delta'] = nm.tile(val, (coor.shape[0], 1, 1))
if stabil.tau is None:
data['tau'] = stabil.tau_red * data['delta']
else:
data['tau'] = nm.asarray( stabil.tau_mul * stabil.tau,
dtype = nm.float64 )
data['tau'] = nm.tile(data['tau'], (coor.shape[0], 1, 1))
return data
stabil_mat.set_function(Function('stabil', mat_fun))
return stabil_mat, ns, ii | a48d8869c4f4d84d86815a8d0c56ecc88ae05814 | 3,652,623 |
from typing import Optional
def embedded_services(request: FixtureRequest) -> Optional[str]:
"""
Enable parametrization for the same cli option
"""
return getattr(request, 'param', None) or request.config.getoption('embedded_services', None) | 908a48d9fa8696e6970fe5884632f1b373063667 | 3,652,624 |
def vigenere(plaintext,cypher):
"""Implementation of vigenere cypher"""
i = 0
cyphertext = ""
for character in plaintext:
n = ord(cypher[i%len(cypher)].lower())-97
new_char = rot_char(character, n)
cyphertext += new_char
if new_char != ' ':
i += 1
return cyphertext | 2b5cdd839bcfc0e55cdac65f9752cf88bd34c2e2 | 3,652,625 |
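A usage sketch; rot_char is not part of this snippet, so the lambda below is an assumed stand-in that shifts a lowercase letter forward by n positions and passes other characters through.
rot_char = lambda c, n: chr((ord(c) - 97 + n) % 26 + 97) if c.isalpha() else c  # assumed helper
# vigenere("attack at dawn", "lemon") -> "lxfopv ef rnhr" (the textbook Vigenère example)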
def get_local_unit_slip_vector_SS(strike, dip, rake):
"""
Compute the STRIKE SLIP components of a unit slip vector.
Args:
strike (float): Clockwise angle (deg) from north of the line at the
intersection of the rupture plane and the horizontal plane.
dip (float): Angle (degrees) between rupture plane and the horizontal
plane normal to the strike (0-90 using right hand rule).
rake (float): Direction of motion of the hanging wall relative to the
foot wall, as measured by the angle (deg) from the strike vector.
Returns:
Vector: Unit slip vector in 'local' N-S, E-W, U-D coordinates.
"""
strike = np.radians(strike)
dip = np.radians(dip)
rake = np.radians(rake)
sx = np.cos(rake) * np.sin(strike)
sy = np.cos(rake) * np.cos(strike)
sz = 0.0
return Vector(sx, sy, sz) | dddedaeaabe91137c38bbaa2ec2bb1d42a6629c7 | 3,652,626 |
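A worked check of the trigonometry, assuming Vector simply stores its three components: a north-striking vertical fault with pure strike-slip motion should give a unit vector pointing due north.
import numpy as np
# strike = 0, dip = 90, rake = 0 (degrees -> radians are 0, pi/2, 0)
sx = np.cos(0.0) * np.sin(0.0)  # 0.0 (no east-west component)
sy = np.cos(0.0) * np.cos(0.0)  # 1.0 (points due north)
# get_local_unit_slip_vector_SS(0, 90, 0) -> Vector(0.0, 1.0, 0.0)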
def get_country_code(country_name):
"""Gets the code of the country given its name"""
for code, name in COUNTRIES.items():
if name == country_name:
return code | bb4a3eebae0b14fc8207ef4301812d3d305a8dfd | 3,652,627 |
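A usage sketch; COUNTRIES is not defined in this snippet, so the mapping below is a hypothetical example of the expected code-to-name dict.
COUNTRIES = {"de": "Germany", "fr": "France"}  # hypothetical mapping
# get_country_code("France") -> "fr"; an unknown name falls through and returns None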
def build_model(cfg, train_cfg=None, test_cfg=None):
"""Build model."""
if train_cfg is None and test_cfg is None:
return build(cfg, MODELS)
else:
return build(cfg, MODELS, dict(train_cfg=train_cfg, test_cfg=test_cfg)) | f47aa433bf2cbd637e9ea2e8e842bab9feb12ab1 | 3,652,628 |
def find_instruction_type(opcode: str) -> InstructionType:
"""Finds instruction type for object instruction
Parameters
----------
opcode : str
opcode of instruction in hex
Returns
-------
InstructionType
type of instruction using InstructionType enum
"""
# R type instructions always have opcode = 00
if opcode == "00":
i_type = InstructionType.R
# I type instructions have opcode > 03
elif opcode > "03":
i_type = InstructionType.I
return i_type | a8f5002834f9e9e847ef4f848a13f6e8037948f6 | 3,652,629 |
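A usage sketch, assuming InstructionType is an enum with R and I members:
# find_instruction_type("00") -> InstructionType.R
# find_instruction_type("23") -> InstructionType.I  (string comparison: "23" > "03")
# opcodes "01"-"03" fall outside both branches and raise ValueError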
import string
def gen_tier_id(inst, id_base, tier_type=None, alignment=None, no_hyphenate=False):
"""
Unified method to generate a tier ID string. (See: https://github.com/goodmami/xigt/wiki/Conventions)
"""
# In order to number this item correctly, we need to decide how many tiers of the same type
# there are. This is done by systematically adding filters to the list.
filters = []
# First, do we align with another item? (Either segmentation, alignment, or head/dep)
if alignment is not None:
filters.append(lambda x: aln_match(alignment)(x) or seg_match(alignment)(x) or ref_match(x, alignment, DS_HEAD_ATTRIBUTE))
# Next, does the type match ours?
if tier_type is not None:
filters.append(type_match(tier_type))
# Get the number of tiers that match this.
if not filters:
prev_tiers = []
num_tiers = 0
else:
prev_tiers = inst.findall(others=filters)
num_tiers = len(prev_tiers)
id_str = id_base
# Now, if we have specified the alignment, we also want to prepend
# that to the generated id string.
if alignment is not None:
if no_hyphenate:
return '{}{}'.format(alignment, id_str)
else:
id_str = '{}-{}'.format(alignment, id_str)
# Finally, if we have multiple tiers of the same type that annotate the
# same item, we should append a letter for the different analyses.
if num_tiers > 0 and inst.find(id=id_str) is not None:
while True:
letters = string.ascii_lowercase
assert num_tiers < 26, "More than 26 alternative analyses not currently supported"
potential_id = id_str + '_{}'.format(letters[num_tiers])
if inst.find(id=potential_id) is None:
id_str = potential_id
break
else:
num_tiers += 1
return id_str | f21b94677efe25e545d7efd99c68ed1722018c35 | 3,652,630 |
def create_temporary_file(filename, contents=""):
""" Decorator for constructing a file which is available
during a single test and is deleted afterwards.
Example usage::
@grader.test
@create_temporary_file('hello.txt', 'Hello world!')
def hook_test(m):
with open('hello.txt') as file:
txt = file.read()
"""
def _inner(test_function):
before_test(create_file(filename, contents))(test_function)
after_test(delete_file(filename))(test_function)
return test_function
return _inner | b4ce96e0d239acc379d78b7c13042cea5c0a4fe0 | 3,652,631 |
def find_post_translational_modifications(filter=None, page=0, pageSize=100): # noqa: E501
"""Find values for an specific property, for example possible taxonomy values for Organism property
# noqa: E501
:param filter: Keyword to filter the list of possible values
:type filter: str
:param page: Number of the page with the possible values for the property
:type page: int
:param pageSize: Number of values with the possible values for the property
:type pageSize: int
:rtype: List[PostTranslationalModification]
"""
unimod_database = UnimodDatabase()
l = unimod_database.search_mods_by_keyword(keyword=filter)
list_found = l[(page * pageSize):(page * pageSize) + pageSize]
return list_found | 9c9d196a7d0d3e8c3b2725247504cecf822ac541 | 3,652,632 |
def random_vector(A, b):
"""
Generates a random vector satisfying Ax <= b through rejection
sampling.
"""
dimension = A.shape[1]
not_feasible = True
while not_feasible == True:
config.reject_counter = config.reject_counter + 1
if config.reject_counter == config.milestone:
config.milestone = config.milestone * 10
print(config.reject_counter, 'random vectors have been generated so far')
rand_vec = np.random.uniform(-0.5, 0.5, dimension)
if np.all(np.dot(A, rand_vec) <= b) == True:
not_feasible = False
return rand_vec | e710ef0a3e49fc7834850465f11232df546b944d | 3,652,633 |
from .transform import mapi as _mapi
from typing import Callable
def mapi(mapper: Callable[[TSource, int], TResult]) -> Projection[TSource, TResult]:
"""Returns an observable sequence whose elements are the result of
invoking the mapper function and incorporating the element's index
on each element of the source."""
return _mapi(mapper) | e640a1a4b68b9115ca2358502b675e4d6710ea83 | 3,652,634 |
def game_to_screen(position):
"""
Converts coordinates from game view into screen coordinates for mouse interaction
"""
return (GAME_LEFT + position[0], GAME_TOP + position[1]) | 2176d74a98db1e226dc960b14db35af303bfe9ec | 3,652,635 |
def get_graph_params(filename, nsize=1):
"""Load and process graph adjacency matrix and upsampling/downsampling matrices."""
data = np.load(filename, encoding='latin1')
A = data['A']
U = data['U']
D = data['D']
U, D = scipy_to_pytorch(A, U, D)
A = [adjmat_sparse(a, nsize=nsize) for a in A]
return A, U, D | 5c0671dbe7cd2f56aace9319f78289b1e34defa4 | 3,652,636 |
from . import computers
def _(dbmodel, backend):
"""
get_backend_entity for DummyModel DbComputer.
DummyModel instances are created when QueryBuilder queries the Django backend.
"""
djcomputer_instance = djmodels.DbComputer(
id=dbmodel.id,
uuid=dbmodel.uuid,
name=dbmodel.name,
hostname=dbmodel.hostname,
description=dbmodel.description,
transport_type=dbmodel.transport_type,
scheduler_type=dbmodel.scheduler_type,
metadata=dbmodel.metadata
)
return computers.DjangoComputer.from_dbmodel(djcomputer_instance, backend) | dd9dc5eeb0dcd54816675bd2dc19e5a0fc10a59a | 3,652,637 |
import six
def retrieve(filename, conf, return_format='dict', save_to_local=False, delete_remote=False, timeout=60):
"""Retrieving Processed Session File from server via sFTP
1. Get xml file string from server and return object
2. If save_to_local, save to local file system
Args:
filename: filename of file in outbound folder at remote server with '.asc' as extension.
conf: An instance of utils.Configuration.
return_format: Return format. The default is ‘dict’. Could be one of ‘dict’, ‘object’ or ‘xml’.
save_to_local: whether save file to local. default is false.
delete_remote: If delete the remote file after download. The default is False
timeout: Timeout in second for ssh connection for sftp.
Returns:
response XML in desired format.
Raises:
Exception depends on when get it.
"""
if not isinstance(conf, utils.Configuration):
raise utils.VantivException('conf must be an instance of utils.Configuration')
if not isinstance(filename, six.string_types) or len(filename) < 4:
raise utils.VantivException('filename must be a string, and at least 4 chars')
if not isinstance(timeout, six.integer_types) or timeout < 0:
raise utils.VantivException('timeout must be an positive int')
response_xml = _get_file_str_from_sftp(filename, conf, delete_remote, timeout)
if save_to_local:
_save_str_file(response_xml, conf.batch_response_path, filename)
return _generate_response(response_xml, return_format, conf) | 1ebf550b8a9be3019ed851a5e4571ed9a72f3e44 | 3,652,638 |
def get_simulate_func_options(
params,
options,
method="n_step_ahead_with_sampling",
df=None,
n_simulation_periods=None,
):
"""Rewrite respy's get_simulation_function such that options can be passed
and therefore the seed be changed before any run. Documentation is adapted
from :func:`respy.simulate.get_simulate_func()`
Parameters
----------
params : pandas.DataFrame
DataFrame containing the model parameters.
options : dict
Dictionary containing the model options.
method : {"n_step_ahead_with_sampling", "n_step_ahead_with_data", "one_step_ahead"}
The simulation method which can be one of three and is explained in more detail
in :func:`respy.simulate.simulate()`.
df : pandas.DataFrame or None, default None
DataFrame containing one or multiple observations per individual.
n_simulation_periods : int or None, default None
Simulate data for a number of periods. This options does not affect
``options["n_periods"]`` which controls the number of periods for which decision
rules are computed.
Returns
-------
simulate_function : :func:`simulate`
Simulation function where all arguments except the parameter vector
and the options are set.
"""
optim_paras, options = process_params_and_options(params, options)
n_simulation_periods, options = _harmonize_simulation_arguments(
method,
df,
n_simulation_periods,
options,
)
df = _process_input_df_for_simulation(df, method, options, optim_paras)
solve = get_solve_func(params, options)
n_observations = (
df.shape[0]
if method == "one_step_ahead"
else df.shape[0] * n_simulation_periods
)
shape = (n_observations, len(optim_paras["choices"]))
base_draws_sim = create_base_draws(
shape,
next(options["simulation_seed_startup"]),
"random",
)
base_draws_wage = create_base_draws(
shape,
next(options["simulation_seed_startup"]),
"random",
)
simulate_function = partial(
simulate,
base_draws_sim=base_draws_sim,
base_draws_wage=base_draws_wage,
df=df,
method=method,
n_simulation_periods=n_simulation_periods,
solve=solve,
)
return simulate_function | 9d77730facb29d460c958033873bb2ce02f5a9ed | 3,652,639 |
import os
def read_xspec_log_files(es_dir, out_rel_name, boot_num=2):
"""
Read in all XSPEC log files (with chatter set to 4) that were generated in
sed_fit_bootstrap.sh, and append each bootstrap iteration's values to its
sed_pars.Parameter.
Parameters
----------
es_dir : str
The directory with all the energy spectroscopy files from
sed_fit_bootstrap.sh.
out_rel_name : str
The relative (i.e. local) name for the output files.
boot_num : int
Number of bootstrap iterations implemented.
Returns
-------
var_pars : np.array of sed_pars.Parameter()
2-D array of the SED parameters that vary with QPO phase, over
all the bootstrap iterations.
"""
all_par_vals = np.zeros((N_SPECTRA, N_PARAMS))
for i in range(1, boot_num + 1):
# for i in range(1, 10):
boot_log_file = es_dir + "/" + out_rel_name + "_b-" + str(i) + \
"_xspec.log"
# print log_file
if os.path.isfile(boot_log_file):
par_vals = xspec_boot_log_to_array(boot_log_file)
# print "Shape of par vals:", np.shape(par_vals)
all_par_vals = np.dstack((all_par_vals, par_vals))
else:
pass
# print "All par vals:", np.shape(all_par_vals)
all_par_vals = all_par_vals[:,:,1:]
# print "All par vals:", np.shape(all_par_vals)
good_boots = np.shape(all_par_vals)[-1]
# print "Good boots:", good_boots
n_varpars = 0
delete_index = []
for j in range(N_PARAMS):
if not check_equal(all_par_vals[:,j,0].flatten()):
n_varpars += 1
else:
delete_index.append(j)
# print "Shape all par vals:", np.shape(all_par_vals)
untied_varpar_vals = np.delete(all_par_vals, delete_index, axis=1)
# print "Shape untied par vals:", np.shape(untied_varpar_vals)
# print untied_varpar_vals
return untied_varpar_vals, n_varpars, good_boots | 84df2fe905a001877977f6248bafda8e417b0fc5 | 3,652,640 |
import re
def get_host_user_and_ssh_key_path(instance_name, project, zone):
"""Return a tuple of (hostname, username and ssh_key_path)."""
output = api.local(
'gcloud compute ssh --project "%s" --zone "%s" %s --dry-run' %
(project, zone, instance_name),
capture=True)
print(output)
m = re.match('/usr/bin/ssh .*-i ([^ ]+)(?: -o [^ ]+)* ([^ ]+)@([^ ]+)',
output)
return (m.group(3), m.group(2), m.group(1)) | eabc88808fe0b73e9df507ea0397c3b9eb38a8de | 3,652,641 |
def TDataStd_TreeNode_Find(*args):
"""
* class methods working on the node =================================== Returns true if the tree node T is found on the label L. Otherwise, false is returned.
:param L:
:type L: TDF_Label &
:param T:
:type T: Handle_TDataStd_TreeNode &
:rtype: bool
"""
return _TDataStd.TDataStd_TreeNode_Find(*args) | 6c37e5f05627287eab4c4c13a21d92aa6e4e6a1a | 3,652,642 |
def make_group_corr_mat(df):
"""
This function reads in each subject's aal roi time series files and creates roi-roi correlation matrices
for each subject and then sums them all together. The final output is a 3d matrix of all subjects
roi-roi correlations, a mean roi-roi correlation matrix and a roi-roi covariance matrix.
**NOTE WELL** This returns correlations transformed by the Fisher z, aka arctanh, function.
"""
# for each subject do the following
for i, (sub, f_id) in enumerate(df[['SUB_ID', 'FILE_ID']].values):
#read each subjects aal roi time series files
ts_df = pd.read_table('DATA/{}_rois_aal.1D'.format(f_id))
#create a correlation matrix from the roi all time series files
corr_mat_r = ts_df.corr()
#the correlations need to be transformed to Fisher z, which is
#equivalent to the arctanh function.
corr_mat_z = np.arctanh(corr_mat_r)
#for the first subject, add a correlation matrix of zeros that is the same dimensions as the aal roi-roi matrix
if i == 0:
all_corr_mat = np.zeros([corr_mat_z.shape[0], corr_mat_z.shape[1], len(df)])
#now add the correlation matrix you just created for each subject to the all_corr_mat matrix (3D)
all_corr_mat[:, :, i] = corr_mat_z
#create the mean correlation matrix (ignore nas - sometime there are some...)
av_corr_mat = np.nanmean(all_corr_mat, axis=2)
#create the group covariance matrix (ignore nas - sometime there are some...)
var_corr_mat = np.nanvar(all_corr_mat, axis=2)
return all_corr_mat, av_corr_mat, var_corr_mat | 4d30136e8ce46e984c0039ddaca26efd04f231b9 | 3,652,643 |
def get_tradedate(begin, end):
"""
get tradedate between begin date and end date
Params:
begin:
str,eg: '1999-01-01'
end:
str,eg: '2017-12-31'
Return:
pd.DataFrame
"""
try:
conn = pymysql.connect(**config)
cursor = conn.cursor()
query = "SELECT calendar_date FROM trade_calendar WHERE is_trade_day= 1 AND \
calendar_date>='" + begin + "' AND calendar_date<='" + end + "';"
cursor.execute(query)
date = pd.DataFrame(list(cursor.fetchall()))
date.columns = ['date']
date = pd.DataFrame(pd.to_datetime(date['date']))
return date
finally:
if conn:
conn.close() | 9464caee65f12b9704e63068e159494baad25e6a | 3,652,644 |
def collect_tweet(update: Update, context: CallbackContext) -> int:
"""Tweet caption collection for tweet without attachments"""
logger.info(f"'{update.message.text}' tweet type selected")
update.message.reply_text("Enter the tweet")
return TWEET | 6fc25efa4dc10f2316b70ff18f9a5c77b83c1e4a | 3,652,645 |
from typing import Dict
def get_unhandled_crictical_errors(req:HttpRequest, n:int):
"""
Preprocess errors before injection
and gets `n` unhandled errors
Typical Return Value if `n` errors were found...
{"es":[
{
"id": "192.168.1.51",
"title":"hey there"
}
]
}
"""
errors:Dict = {} # define return object
# get the neccessary errors
errors_query = Error.objects.filter(isHandled=False, ecode__lt=2000, ecode__gt=1000)[:n]
es_tmp:list = []
for error in errors_query:
e_tmp: dict = {}
victim: object = None
# e_tmp["id"] = error.victim
code = error.ecode
# if ecode > 1500, then it belongs to the child
if code > 1500 and code < 2000:
victim = Child.objects.filter(ip=error.victim).first()
e_tmp["id"] = victim.nickname or victim.ip
# if not then belongs to smart task
elif code > 1000 and code < 1500:
e_tmp["id"] = STask.objects.filter(sid=error.victim).first().name
# rarely error record may be corrupted
else:
raise Exception(f"Given ecode{error.ecode} in the error{error.eid} obj is invalid")
e_tmp["title"] = get_error_title(error.ecode)
es_tmp.append(e_tmp)
del e_tmp, victim
# compile the return object
errors["es"] = es_tmp
del es_tmp
return JsonResponse(errors) | ae2afdeda89f9a9d946fa60a8ba5e15277388e50 | 3,652,646 |
def ADO_mappings(N, K, level_cutoff):
"""
ADO (auxilary density operators) are indexed by a N by (K + 1) matrix
consisting of non-negative integers.
ADO_mappings calculates all possible matrices "ado_index" of size
N by (K+1) where np.sum(m) < level_cutoff
Parameters
----------
N : integer
number of states
K : integer
number of exponentials to include in the spectral density
correlation function
level_cutoff : integer
number of levels at which to terminate the heiarchy expansion
Returns
-------
ind_to_mat : list of matrices
maps index to np.array
mat_to_ind : function
maps the np.array to the index
---------------------------------------------------------------------------
Define S to be the set of all matrices of size N by (K + 1) with
non-negative integer values.
Define level L_i as:
L_i = {m \in S | np.sum(m) == i}
L_i can be found using the multichoose function. We will preserve the order
that multichoose uses in ordering L_i
L_i corresponds to the set of ADOs in the ith heiarchy.
L_0 is a singleton set, corresponding to the RDO (reduced density matrix)
"""
bins = N * (K + 1)
permutations = []
for c in range(level_cutoff):
permutations.extend(multichoose(bins, c))
inverted_permutations = {tuple(v): i for i, v in enumerate(permutations)}
def mat_to_ind(mat):
"""maps np.array to index"""
vec = mat.flatten()
try:
return inverted_permutations[tuple(vec)]
except KeyError:
return None
ind_to_mat = [np.array(vec).reshape((N, K + 1)) for vec in permutations]
return ind_to_mat, mat_to_ind | a76da5569863ea8d17ec248eb09b2b6e5a300ad2 | 3,652,647 |
def f_beta(precision, recall, beta):
"""
Returns the F score for precision, recall and a beta parameter
:param precision: a double with the precision value
:param recall: a double with the recall value
:param beta: a double with the beta parameter of the F measure, which gives more or less weight to precision vs. recall
:return: a double value of the f(beta) measure.
"""
if np.isnan(precision) or np.isnan(recall) or (precision == 0 and recall == 0):
return np.nan
return ((1 + beta ** 2) * precision * recall) / (((beta ** 2) * precision) + recall) | be6c2b011c51d58d4b5f943671cd53b45632b48f | 3,652,648 |
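A quick numeric check: with beta = 1 the measure reduces to the familiar F1 score.
# f_beta(0.5, 1.0, 1) == (2 * 0.5 * 1.0) / (1 * 0.5 + 1.0) == 2/3 ≈ 0.667
# f_beta(0.0, 0.0, 1) -> nan (guarded by the precision == recall == 0 check)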
import os
import tempfile
def convert_image_to_dicom(image_file):
"""Read an image file, convert it to Dicom and return the file path"""
# Load pixel array from image.
img = Image.open(image_file)
if ('RGB' == img.mode) or ('RGBA' == img.mode):
# Assuming greyscale image, keep only one channel.
pix = np.array(img)[:, :, 0]
elif 'L' == img.mode:
# One black and white channel.
pix = np.array(img)[:, :]
else:
raise ValueError('Unhandled Image mode: {}'.format(img.mode))
# Write pixel array to Dicom file.
stk = sitk.GetImageFromArray(pix)
writer = sitk.ImageFileWriter()
writer.KeepOriginalImageUIDOn()
img_basename = os.path.splitext(os.path.basename(image_file))[0] + '_'
dicom_file = tempfile.NamedTemporaryFile(prefix=img_basename).name + '.dcm'
writer.SetFileName(dicom_file)
writer.Execute(stk)
return dicom_file | 722a5364cd64b5c5261dbb746e24d37e6288f14d | 3,652,649 |
import ntpath, os, yaml
def get_cfg(existing_cfg, _log):
"""
generates
"""
_sanity_check(existing_cfg, _log)
with open(os.path.join(os.path.dirname(__file__), "{}.yml".format(ntpath.basename(__file__).split(".")[0])),
'r') as stream:
try:
ret = yaml.safe_load(stream)
except yaml.YAMLError:
assert False, "Default config yaml for '{}' not found!".format(os.path.splitext(__file__)[0])
return ret | 3d69096ebc1b78ad52dcc5b35b225ccfea5ff189 | 3,652,650 |
import socket
import os
import tqdm
import urllib
import logging
def download_alphafold_cif(
proteins: list,
out_folder: str,
out_format: str = "{}.cif",
alphafold_cif_url: str = 'https://alphafold.ebi.ac.uk/files/AF-{}-F1-model_v1.cif',
timeout: int = 60,
verbose_log: bool = False,
) -> tuple:
"""
Function to download .cif files of protein structures predicted by AlphaFold.
Parameters
----------
proteins : list
List (or any other iterable) of UniProt protein accessions for which to
download the structures.
out_folder : str
Path to the output folder.
alphafold_cif_url : str
The base link from where to download cif files.
The brackets {} are replaced by a protein name from the proteins list.
Default is 'https://alphafold.ebi.ac.uk/files/AF-{}-F1-model_v1.cif'.
timeout : int
Time to wait for reconnection of downloads.
Default is 60.
verbose_log: bool
Whether to write verbose logging information.
Default is False.
Returns
-------
: (list, list, list)
The lists of valid, invalid and existing protein accessions.
"""
socket.setdefaulttimeout(timeout)
valid_proteins = []
invalid_proteins = []
existing_proteins = []
if not os.path.exists(out_folder):
os.makedirs(out_folder)
for protein in tqdm.tqdm(proteins):
name_in = alphafold_cif_url.format(protein)
name_out = os.path.join(
out_folder,
out_format.format(protein)
)
if os.path.isfile(name_out):
existing_proteins.append(protein)
else:
try:
urllib.request.urlretrieve(name_in, name_out)
valid_proteins.append(protein)
except urllib.error.HTTPError:
if verbose_log:
logging.info(f"Protein {protein} not available for CIF download.")
invalid_proteins.append(protein)
logging.info(f"Valid proteins: {len(valid_proteins)}")
logging.info(f"Invalid proteins: {len(invalid_proteins)}")
logging.info(f"Existing proteins: {len(existing_proteins)}")
return(valid_proteins, invalid_proteins, existing_proteins) | c70265b920837d23c27ee5732a3a9e155a29a30d | 3,652,651 |
import sys
import os
def _gecko_path():
"""Either get the executable or raise an error"""
if sys.platform == "win32":
return os.path.join(PCKG_PATH, "win32", "geckodriver.exe")
if sys.platform == 'linux':
return os.path.join(PCKG_PATH, "linux", "geckodriver")
if sys.platform == 'darwin':
return os.path.join(PCKG_PATH, "macos", "geckodriver")
raise OSError("not supported yet") | a5b82091eca7545781740c3ff4d03da532b818a4 | 3,652,652 |
import struct
def readShort(f):
"""Read 2 bytes as BE integer in file f"""
read_bytes = f.read(2)
return struct.unpack(">h", read_bytes)[0] | 1b31c2285d055df3c128e8158dcc67eb6c0a2b18 | 3,652,653 |
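A self-contained usage sketch with an in-memory stream:
import io
buf = io.BytesIO(b"\x01\x02")
# readShort(buf) -> 258 (0x0102 as a big-endian signed 16-bit integer)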
def get_color(thing):
"""Get color for thing.
:param thing: Thing to get color for.
:return: Color tuple if rule exists otherwise None.
"""
for rule in _COLOR_RULES:
color = rule(thing)
if color is not None:
return color
return None | 79620c0ec8d5e9a153038b9b6a65f36158dce255 | 3,652,654 |
def build_table(infos):
""" Builds markdown table. """
table_str = '| '
for key in infos[0].keys():
table_str += key + ' | '
table_str += '\n'
table_str += '| '
for key in infos[0].keys():
table_str += '--- | '
table_str += '\n'
for info in infos:
table_str += '| '
for value in info.values():
table_str += str(value) + ' | '
table_str += '\n'
return table_str | 8d31e6abc9edd0014acbac3570e4a2bc711baa4a | 3,652,655 |
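Example input and output for build_table, assuming every dict in infos has the same keys:
infos = [{"name": "alpha", "count": 3}, {"name": "beta", "count": 5}]
# build_table(infos) produces:
# | name | count |
# | --- | --- |
# | alpha | 3 |
# | beta | 5 |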
import six
import threading
def notify_telegram(title, content, token=None, chat=None, mention_user=None, **kwargs):
"""
Sends a telegram notification and returns *True* on success. The communication with the telegram
API might have some delays and is therefore handled by a thread.
"""
# test import
cfg = Config.instance()
# get default token and chat
if not token:
token = cfg.get_expanded("notifications", "telegram_token")
if not chat:
chat = cfg.get_expanded("notifications", "telegram_chat")
if not token or not chat:
logger.warning("cannot send Telegram notification, token ({}) or chat ({}) empty".format(
token, chat))
return False
# append the user to mention to the title
# unless explicitly set to empty string
mention_text = ""
if mention_user is None:
mention_user = cfg.get_expanded("notifications", "telegram_mention_user")
if mention_user:
mention_text = " (@{})".format(mention_user)
# request data for the API call
request = {
"parse_mode": "Markdown",
}
# standard or attachment content?
if isinstance(content, six.string_types):
request["text"] = "{}{}\n\n{}".format(title, mention_text, content)
else:
# content is a dict, add some formatting
request["text"] = "{}{}\n\n".format(title, mention_text)
for key, value in content.items():
request["text"] += "_{}_: {}\n".format(key, value)
# extend by arbitrary kwargs
request.update(kwargs)
# threaded, non-blocking API communication
thread = threading.Thread(target=_notify_telegram, args=(token, chat, request))
thread.start()
return True | a736025f5c6a6acff634f325ecbad0e591f30174 | 3,652,656 |
def convert_to_dapr_duration(td: timedelta) -> str:
"""Converts date.timedelta to Dapr duration format.
Args:
td (datetime.timedelta): python datetime object.
Returns:
str: dapr duration format string.
"""
total_minutes, secs = divmod(td.total_seconds(), 60.0)
hours, mins = divmod(total_minutes, 60.0)
return f'{hours:.0f}h{mins:.0f}m{secs:.0f}s' | 729cde6d2dccea1c8fa36eec506ee8ee6ea34b6e | 3,652,657 |
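A worked example of the duration formatting:
from datetime import timedelta
# convert_to_dapr_duration(timedelta(hours=1, minutes=30, seconds=15)) -> '1h30m15s'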
def get_slot(handler_input, slot_name):
# type: (HandlerInput, AnyStr) -> Optional[Slot]
"""Return the slot information from intent request.
The method retrieves the slot information
:py:class:`ask_sdk_model.slot.Slot` from the input intent request
for the given ``slot_name``. More information on the slots can be
found here :
https://developer.amazon.com/docs/custom-skills/request-types-reference.html#slot-object
If there is no such slot, then a ``None``
is returned. If the input request is not an
:py:class:`ask_sdk_model.intent_request.IntentRequest`, a
:py:class:`TypeError` is raised.
:param handler_input: The handler input instance that is generally
passed in the sdk's request and exception components
:type handler_input: ask_sdk_core.handler_input.HandlerInput
:param slot_name: Name of the slot that needs to be retrieved
:type slot_name: str
:return: Slot information for the provided slot name if it exists,
or a `None` value
:rtype: Optional[ask_sdk_model.slot.Slot]
:raises: TypeError if the input is not an IntentRequest
"""
request = handler_input.request_envelope.request
if isinstance(request, IntentRequest):
if request.intent.slots is not None:
return request.intent.slots.get(slot_name, None)
else:
return None
raise TypeError("The provided request is not an IntentRequest") | c564f3b82fb21c12b81d1fda0214c330e7355080 | 3,652,658 |
from typing import Callable
def password_to_key(
hash_implementation: Callable[[bytes], TDigestable], padding_length: int
) -> Callable[[bytes, bytes], bytes]:
"""
Create a helper function to convert passwords to SNMP compliant keys
according to :rfc:`3414`.
>>> hasher = password_to_key(hashlib.sha1, 20)
>>> key = hasher(b"mypasswd", b"target-engine-id")
>>> key.hex()
'999ec23ca66b9d3f187ab5208840c30b0450b452'
:param hash_implementation: A callable that creates an object with a
".digest()" method from a bytes-object. Usable examples are
`hashlib.md5` and `hashlib.sha1`
:param padding_length: The padding length to be used during hashing (as
defined in the SNMP rfc)
:returns: A callable which can be used to derive an SNMP compliant key
from a password.
"""
@lru_cache(maxsize=None)
def hasher(password: bytes, engine_id: bytes) -> bytes:
"""
Derive a key from a password and engine-id.
:param password: The user password
:param engine_id: The target engine ID
:returns: The derived key
"""
# Repeat the password for a total of 1MB worth of data (as per SNMP rfc)
hash_size = 1024 * 1024
num_words = hash_size // len(password)
tmp = (password * (num_words + 1))[:hash_size]
hash_instance = hash_implementation(tmp)
key = hash_instance.digest()
localised_buffer = (
key[:padding_length] + engine_id + key[:padding_length]
)
final_key = hash_implementation(localised_buffer).digest()
return final_key
hasher.__name__ = f"<hasher:{hash_implementation}>" # type: ignore
return hasher | 3f638afaa5c950f70edf39ca699701ec1709729e | 3,652,659 |
import re
def categorize_tag_key_characters(OSM_FILE = "data\\round_rock.xml", category = 'Summary'):
"""Categorizes attributes into those with:
all lower character, all lower after colon(:),
containing special/problem characters and
all all others that were not listed in above
which includes uppercase characters and/or
multiple colons.
Keyword arguments:
OSM_File -- .osm or .xml file (default "data\\round_rock.xml")
category -- print specific keys of categories of characters from regex search
(default 'Summary' ['All', 'lower', 'lower_colon', 'problemchars', 'other'])
"""
if category == 'All':
category = ('lower', 'lower_colon', 'problemchars', 'other')
category_list = list(category)
lower = re.compile(r'^([a-z]|_)*$')
lower_colon = re.compile(r'^([a-z]|_)*:([a-z]|_)*$')
problemchars = re.compile(r'[=\+/&<>;\'"\?%#$@\,\. \t\r\n]')
lower_set = set()
lower_colon_set = set()
problemchars_set = set()
other_set = set()
def key_type(element, keys):
if element.tag == "tag":
if lower.match(element.attrib['k']):
lower_set.add(element.attrib['k'])
keys["lower"] += 1
elif lower_colon.match(element.attrib['k']):
lower_colon_set.add(element.attrib['k'])
keys["lower_colon"] += 1
elif problemchars.match(element.attrib['k']):
problemchars_set.add(element.attrib['k'])
keys["problemchars"] += 1
else:
other_set.add(element.attrib['k'])
keys["other"] += 1
return keys
def process_map(filename):
keys = {"lower": 0, "lower_colon": 0, "problemchars": 0, "other": 0}
for _, element in ET.iterparse(filename):
keys = key_type(element, keys)
print(keys)
print(
"\nThere are:\n\
{} unique keys in lower,\n\
{} unique keys in lower_colon,\n\
{} unique keys in problemchars and\n\
{} unique keys in other.\n"
.format(len(lower_set), len(lower_colon_set), len(problemchars_set), len(other_set))
)
if 'lower' in category_list:
print('\n\nlower set has {} items. The unique items are: \n\n{} \n\n'
.format(keys["lower"], sorted(lower_set)))
if 'lower_colon' in category_list:
print('lower_colon set has {} items. The unique items are: \n\n{} \n\n'
.format(keys["lower_colon"], sorted(lower_colon_set)))
if 'problemchars' in category_list:
print('problemchars set has {} items. The unique items are: \n\n{} \n\n'
.format(keys["problemchars"], sorted(problemchars_set)))
if 'other' in category_list:
print('other set has {} items. The unique items are: \n\n{} \n\n'
.format(keys["other"], sorted(other_set)))
return keys
keys_dicts = process_map(OSM_FILE)
return keys_dicts | 4e2f6c6a24a14114ce8f5c8d2855847859ad4d8f | 3,652,660 |
def rotate_left(value, count, nbits, offset):
"""
Rotate a value to the left (or right)
@param value: value to rotate
@param count: number of times to rotate. negative counter means
rotate to the right
@param nbits: number of bits to rotate
@param offset: offset of the first bit to rotate
@return: the value with the specified field rotated
all other bits are not modified
"""
assert offset >= 0, "offset must be >= 0"
assert nbits > 0, "nbits must be > 0"
mask = 2**(offset+nbits) - 2**offset
tmp = value & mask
if count > 0:
for x in range(count):
if (tmp >> (offset+nbits-1)) & 1:
tmp = (tmp << 1) | (1 << offset)
else:
tmp = (tmp << 1)
else:
for x in range(-count):
if (tmp >> offset) & 1:
tmp = (tmp >> 1) | (1 << (offset+nbits-1))
else:
tmp = (tmp >> 1)
value = (value-(value&mask)) | (tmp & mask)
return value | ed24a0a958bed1ab1a01c4a858bfba0fd163e2fd | 3,652,661 |
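A worked example on the low nibble (offset 0, nbits 4):
# rotate_left(0b1011, 1, 4, 0) -> 0b0111 (7): the set MSB of the 4-bit field wraps
# around to bit 0, and all bits outside the field are left untouched.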
def geo_to_string(value):
"""
Convert geo objects to strings, because they don't support equality.
"""
if isinstance(value, list):
return [geo_to_string(x) for x in value]
if isinstance(value, dict):
result = {}
for dict_key, dict_value in value.items():
result[dict_key] = geo_to_string(dict_value)
return result
if isinstance(value, aerospike.GeoJSON):
return str(value)
return value | 9566a980128767ea4b2d651c88d715673e7ef005 | 3,652,662 |
import argparse
import sys
def get_args():
"""Get command-line arguments"""
parser = argparse.ArgumentParser(
description="I'm a snitch",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('FILE1',
help='Input file 1',
metavar='FILE',
type=argparse.FileType('rt'))
parser.add_argument('FILE2',
help='Input file 2',
metavar='FILE',
type=argparse.FileType('rt'))
parser.add_argument('-o',
'--outfile',
help='Output filename',
metavar='FILE',
type=argparse.FileType('wt'),
default=sys.stdout)
return parser.parse_args() | 5ff11678d2d45f2cbb51e71841195009bb45a0f2 | 3,652,663 |
import http
def page_not_found(request, template_name='404.html'):
"""
Default 404 handler.
Templates: `404.html`
Context:
request_path
The path of the requested URL (e.g., '/app/pages/bad_page/')
"""
t = loader.get_template(template_name) # You need to create a 404.html template.
return http.HttpResponseNotFound(t.render(RequestContext(request, {'request_path': request.path}))) | de0348f3c3bf963f1614d13ffc32bb79d30437b0 | 3,652,664 |
import csv
def load_data(filename):
"""
Load shopping data from a CSV file `filename` and convert into a list of
evidence lists and a list of labels. Return a tuple (evidence, labels).
evidence should be a list of lists, where each list contains the
following values, in order:
- Administrative, an integer
- Administrative_Duration, a floating point number
- Informational, an integer
- Informational_Duration, a floating point number
- ProductRelated, an integer
- ProductRelated_Duration, a floating point number
- BounceRates, a floating point number
- ExitRates, a floating point number
- PageValues, a floating point number
- SpecialDay, a floating point number
- Month, an index from 0 (January) to 11 (December)
- OperatingSystems, an integer
- Browser, an integer
- Region, an integer
- TrafficType, an integer
- VisitorType, an integer 0 (not returning) or 1 (returning)
- Weekend, an integer 0 (if false) or 1 (if true)
labels should be the corresponding list of labels, where each label
is 1 if Revenue is true, and 0 otherwise.
"""
with open("shopping.csv") as f:
reader = csv.reader(f)
next(reader)
months = ["Jan", "Feb", "Mar", "Apr", "May", "June",
"Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
data = []
for row in reader:
data.append({
"evidence": [int(row[0]), float(row[1]), int(row[2]), float(row[3]), int(row[4]), float(row[5]), float(row[6]), float(row[7]), float(row[8]), float(row[9]),
months.index(row[10]), int(row[11]), int(row[12]), int(row[13]), int(row[14]), 0 if row[15] == "New_Visitor" else 1, 0 if row[16] == "FALSE" else 1],
"label": 0 if row[17] == "FALSE" else 1
})
evidence = [row["evidence"] for row in data]
labels = [row["label"] for row in data]
return (evidence, labels) | eb2465d0ebfb7398a3742d8fb79463d3d7b076f0 | 3,652,665 |
def instance_mock(cls, request, name=None, spec_set=True, **kwargs):
"""
Return a mock for an instance of *cls* that draws its spec from the class
and does not allow new attributes to be set on the instance. If *name* is
missing or |None|, the name of the returned |Mock| instance is set to
*request.fixturename*. Additional keyword arguments are passed through to
the Mock() call that creates the mock.
"""
if name is None:
name = request.fixturename
return create_autospec(cls, _name=name, spec_set=spec_set, instance=True,
**kwargs) | ccc60e2f90f63a131059714b3ddb213246807a0b | 3,652,666 |
import functools
import torch
def auto_fp16(apply_to=None, out_fp32=False):
"""Decorator to enable fp16 training automatically.
This decorator is useful when you write custom modules and want to support
mixed precision training. If inputs arguments are fp32 tensors, they will
be converted to fp16 automatically. Arguments other than fp32 tensors are
ignored.
Args:
apply_to (Iterable, optional): The argument names to be converted.
`None` indicates all arguments.
out_fp32 (bool): Whether to convert the output back to fp32.
Example:
>>> import torch.nn as nn
>>> class MyModule1(nn.Module):
>>>
>>> # Convert x and y to fp16
>>> @auto_fp16()
>>> def forward(self, x, y):
>>> pass
>>> import torch.nn as nn
>>> class MyModule2(nn.Module):
>>>
>>> # convert pred to fp16
>>> @auto_fp16(apply_to=('pred', ))
>>> def do_something(self, pred, others):
>>> pass
"""
def auto_fp16_wrapper(old_func):
@functools.wraps(old_func)
def new_func(*args, **kwargs):
# check if the module has set the attribute `fp16_enabled`, if not,
# just fallback to the original method.
if not isinstance(args[0], torch.nn.Module):
raise TypeError('@auto_fp16 can only be used to decorate the '
'method of nn.Module')
if not (hasattr(args[0], 'fp16_enabled') and args[0].fp16_enabled):
return old_func(*args, **kwargs)
# get the arg spec of the decorated method
args_info = getfullargspec(old_func)
# get the argument names to be casted
args_to_cast = args_info.args if apply_to is None else apply_to
# convert the args that need to be processed
new_args = []
# NOTE: default args are not taken into consideration
if args:
arg_names = args_info.args[:len(args)]
for i, arg_name in enumerate(arg_names):
if arg_name in args_to_cast:
new_args.append(
cast_tensor_type(args[i], torch.float, torch.half))
else:
new_args.append(args[i])
# convert the kwargs that need to be processed
new_kwargs = {}
if kwargs:
for arg_name, arg_value in kwargs.items():
if arg_name in args_to_cast:
new_kwargs[arg_name] = cast_tensor_type(
arg_value, torch.float, torch.half)
else:
new_kwargs[arg_name] = arg_value
# apply converted arguments to the decorated method
output = old_func(*new_args, **new_kwargs)
# cast the results back to fp32 if necessary
if out_fp32:
output = cast_tensor_type(output, torch.half, torch.float)
return output
return new_func
return auto_fp16_wrapper | 1b3292ce6382f82b210d07ec48557f3c06ed7259 | 3,652,667 |
def get_loci_score(state, loci_w, data_w, species_w, better_loci,
species_counts, total_individuals, total_species,
individuals):
"""
Scoring function with user-specified weights.
:param state:
:param loci_w: the included proportion of loci from the original data set (higher is better).
:param data_w: 1 - the proportion of missing data for the selected loci (higher is better).
:param species_w: the average proportion of species represented per locus (higher is better).
:param better_loci:
:param species_counts:
:param total_individuals:
:param total_species:
:param individuals:
:return:
"""
num_loci = sum(state)
species_loci_counts = {species: 0 for species in species_counts}
individual_count = 0
missing_counts = {individual: 0 for individual in individuals}
total_loci = len(better_loci)
for i in range(total_loci):
if state[i] == 0:
continue
found_species = set()
found_individuals = set()
lines = better_loci[i].split("\n")
for line in lines:
if line == "":
continue
(individual, sequence) = line[1:].split()
found_individuals.add(individual)
individual_count += 1
species = individual.split("_")[-1]
found_species.add(species)
for species in found_species:
species_loci_counts[species] += 1
# Keep track of the amount of missing data for each individual.
for individual in individuals:
if individual not in found_individuals:
missing_counts[individual] += 1
num_missing = num_loci * total_individuals - individual_count
score_comps = [loci_w * float(num_loci) / float(total_loci),
data_w * (1 - float(num_missing) / float(num_loci * total_individuals)),
species_w * float(sum([species_loci_counts[species] for species in species_loci_counts])) / (float(num_loci) * float(total_species))]
return score_comps, missing_counts | e5e12bf2f9f76e994289a33b52d4cdc3d641ec8e | 3,652,668 |
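A small worked call for get_loci_score above with hypothetical toy data; each locus is a string of '>individual_species sequence' lines, matching the parsing inside the function:
better_loci = [">ind1_A ACGT\n>ind2_B ACGA\n", ">ind1_A ACGT\n"]
individuals = ["ind1_A", "ind2_B"]
species_counts = {"A": 0, "B": 0}   # only the keys are used by the function
state = [1, 1]                      # keep both loci

score_comps, missing = get_loci_score(state, 1.0, 1.0, 1.0, better_loci,
                                      species_counts, len(individuals), 2,
                                      individuals)
print(score_comps)  # [1.0, 0.75, 0.75]
print(missing)      # {'ind1_A': 0, 'ind2_B': 1}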
import screed
def make_per_cell_fastqs(
reads,
outdir,
channel_id,
output_format,
cell_barcode_pattern,
good_barcodes_filename):
"""Write the filtered cell barcodes in reads
from barcodes_with_significant_umi_file
fastq.gzs to outdir
Parameters
----------
reads : str
read records from fasta path
greater than or equal to min_umi_per_cell
outdir: str
write the per cell barcode fastq.gzs to outdir
channel_id: str
prefix to fastq
output_format: str
format of output files, can be either fastq or fastq.gz
cell_barcode_pattern: regex pattern
cell barcode pattern to detect in the record name
barcodes_with_significant_umi_file: list
list of containing barcodes that have significant umi counts
Returns
-------
Write the filtered cell barcodes in reads
from barcodes_with_significant_umi_file
fastq.gzs to outdir
"""
if channel_id is None:
channel_id = ""
good_barcodes = read_barcodes_file(good_barcodes_filename)
fastqs = []
record_count = 0
    for record in screed.open(reads):
        record_count += 1
    # return early if the input contains no records at all
    if record_count == 0:
        return fastqs
good_cell_barcode_records = get_good_cell_barcode_records(
reads, good_barcodes, cell_barcode_pattern)
for cell_barcode, records in good_cell_barcode_records.items():
if channel_id == "":
filename = "{}/{}.{}".format(
outdir, cell_barcode, output_format)
else:
filename = "{}/{}_{}.{}".format(
outdir, channel_id, cell_barcode, output_format)
write_fastq(records, filename)
fastqs.append(filename)
return fastqs | 16f45364e8a081addf7b126c3d5af4fb00de4bdc | 3,652,669 |
import matplotlib.pyplot as plt
import numpy as np
def plot_roc_curve(data, cls_name, title='ROC curve'):
"""
:param data: list [(fpr, tpr), (), ...]
:param cls_name: tuple of names for each class
:param title: plot title
:return:
"""
def cal_auc(tpr, fpr):
return np.trapz(tpr, fpr)
def plot_single_curve(fpr, tpr, cls_ind):
auc = cal_auc(tpr, fpr)
plt.plot(fpr, tpr, label="%s ROC curve (area = %.2f)" % (cls_name[cls_ind], auc))
return auc
assert isinstance(data, list)
if len(cls_name) == 2:
assert len(data) == 1
else:
assert len(data) == len(cls_name)
fig = plt.figure()
args = [(fpr, tpr, i) for i, (fpr, tpr) in enumerate(data)]
if len(cls_name) > 2:
auc = np.mean(list(map(lambda x: plot_single_curve(*x), args)))
else:
fpr, tpr = data[0]
auc = cal_auc(tpr, fpr)
plt.plot(fpr, tpr, label="%s vs. %s ROC curve (area = %.2f)" % (cls_name[1], cls_name[0], auc))
ax = plt.gca()
ax.plot([0, 1], [0, 1], ls="--", c=".3")
plt.title(title + ' (mean area = %.4f)' % auc)
plt.ylabel('True positive rate')
plt.xlabel('False positive rate')
plt.legend()
return fig, auc | 5b2a56c3f193954173431341185b3bdc53c33c7a | 3,652,670 |
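A minimal sketch calling plot_roc_curve above with a hypothetical two-class result (a single fpr/tpr pair):
import numpy as np

fpr = np.array([0.0, 0.1, 0.4, 1.0])
tpr = np.array([0.0, 0.6, 0.9, 1.0])
fig, auc = plot_roc_curve([(fpr, tpr)], cls_name=('negative', 'positive'),
                          title='Toy ROC curve')
print(auc)  # trapezoidal area under the toy curve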
import time
def train(elastic_coordinator, train_step, state):
"""
This is the main elastic data parallel loop. It starts from an initial 'state'.
Each iteration calls 'train_step' and returns a new state. 'train_step'
has the following interface:
state, worker_stats = train_step(state)
When 'train_step' exhausts all the data, a StopIteration exception should be
thrown.
"""
assert isinstance(state, torchelastic.State)
failure_count = 0
rank = 0
checkpoint_util = CheckpointUtil(elastic_coordinator)
while not elastic_coordinator.should_stop_training():
# See: https://github.com/pytorch/elastic/issues/7
if failure_count >= MAX_FAILURES:
e = RuntimeError(
"Exceeded max number of recoverable failures: {}".format(failure_count)
)
elastic_coordinator.on_error(e)
raise e
start_time = time.time()
snapshot = state.capture_snapshot()
try:
store, rank, world_size = elastic_coordinator.rendezvous_barrier()
elastic_coordinator.init_process_group()
# load checkpoint if necessary
state = checkpoint_util.load_checkpoint(state, rank)
state_sync_start_time = time.time()
state.sync(world_size, rank)
publish_metric(
"torchelastic",
"state_sync.duration.ms",
get_elapsed_time_ms(state_sync_start_time),
)
checkpoint_util.set_checkpoint_loaded()
elastic_coordinator.barrier()
log.info("Rank {0} synced state with other nodes".format(rank))
except StopException:
log.info("Rank {0} received stopped signal. Exiting training.".format(rank))
break
except RuntimeError as e:
# See: https://github.com/pytorch/elastic/issues/7
elastic_coordinator.on_error(e)
state.apply_snapshot(snapshot)
failure_count += 1
continue
except (NonRetryableException, Exception) as e:
elastic_coordinator.on_error(e)
raise
finally:
publish_metric(
"torch_elastic",
"outer_train_loop.duration.ms",
get_elapsed_time_ms(start_time),
)
# Note that the loop might not even start if the rendezvous was closed
# due to one of the trainer processes completing earlier.
while not elastic_coordinator.should_stop_training():
start_time = time.time()
snapshot = state.capture_snapshot()
try:
train_step_start_time = time.time()
state, worker_stats = train_step(state)
publish_metric(
"torchelastic",
"train_step.duration.ms",
get_elapsed_time_ms(train_step_start_time),
)
elastic_coordinator.monitor_progress(state, worker_stats)
checkpoint_util.save_checkpoint(state, rank)
if elastic_coordinator.should_rendezvous(state):
log.info("Rank {0} will re-rendezvous".format(rank))
# Executor told us, for whatever reason, to re-rendezvous.
# This can occur if another node encounters an error,
# if a new node becomes available to train,
# or potentially even if it's time to checkpoint.
break
elastic_coordinator.report_progress(state)
except StopIteration:
log.info("Rank {0} finished all the iterations".format(rank))
# Current trainer process completed processing assigned subset of
# examples. Other trainer processes need to stop as well.
# This sends an explicit signal on training completion.
elastic_coordinator.signal_training_done()
break
except RuntimeError as e:
# See: https://github.com/pytorch/elastic/issues/7
elastic_coordinator.on_error(e)
state.apply_snapshot(snapshot)
failure_count += 1
break
except Exception as e:
elastic_coordinator.on_error(e)
raise
finally:
publish_metric(
"torchelastic",
"inner_train_loop.duration.ms",
get_elapsed_time_ms(start_time),
)
if elastic_coordinator.should_stop_training():
return state
else:
# This is an error condition and should not happen.
raise Exception(
"Exiting without training complete. rank: {0},"
" should_stop_training: {1}".format(
rank, elastic_coordinator.should_stop_training()
)
) | ea7886bba7db96ff85e1687b3b3e24cbc8f8af9d | 3,652,671 |
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import animation
def make_animation(data_list, **kwargs):
"""
Creates an animation from list of McStasData objects
Parameters
----------
data_list : list of McStasData
List of McStasData objects for animation
Keyword arguments
-----------------
filename : str
Filename for saving the gif
fps : float
Number of frames per second
"""
figsize, data_list = _handle_kwargs(data_list, **kwargs)
if "fps" in kwargs:
period_in_ms = 1000 / kwargs["fps"]
else:
period_in_ms = 200
# find limits for entire dataset
maximum_values = []
minimum_values = []
is_1D = False
is_2D = False
for data in data_list:
if isinstance(data.metadata.dimension, int):
is_1D = True
elif len(data.metadata.dimension) == 2:
is_2D = True
min_value, max_value = _find_min_max_I(data)
# When data empty, min and max value is 0, skip
if not (min_value == 0 and max_value == 0):
minimum_values.append(min_value)
maximum_values.append(max_value)
if is_1D and is_2D:
raise ValueError(
"Both 1D and 2D data in animation, only one allowed.")
if len(minimum_values) == 0:
raise ValueError(
"No data found for animation!")
maximum_value = np.array(maximum_values).max()
minimum_value = np.array(minimum_values).min()
if "orders_of_mag" in kwargs:
orders_of_mag = kwargs["orders_of_mag"]
mag_diff = np.log10(maximum_value) - np.log10(minimum_value)
if mag_diff > orders_of_mag:
minimum_value_log10 = np.log10(maximum_value) - orders_of_mag
minimum_value = 10**(minimum_value_log10)
kwargs["fixed_minimum_value"] = minimum_value
kwargs["fixed_maximum_value"] = maximum_value
fig, ax0 = plt.subplots(figsize=figsize)
im = _plot_fig_ax(data_list[0], fig, ax0, **kwargs)
def animate_2D(index):
data = data_list[index]
intensity = data.Intensity
im.set_array(intensity.ravel())
return im,
anim = animation.FuncAnimation(fig, animate_2D,
frames=len(data_list),
interval=period_in_ms,
blit=False, repeat=True)
plt.show()
# The animation doesn't play unless it is saved. Bug.
if "filename" in kwargs:
filename = kwargs["filename"]
if not filename.endswith(".gif"):
filename = filename + ".gif"
# check if imagemagick available?
print("Saving animation with filename : \"" + filename + "\"")
anim.save(filename, writer="imagemagick") | 334a04f660018d2a05da1ceadf5ef2fdc8e8cdc6 | 3,652,672 |
import numpy as np
import torch
def softmax_mask(w: torch.Tensor,
dim=-1,
mask: torch.BoolTensor = None
) -> torch.Tensor:
"""
Allows having -np.inf in w to mask out, or give explicit bool mask
:param w:
:param dim:
:param mask:
:return:
"""
if mask is None:
mask = w != -np.inf
minval = torch.min(w[~mask]) # to avoid affecting torch.max
w1 = w.clone()
w1[~mask] = minval
# to prevent over/underflow
w1 = w1 - torch.max(w1, dim=dim, keepdim=True)[0]
w1 = torch.exp(w1)
p = w1 / torch.sum(w1 * mask.float(), dim=dim, keepdim=True)
p[~mask] = 0.
return p | 6cf295b308040d3ad4019ab8292b37a679fb6e27 | 3,652,673 |
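A short sketch for softmax_mask above, using -inf entries to mask positions in a hypothetical score tensor:
import numpy as np
import torch

w = torch.tensor([[1.0, 2.0, -np.inf],
                  [0.5, -np.inf, 0.5]])
p = softmax_mask(w, dim=-1)
print(p)  # masked positions get probability 0 and each row sums to 1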
from jack.readers.multiple_choice.shared import MultipleChoiceSingleSupportInputModule
from jack.readers.natural_language_inference.modular_nli_model import ModularNLIModel
from jack.readers.multiple_choice.shared import SimpleMCOutputModule
from typing import Union
def modular_nli_reader(resources_or_conf: Union[dict, SharedResources] = None):
"""Creates a Modular NLI reader instance. Model defined in config."""
shared_resources = create_shared_resources(resources_or_conf)
input_module = MultipleChoiceSingleSupportInputModule(shared_resources)
model_module = ModularNLIModel(shared_resources)
output_module = SimpleMCOutputModule(shared_resources)
return TFReader(shared_resources, input_module, model_module, output_module) | 03032966355fcb3405cbc2f311908d7be1f2485d | 3,652,674 |
def move_box_and_gtt(n_targets=3,
time_limit=DEFAULT_TIME_LIMIT,
control_timestep=DEFAULT_CONTROL_TIMESTEP):
"""Loads `move_box_or_gtt` task."""
return _make_predicate_task(
n_boxes=1,
n_targets=n_targets,
include_gtt_predicates=True,
include_move_box_predicates=True,
max_num_predicates=2,
control_timestep=control_timestep,
time_limit=time_limit) | 4ae2377d0449b93d3dc0ff34e18010c7bff004d8 | 3,652,675 |
import json
def get_body(data_dict, database_id, media_status, media_type):
"""
    Get the JSON body data.
:param media_type:
:param media_status:
:param data_dict:
:param database_id:
:return:
"""
status = ""
music_status = ""
if media_status == MediaStatus.WISH.value:
status = "想看"
music_status = "想听"
elif media_status == MediaStatus.DO.value:
status = "在看"
music_status = "在听"
elif media_status == MediaStatus.COLLECT.value:
status = "看过"
music_status = "听过"
else:
status = ""
music_status = ""
    log_detail.info(f"【RUN】- {media_type} data assembled into JSON format")
rating = data_dict[MediaInfo.RATING_F.value]
# rating = float(rat) if rat == "" else 0
if media_type == MediaType.MUSIC.value:
body = {
"parent": {
"type": "database_id",
"database_id": f"{database_id}"
},
"properties": {
"音乐": {
"title": [{
"type": "text",
"text": {
"content": data_dict[MediaInfo.TITLE.value]
}
}]
},
"封面": {
"files": [{
"type": "external",
"name": data_dict[MediaInfo.IMG.value][-13:],
"external": {
"url": data_dict[MediaInfo.IMG.value]
}
}]
},
"表演者": {
"rich_text": [{
"type": "text",
"text": {
"content": data_dict[MediaInfo.PERFORMER.value]
}
}]
},
"发行时间": {
"select": {
"name": data_dict[MediaInfo.RELEASE_DATE.value][0:4]
}
},
"标记状态": {
"select": {
"name": f"{music_status}"
}
},
"豆瓣链接": {
"url": f"{data_dict[MediaInfo.URL.value]}"
}
}
}
        # Rating
if data_dict[MediaInfo.RATING_F.value]:
rating_f = float(data_dict[MediaInfo.RATING_F.value])
tmp_dict = get_non_null_params_body(property_type=DatabaseProperty.NUMBER.value,
property_params=rating_f)
body["properties"]["评分"] = tmp_dict
        # Number of ratings
if data_dict[MediaInfo.ASSESS.value]:
tmp_dict = get_non_null_params_body(property_type=DatabaseProperty.NUMBER.value,
property_params=data_dict[MediaInfo.ASSESS.value])
body["properties"]["评分人数"] = tmp_dict
return body
elif media_type == MediaType.MOVIE.value:
        # Director / screenwriter / starring
text_director = ' / '.join(data_dict[MediaInfo.DIRECTOR.value])
text_screenwriter = ' / '.join(data_dict[MediaInfo.SCREENWRITER.value])
text_starring = ' / '.join(data_dict[MediaInfo.STARRING.value])
str_type = get_multi_select_body(data_dict[MediaInfo.MOVIE_TYPE.value])
json_type = json.loads(str_type)
str_c_or_r = get_multi_select_body(data_dict[MediaInfo.C_OR_R.value])
json_c_or_r = json.loads(str_c_or_r)
body = {
"parent": {
"type": "database_id",
"database_id": f"{database_id}"
},
"properties": {
"名字": {
"title": [{
"type": "text",
"text": {
"content": data_dict[MediaInfo.TITLE.value]
}
}]
},
"导演": {
"rich_text": [{
"type": "text",
"text": {
"content": text_director
}
}]
},
"编剧": {
"rich_text": [{
"type": "text",
"text": {
"content": text_screenwriter
}
}]
},
"主演": {
"rich_text": [{
"type": "text",
"text": {
"content": text_starring
}
}]
},
"类型": {
"multi_select": json_type
},
"国家地区": {
"multi_select": json_c_or_r
},
"IMDb": {
"url": f"https://www.imdb.com/title/{data_dict[MediaInfo.IMDB.value]}"
},
"标记状态": {
"select": {
"name": f"{status}"
}
},
"分类": {
"select": {
"name": f"{data_dict[MediaInfo.CATEGORIES.value]}"
}
},
"简介": {
"rich_text": [{
"type": "text",
"text": {
"content": data_dict[MediaInfo.RELATED.value]
}
}]
},
"封面": {
"files": [{
"type": "external",
"name": data_dict[MediaInfo.IMG.value][-15:],
"external": {
"url": data_dict[MediaInfo.IMG.value]
}
}]
},
"豆瓣链接": {
"url": f"{data_dict[MediaInfo.URL.value]}"
}
}
}
        # Rating
if data_dict[MediaInfo.RATING_F.value]:
rating_f = float(data_dict[MediaInfo.RATING_F.value])
tmp_dict = get_non_null_params_body(property_type=DatabaseProperty.NUMBER.value,
property_params=rating_f)
body["properties"]["评分"] = tmp_dict
        # Number of ratings
if data_dict[MediaInfo.ASSESS.value]:
tmp_dict = get_non_null_params_body(property_type=DatabaseProperty.NUMBER.value,
property_params=data_dict[MediaInfo.ASSESS.value])
body["properties"]["评分人数"] = tmp_dict
return body
elif media_type == MediaType.BOOK.value:
body = {
"parent": {
"type": "database_id",
"database_id": f"{database_id}"
},
"properties": {
"书名": {
"title": [{
"type": "text",
"text": {
"content": data_dict[MediaInfo.TITLE.value]
}
}]
},
"封面": {
"files": [{
"type": "external",
"name": data_dict[MediaInfo.IMG.value][-13:],
"external": {
"url": data_dict[MediaInfo.IMG.value]
}
}]
},
"作者": {
"rich_text": [{
"type": "text",
"text": {
"content": data_dict[MediaInfo.AUTHOR.value]
}
}]
},
"出版年份": {
"select": {
"name": data_dict[MediaInfo.PUB_DATE.value][0:4]
}
},
"标记状态": {
"select": {
"name": f"{status}"
}
},
"豆瓣链接": {
"url": f"{data_dict[MediaInfo.URL.value]}"
}
}
}
# ISBN
if data_dict[MediaInfo.ISBN.value]:
tmp_dict = get_non_null_params_body(property_type=DatabaseProperty.URL.value,
property_params=data_dict[MediaInfo.ISBN.value])
body["properties"]["ISBN"] = tmp_dict
        # Price
if data_dict[MediaInfo.PRICE.value]:
tmp_float = float(data_dict[MediaInfo.PRICE.value])
tmp_dict = get_non_null_params_body(property_type=DatabaseProperty.NUMBER.value,
property_params=tmp_float)
body["properties"]["价格"] = tmp_dict
        # Rating
if data_dict[MediaInfo.RATING_F.value]:
rating_f = float(data_dict[MediaInfo.RATING_F.value])
tmp_dict = get_non_null_params_body(property_type=DatabaseProperty.NUMBER.value,
property_params=rating_f)
body["properties"]["评分"] = tmp_dict
        # Number of ratings
if data_dict[MediaInfo.ASSESS.value]:
tmp_dict = get_non_null_params_body(property_type=DatabaseProperty.NUMBER.value,
property_params=data_dict[MediaInfo.ASSESS.value])
body["properties"]["评分人数"] = tmp_dict
        # Number of pages
if data_dict[MediaInfo.PAGES.value]:
pages_num = int(data_dict[MediaInfo.PAGES.value])
tmp_dict = get_non_null_params_body(property_type=DatabaseProperty.NUMBER.value,
property_params=pages_num)
body["properties"]["页数"] = tmp_dict
        # Publisher
if data_dict[MediaInfo.PUBLISHER.value]:
tmp_dict = get_non_null_params_body(property_type=DatabaseProperty.SELECT.value,
property_params=data_dict[MediaInfo.PUBLISHER.value])
body["properties"]["出版社"] = tmp_dict
return body | 4d09b4000c47dc1dc56aa73ca7b176d40f360e97 | 3,652,676 |
def _find_full_periods(events, quantity, capacity):
"""Find the full periods."""
full_periods = []
used = 0
full_start = None
for event_date in sorted(events):
used += events[event_date]['quantity']
if not full_start and used + quantity > capacity:
full_start = event_date
elif full_start and used + quantity <= capacity:
full_periods.append((full_start, event_date))
full_start = None
return full_periods | 16c36ce8cc5a91031117534e66c67605e13e3bd4 | 3,652,677 |
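A small worked call for _find_full_periods above with hypothetical event data; with capacity 4, an extra quantity of 2 only fits again once the first booking is released:
events = {
    "2024-01-01": {"quantity": 3},   # most of the capacity taken from this date
    "2024-01-05": {"quantity": -3},  # booking released
}
print(_find_full_periods(events, quantity=2, capacity=4))
# [('2024-01-01', '2024-01-05')]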
import random
def crop_image(img, target_size, center):
""" crop_image """
height, width = img.shape[:2]
size = target_size
    if center:
        # integer division keeps the slice indices as ints
        w_start = (width - size) // 2
        h_start = (height - size) // 2
else:
w_start = random.randint(0, width - size)
h_start = random.randint(0, height - size)
w_end = w_start + size
h_end = h_start + size
img = img[h_start:h_end, w_start:w_end, :]
return img | c61d4410155501e3869f2e243af22d7fc13c10ee | 3,652,678 |
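A minimal sketch for crop_image above on a hypothetical random image array:
import numpy as np

img = np.random.randint(0, 256, size=(256, 320, 3), dtype=np.uint8)
center_crop = crop_image(img, target_size=224, center=True)
random_crop = crop_image(img, target_size=224, center=False)
print(center_crop.shape, random_crop.shape)  # (224, 224, 3) both times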
import bokeh.models
import bokeh.plotting
import numpy as np
def _histogram(
data=None,
bins="freedman-diaconis",
p=None,
density=False,
kind="step",
line_kwargs={},
patch_kwargs={},
**kwargs,
):
"""
Make a plot of a histogram of a data set.
Parameters
----------
data : array_like
1D array of data to make a histogram out of
bins : int, array_like, or str, default 'freedman-diaconis'
If int or array_like, setting for `bins` kwarg to be passed to
`np.histogram()`. If 'exact', then each unique value in the
data gets its own bin. If 'integer', then integer data is
assumed and each integer gets its own bin. If 'sqrt', uses the
square root rule to determine number of bins. If
`freedman-diaconis`, uses the Freedman-Diaconis rule for number
of bins.
p : bokeh.plotting.Figure instance, or None (default)
If None, create a new figure. Otherwise, populate the existing
figure `p`.
density : bool, default False
If True, normalized the histogram. Otherwise, base the histogram
on counts.
kind : str, default 'step'
The kind of histogram to display. Allowed values are 'step' and
'step_filled'.
line_kwargs : dict
Any kwargs to be passed to p.line() in making the line of the
histogram.
patch_kwargs : dict
Any kwargs to be passed to p.patch() in making the fill of the
histogram.
kwargs : dict
All other kwargs are passed to bokeh.plotting.figure()
Returns
-------
output : Bokeh figure
Figure populated with histogram.
"""
if data is None:
raise RuntimeError("Input `data` must be specified.")
# Instantiate Bokeh plot if not already passed in
if p is None:
y_axis_label = kwargs.pop("y_axis_label", "density" if density else "count")
if "plot_height" not in kwargs and "frame_height" not in kwargs:
kwargs["frame_height"] = 275
if "plot_width" not in kwargs and "frame_width" not in kwargs:
kwargs["frame_width"] = 400
y_range = kwargs.pop("y_range", bokeh.models.DataRange1d(start=0))
p = bokeh.plotting.figure(y_axis_label=y_axis_label, y_range=y_range, **kwargs)
# Compute histogram
bins = _bins_to_np(data, bins)
e0, f0 = _compute_histogram(data, bins, density)
if kind == "step":
p.line(e0, f0, **line_kwargs)
if kind == "step_filled":
x2 = [e0.min(), e0.max()]
y2 = [0, 0]
p = fill_between(e0, f0, x2, y2, show_line=True, p=p, patch_kwargs=patch_kwargs)
return p | cc766f3367c0c7b5c3c1f56c120f8e682c14a8fb | 3,652,679 |
def getGarbageBlock():
"""获取正在标记的众生区块
{
?block_id=
}
返回 json
{
"is_success":bool,
"data": {"id": ,
"election_period":
"beings_block_id":
"votes":
"vote_list":
"status":
"create_time":
"""
try:
beings_block_id = request.args.get("block_id")
res = blockOfGarbage.getGarbageBlockQueue(beings_block_id)
if res is None:
http_message = HttpMessage(is_success=False, data=res)
else:
http_message = HttpMessage(is_success=True, data=res)
return http_message.getJson()
except Exception as err:
print(err)
        http_message = HttpMessage(is_success=False, data="Invalid parameters")
return http_message.getJson() | 9faff268d1f492adde467a100d93e99d0b2cc583 | 3,652,680 |
def partition(items, low, high):
"""Return index `p` after in-place partitioning given items in range
`[low...high]` by choosing a pivot (TODO: document your method here) from
that range, moving pivot into index `p`, items less than pivot into range
`[low...p-1]`, and items greater than pivot into range `[p+1...high]`.
TODO: Running time: ??? O(n)
TODO: Memory usage: ??? O(1)
# TODO: Choose a pivot any way and document your method in docstring above
# TODO: Loop through all items in range [low...high]
# TODO: Move items less than pivot into front of range [low...p-1]
# TODO: Move items greater than pivot into back of range [p+1...high]
# TODO: Move pivot item into final position [p] and return index p
"""
divider = low #keeps track of the pivot index used for comparision
pivot = high #default pivot index
for i in range(low, high):
# Move items less than pivot into front of range [low...p-1]
# Move items greater than pivot into back of range [p+1...high]
if items[i] < items[pivot]: #this does the work
items[i], items[divider] = items[divider], items[i] # by moving the items less than
divider += 1 # and leaving items greater where they are
# Move pivot item into final position [p] and return index p
items[pivot], items[divider] = items[divider], items[pivot]
return divider | 23e98f9af46239e74ce990f7c0873159d3dff43e | 3,652,681 |
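A short sketch using partition above inside a plain recursive quicksort; the input list is hypothetical:
def quick_sort(items, low, high):
    # sorts items[low..high] in place with the partition helper above
    if low < high:
        p = partition(items, low, high)
        quick_sort(items, low, p - 1)
        quick_sort(items, p + 1, high)

data = [5, 2, 9, 1, 7, 3]
quick_sort(data, 0, len(data) - 1)
print(data)  # [1, 2, 3, 5, 7, 9]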
from typing import Dict
import itertools
def cluster_confusion_matrix(pred_cluster:Dict, target_cluster:Dict) -> EvalUnit:
""" simulate confusion matrix
Args:
pred_cluster: Dict element: cluster_id (cluster_id from 0 to max_size)| predicted clusters
target_cluster: Dict element:cluster_id (cluster_id from 0 to max_size) | target clusters
Returns:
In order to return detailed data, It will return a EvalUnit,
"""
pred_elements = list(pred_cluster.keys())
target_elements = list(target_cluster.keys())
it = itertools.product(pred_elements,target_elements)
tp,fp,tn,fn = 0,0,0,0
for x,y in it:
        if x != y:  # only compare distinct element pairs
            x_cluster = pred_cluster[x]
            x_cluster_ = target_cluster[x]
            y_cluster = pred_cluster[y]
            y_cluster_ = target_cluster[y]
if x_cluster == y_cluster and x_cluster_ == y_cluster_:
tp += 1
elif x_cluster != y_cluster and x_cluster_ != y_cluster_:
tn += 1
elif x_cluster == y_cluster and x_cluster_ != y_cluster_:
fp += 1
else:
fn +=1
return EvalUnit(tp,tn,fp,fn,'rand_index') | b3a5afb5c01cf5cb07c43e3666111d06e9229259 | 3,652,682 |
def change_short(x, y, limit):
"""Return abs(y - x) as a fraction of x, but with a limit.
>>> x, y = 2, 5
>>> abs(y - x) / x
1.5
>>> change_short(x, y, 100)
1.5
>>> change_short(x, y, 1) # 1 is smaller than 1.5
1
>>> x = 0
>>> change_short(x, y, 100) # No error, even though abs(y - x) / x divides by 0!
100
"""
return limited(x, limit if (x == 0) else abs(y - x) / x, limit) | 1d4965650f12c95ba54f1ce38fc63e1e6eb39573 | 3,652,683 |
import copy
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from scipy import optimize
from scipy.stats import chi2
import tqdm
import uncertainties as u
def generate_correlation_map(f, x_data, y_data, method='chisquare_spectroscopic', filter=None, resolution_diag=20, resolution_map=15, fit_args=tuple(), fit_kws={}, distance=5, npar=1):
"""Generates a correlation map for either the chisquare or the MLE method.
On the diagonal, the chisquare or loglikelihood is drawn as a function of one fixed parameter.
Refitting to the data each time gives the points on the line. A dashed line is drawn on these
plots, with the intersection with the plots giving the correct confidence interval for the
parameter. In solid lines, the interval estimated by the fitting routine is drawn.
On the offdiagonal, two parameters are fixed and the model is again fitted to the data.
The change in chisquare/loglikelihood is mapped to 1, 2 and 3 sigma contourmaps.
Parameters
----------
f: :class:`.BaseModel`
Instance of the model for which the contour map has to be generated.
x_data: array_like or list of array_likes
Data on the x-axis for the fit. Must be appropriate input for *f*.
y_data: array_like or list of array_likes
Data on the y-axis for the fit. Must be appropriate input for *f*.
Other parameters
----------------
method: {'chisquare', 'chisquare_spectroscopic', mle'}
Chooses between generating the map for the chisquare routine or for
the likelihood routine.
filter: list of strings
Only the parameters matching the names given in this list will be used
to generate the maps.
resolution_diag: int
Number of points for the line plot on each diagonal.
resolution_map: int
Number of points along each dimension for the meshgrids.
fit_kws: dictionary
Dictionary of keywords to pass on to the fitting routine.
npar: int
Number of parameters for which simultaneous predictions need to be made.
Influences the uncertainty estimates from the parabola."""
# Save the original goodness-of-fit and parameters for later use
mapping = {'chisquare_spectroscopic': (fitting.chisquare_spectroscopic_fit, 'chisqr_chi'),
'chisquare': (fitting.chisquare_fit, 'chisqr_chi'),
'mle': (fitting.likelihood_fit, 'likelihood_mle')}
func, attr = mapping.pop(method.lower(), (fitting.chisquare_spectroscopic_fit, 'chisqr_chi'))
title = '{}\n${}_{{-{}}}^{{+{}}}$'
title_e = '{}\n$({}_{{-{}}}^{{+{}}})e{}$'
fit_kws['verbose'] = False
fit_kws['hessian'] = False
to_save = {'mle': ('fit_mle', 'result_mle')}
to_save = to_save.pop(method.lower(), ('chisq_res_par', 'ndof_chi', 'redchi_chi'))
saved = [copy.deepcopy(getattr(f, attr)) for attr in to_save]
# func(f, x_data, y_data, *fit_args, **fit_kws)
orig_value = getattr(f, attr)
orig_params = copy.deepcopy(f.params)
state = fitting._get_state(f, method=method.lower())
ranges = {}
chifunc = lambda x: chi2.cdf(x, npar) - 0.682689492 # Calculate 1 sigma boundary
boundary = optimize.root(chifunc, npar).x[0] * 0.5 if method.lower() == 'mle' else optimize.root(chifunc, npar).x[0]
# Select all variable parameters, generate the figure
param_names = []
no_params = 0
for p in orig_params:
if orig_params[p].vary and (filter is None or any([f in p for f in filter])):
no_params += 1
param_names.append(p)
fig, axes, cbar = _make_axes_grid(no_params, axis_padding=0, cbar=no_params > 1)
# Make the plots on the diagonal: plot the chisquare/likelihood
# for the best fitting values while setting one parameter to
# a fixed value.
saved_params = copy.deepcopy(f.params)
function_kws = {'method': method.lower(), 'func_args': fit_args, 'func_kwargs': fit_kws}
function_kws['orig_stat'] = orig_value
for i in range(no_params):
params = copy.deepcopy(saved_params)
ranges[param_names[i]] = {}
# Set the y-ticklabels.
ax = axes[i, i]
ax.set_title(param_names[i])
if i == no_params-1:
if method.lower().startswith('chisquare'):
ax.set_ylabel(r'$\Delta\chi^2$')
else:
ax.set_ylabel(r'$\Delta\mathcal{L}$')
# Select starting point to determine error widths.
value = orig_params[param_names[i]].value
stderr = orig_params[param_names[i]].stderr
print(stderr)
stderr = stderr if stderr is not None else 0.01 * np.abs(value)
stderr = stderr if stderr != 0 else 0.01 * np.abs(value)
result_left, success_left = fitting._find_boundary(-stderr, param_names[i], boundary, f, x_data, y_data, function_kwargs=function_kws)
result_right, success_right = fitting._find_boundary(stderr, param_names[i], boundary, f, x_data, y_data, function_kwargs=function_kws)
success = success_left * success_right
ranges[param_names[i]]['left'] = result_left
ranges[param_names[i]]['right'] = result_right
if not success:
print("Warning: boundary calculation did not fully succeed for " + param_names[i])
right = np.abs(ranges[param_names[i]]['right'] - value)
left = np.abs(ranges[param_names[i]]['left'] - value)
params[param_names[i]].vary = False
left_val, right_val = max(value - distance * left, orig_params[param_names[i]].min), min(value + distance * right, orig_params[param_names[i]].max)
ranges[param_names[i]]['right_val'] = right_val
ranges[param_names[i]]['left_val'] = left_val
value_range = np.linspace(left_val, right_val, resolution_diag)
value_range = np.sort(np.append(value_range, np.array([value - left, value + right, value])))
chisquare = np.zeros(len(value_range))
# Calculate the new value, and store it in the array. Update the progressbar.
with tqdm.tqdm(value_range, desc=param_names[i], leave=True) as pbar:
for j, v in enumerate(value_range):
chisquare[j] = fitting.calculate_updated_statistic(v, param_names[i], f, x_data, y_data, **function_kws)
fitting._set_state(f, state, method=method.lower())
pbar.update(1)
# Plot the result
ax.plot(value_range, chisquare, color='k')
c = '#0093e6'
# Indicate the used interval.
ax.axvline(value + right, ls="dashed", color=c)
ax.axvline(value - left, ls="dashed", color=c)
ax.axvline(value, ls="dashed", color=c)
ax.axhline(boundary, color=c)
up = '{:.2ug}'.format(u.ufloat(value, right))
down = '{:.2ug}'.format(u.ufloat(value, left))
val = up.split('+')[0].split('(')[-1]
r = up.split('-')[1].split(')')[0]
l = down.split('-')[1].split(')')[0]
if 'e' in up or 'e' in down:
ex = up.split('e')[-1]
ax.set_title(title_e.format(param_names[i], val, l, r, ex))
else:
ax.set_title(title.format(param_names[i], val, l, r))
# Restore the parameters.
fitting._set_state(f, state, method=method.lower())
for i, j in zip(*np.tril_indices_from(axes, -1)):
params = copy.deepcopy(orig_params)
ax = axes[i, j]
x_name = param_names[j]
y_name = param_names[i]
if j == 0:
ax.set_ylabel(y_name)
if i == no_params - 1:
ax.set_xlabel(x_name)
right = ranges[x_name]['right_val']
left = ranges[x_name]['left_val']
x_range = np.append(np.linspace(left, right, resolution_map), orig_params[x_name].value)
x_range = np.sort(x_range)
right = ranges[y_name]['right_val']
left = ranges[y_name]['left_val']
y_range = np.append(np.linspace(left, right, resolution_map), orig_params[y_name].value)
y_range = np.sort(y_range)
X, Y = np.meshgrid(x_range, y_range)
Z = np.zeros(X.shape)
i_indices, j_indices = np.indices(Z.shape)
with tqdm.tqdm(i_indices.flatten(), desc=param_names[j]+' ' + param_names[i], leave=True) as pbar:
for k, l in zip(i_indices.flatten(), j_indices.flatten()):
x = X[k, l]
y = Y[k, l]
print(x, y, f.params['Background0'].value)
Z[k, l] = fitting.calculate_updated_statistic([x, y], [x_name, y_name], f, x_data, y_data, **function_kws)
fitting._set_state(f, state, method=method.lower())
pbar.update(1)
Z = -Z
npar = 1
bounds = []
for bound in [0.997300204, 0.954499736, 0.682689492]:
chifunc = lambda x: chi2.cdf(x, npar) - bound # Calculate 1 sigma boundary
bounds.append(-optimize.root(chifunc, npar).x[0])
# bounds = sorted([-number*number for number in np.arange(1, 9, .1)])
bounds.append(1)
if method.lower() == 'mle':
bounds = [b * 0.5 for b in bounds]
norm = mpl.colors.BoundaryNorm(bounds, invcmap.N)
contourset = ax.contourf(X, Y, Z, bounds, cmap=invcmap, norm=norm)
f.params = copy.deepcopy(orig_params)
if method.lower() == 'mle':
f.fit_mle = copy.deepcopy(orig_params)
else:
f.chisq_res_par
try:
cbar = plt.colorbar(contourset, cax=cbar, orientation='vertical')
cbar.ax.yaxis.set_ticks([0, 1/6, 0.5, 5/6])
cbar.ax.set_yticklabels(['', r'3$\sigma$', r'2$\sigma$', r'1$\sigma$'])
except:
pass
setattr(f, attr, orig_value)
for attr, value in zip(to_save, saved):
setattr(f, attr, copy.deepcopy(value))
for a in axes.flatten():
if a is not None:
for label in a.get_xticklabels()[::2]:
label.set_visible(False)
for label in a.get_yticklabels()[::2]:
label.set_visible(False)
return fig, axes, cbar | 1294f1a93a98602ee50e5e52aacea6e678625520 | 3,652,684 |
import socket
def local_ip():
"""find out local IP, when running spark driver locally for remote cluster"""
ip = ((([ip for ip in socket.gethostbyname_ex(socket.gethostname())[2] if not ip.startswith("127.")] or [[(s.connect(
("8.8.8.8", 53)), s.getsockname()[0], s.close()) for s in [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]][0][1]]) + ["no IP found"])[0])
return ip | d45978f3f433adba5cb1181d71cb367ceabd880f | 3,652,685 |
def string_to_int_replacements(setting, item, for_property):
"""Maps keys to values from setting and item for replacing string
templates for settings which need to be converted to/from Strings.
"""
replacements = common_replacements(setting, item)
replacements.update({
'$string_to_int': string_to_int(item, property=for_property),
'$int_to_string': int_to_string(item)})
return replacements | c6ae6a55fdda2fe13bd3ac7001da528d704e3df7 | 3,652,686 |
import os
def get_var_path(path):
"""
get the path stored in the 'app/data' directory
:param path: string
:return: string
"""
return os.path.join(VAR_DIR, path) | 9341acbdefde801b12229ed3e8275495e5ed37a7 | 3,652,687 |
import pathlib
import subprocess
def GitClone(
clone_url: str,
destination: pathlib.Path,
shallow: bool = False,
recursive: bool = False,
timeout: int = 3600,
) -> pathlib.Path:
"""Clone a repository from Github.
Args:
clone_url: The URL of the repo to clone.
destination: The output path. If this is already a non-empty directory, this
will fail.
shallow: Perform a shallow clone if set.
recursive: Clone submodules.
timeout: The maximum number of seconds to run a clone for before failing.
Returns:
The destination argument.
Raises:
RepoCloneFailed: On error.
"""
cmd = [
"timeout",
"-s9",
str(timeout),
"git",
"clone",
clone_url,
str(destination),
]
if shallow:
cmd += ["--depth", "1"]
if recursive:
cmd.append("--recursive")
try:
subprocess.check_call(cmd)
except subprocess.CalledProcessError:
raise RepoCloneFailed(f"Failed to clone repository: {clone_url}")
if not (destination / ".git").is_dir():
raise RepoCloneFailed(
f"Cloned repo `{clone_url}` but `{destination}/.git` not found"
)
return destination | 1c164c0af81cd83d956d66b8fe073cedb0d16a18 | 3,652,688 |
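A minimal usage sketch for GitClone above; it shells out to git and the coreutils timeout command, so a Unix-like environment is assumed, and the repository URL is only an example:
import pathlib
import tempfile

dest = pathlib.Path(tempfile.mkdtemp()) / "hello-world"
GitClone("https://github.com/octocat/Hello-World.git", dest, shallow=True)
print((dest / ".git").is_dir())  # True after a successful clone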
import struct
def UnpackU32(buf, offset=0, endian='big'):
""" Unpack a 32-bit unsigned integer into 4 bytes.
Parameters:
buf - Input packed buffer.
offset - Offset in buffer.
endian - Byte order.
Return:
2-tuple of unpacked value, new buffer offset.
"""
try:
return (struct.unpack_from(_EndianCode[endian]+'I', buf, offset)[0],
offset+4)
except (KeyError, TypeError, DeprecationWarning, struct.error) as inst:
_UnpackException('u32', offset, endian, inst) | 061ba5e8c4db891100549d0181475b8915d9fb0a | 3,652,689 |
def get_reset_token(user, expires_sec=1800):
"""
Create a specify token for reset user password
Args:
user:
expires_sec:
Returns:
token: string
"""
hash_token_password = Serializer(APP.config['SECRET_KEY'], expires_sec)
return hash_token_password.dumps({'user_name': user.user_name}).decode('utf-8') | 6477f03ca25a206db18c4a0a59ae0fac1106e262 | 3,652,690 |
from typing import Union
def _on_error_resume_next(*sources: Union[Observable, Future]) -> Observable:
"""Continues an observable sequence that is terminated normally or
by an exception with the next observable sequence.
Examples:
>>> res = rx.on_error_resume_next(xs, ys, zs)
Returns:
An observable sequence that concatenates the source sequences,
even if a sequence terminates exceptionally.
"""
sources_ = iter(sources)
def subscribe(observer, scheduler=None):
scheduler = scheduler or current_thread_scheduler
subscription = SerialDisposable()
cancelable = SerialDisposable()
def action(scheduler, state=None):
try:
source = next(sources_)
except StopIteration:
observer.on_completed()
return
# Allow source to be a factory method taking an error
source = source(state) if callable(source) else source
current = rx.from_future(source) if is_future(source) else source
d = SingleAssignmentDisposable()
subscription.disposable = d
def on_resume(state=None):
scheduler.schedule(action, state)
d.disposable = current.subscribe_(observer.on_next, on_resume, on_resume, scheduler)
cancelable.disposable = scheduler.schedule(action)
return CompositeDisposable(subscription, cancelable)
return Observable(subscribe) | ed604de1b566bc73b394143bb0b8bc487646ac1a | 3,652,691 |
from prompt_toolkit import print_formatted_text, prompt
def prompt_for_value(field_name: str, field_type):
"""Promt the user to input a value for the parameter `field_name`."""
print_formatted_text(
f"No value found for field '{field_name}' of type '{field_type}'. "
"Please enter a value for this parameter:"
)
response = prompt("> ")
while response == "":
print_formatted_text(f"No input received, please enter a value:")
response = prompt("> ")
return parse_value_from_string(response) | 3483b718f09d5a99a37a9d6086e462acf546cbf3 | 3,652,692 |
def move_j(joints, accel, vel):
"""
Function that returns UR script for linear movement in joint space.
Args:
joints: A list of 6 joint angles (double).
        accel: tool acceleration in m/s^2
vel: tool speed in m/s
Returns:
script: UR script
"""
# TODO: Test
# TODO: Check acceleration and velocity are below set limit
joint_positions = _format_joint_positions(joints)
return "movej({}, a = {:.2f}, v = {:.2f})\n".format(joint_positions, abs(accel), abs(vel)) | 4798c58de190b026a7b4d59378c277880d8c8077 | 3,652,693 |
def create_dist_list(dist: str, param1: str, param2: str) -> list:
""" Creates a list with a special syntax describing a distribution
Syntax: [identifier, param1, param2 (if necessary)]
"""
dist_list: list = []
if dist == 'fix':
dist_list = ["f", float(param1)]
elif dist == 'binary':
dist_list = ["b", float(param1)]
elif dist == 'binomial':
dist_list = ["i", float(param1), float(param2)]
elif dist == 'normal':
dist_list = ["n", float(param1), float(param2)]
elif dist == 'uniform':
dist_list = ["u", float(param1), float(param2)]
elif dist == 'poisson':
dist_list = ["p", float(param1)]
elif dist == 'exponential':
dist_list = ["e", float(param1)]
elif dist == 'lognormal':
dist_list = ["l", float(param1), float(param2)]
elif dist == 'chisquare':
dist_list = ["c", float(param1)]
elif dist == 'standard-t':
dist_list = ["t", float(param1)]
return dist_list | 19cb93867639bcb4ae8152e4a28bb5d068a9c756 | 3,652,694 |
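A few worked calls for create_dist_list above:
print(create_dist_list('normal', '0', '1'))   # ['n', 0.0, 1.0]
print(create_dist_list('poisson', '4', ''))   # ['p', 4.0]
print(create_dist_list('fix', '2.5', None))   # ['f', 2.5]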
import numpy as np
def compute_arfgm(t, qd, p, qw=0.0):
"""computes relative humidity from temperature, pressure and specific humidty (qd)
This might be similar to https://unidata.github.io/MetPy/latest/api/generated/metpy.calc.relative_humidity_from_specific_humidity.html
algorithm from RemapToRemo addgm
**Arguments:**
*p:*
atmospheric pressure ([Pa], 3d)
*t:*
temperature fields ([K], 3d)
*qd:*
specific humidity fields ([kg/kg], 3d)
*qw:*
liquid water content ([kg/kg], 3d)
**Returns:**
*relhum:*
relative humidity ([%],3d)
"""
# return fgqd(fgee(t),p)
#gqd = np.where(t >= C.B3, fgqd(fgew(t), p), fgqd(fgee(t), p))
fge = np.where(t >= C.B3, fgew(t), fgee(t))
gqd = fgqd(fge, p)
relhum = qd / gqd
return np.where(relhum > 1.0, (gqd + qw) / gqd, (qd + qw) / gqd) | 3e74be0b2099774482c32e2a3fbc4e2ee3f339fe | 3,652,695 |
import os
def getOutputMeshpath(path, meshtype=None):
"""Returns the folder path for mesh file export as specified in the GUI.
Phobos by default creates a directory 'meshes' in the export path and subsequently creates
sub-directories of "export/path/meshes" for every format, e.g. resulting in "export/path/mesh/obj"
for .obj file export.
Args:
path(str): export path root (set in the GUI)
meshtype(str, optional): a valid mesh type, otherwise the type set in the GUI is used (Default value = None)
Returns:
string: output path for meshes
"""
return os.path.join(path, 'meshes', meshtype if meshtype else getOutputMeshtype()) | 39336f2d02a58898d96a3d9a0c22d05716a46b50 | 3,652,696 |
import cv2
import numpy as np
def load_data():
""" loads the data for this task
:return:
"""
fpath = 'images/ball.png'
radius = 70
Im = cv2.imread(fpath, 0).astype('float32')/255 # 0 .. 1
# we resize the image to speed-up the level set method
Im = cv2.resize(Im, dsize=(0, 0), fx=0.5, fy=0.5)
height, width = Im.shape
centre = (width // 2, height // 2)
Y, X = np.ogrid[:height, :width]
phi = radius - np.sqrt((X - centre[0]) ** 2 + (Y - centre[1]) ** 2)
return Im, phi | 3caaa20ecb43853910f1d42667bd481bbe62e17d | 3,652,697 |
def create_response_body(input_json):
"""Create a json response with specific args of input JSON."""
city_name = str(input_json['name'])
country_code = str(input_json['sys']['country'])
temp_celsius = get_val_unit(input_json, 'main', 'temp', ' °C')
wind_speed = get_val_unit(input_json, 'wind', 'speed', ' m/s')
wind_deg = get_val_unit(input_json, 'wind', 'deg', ' deg')
cloudines = get_cloudines(input_json)
pressure = get_val_unit(input_json, 'main', 'pressure', ' hPa')
humidity_percent = get_val_unit(input_json, 'main', 'humidity', '%')
coord_lon = str(input_json['coord']['lon'])
coord_lat = str(input_json['coord']['lat'])
sunrise_hour = get_hour_time(input_json, 'sys', 'sunrise')
sunset_hour = get_hour_time(input_json, 'sys', 'sunset')
requested_time = get_datetime_from_unix(0, input_json, 'dt')
output_json = {
"location_name": f"{city_name}, {country_code}",
"temperature": temp_celsius,
"wind": f"{wind_speed}, {wind_deg}",
"cloudines": cloudines,
"pressure": pressure,
"humidity": humidity_percent,
"sunrise": sunrise_hour,
"sunset": sunset_hour,
"geo_coordinates": [coord_lat, coord_lon],
"requested_time": requested_time
}
return output_json | 4696f6d6929eea941697bf7aab49d139e4bd6229 | 3,652,698 |
import os
def get_GESLA_surge_filename(city):
"""Get the path to the file containing GESLA-2 surge data for ‘city’."""
if city == "Brest":
filename = "brest-brest-france-refmar_SkewSurges.txt"
elif city == "La Rochelle":
filename = "la_rochelle_la_palli-la_rochelle_la_palli-france-refmar_SkewSurges.txt"
elif city == "Saint-Jean-de-Luz":
filename = "saint_jean_de_luz_so-saint_jean_de_luz_so-france-refmar_SkewSurges.txt"
elif city == "Cherbourg":
filename = "cherbourg-cherbourg-france-refmar_SkewSurges.txt"
elif city == "Le Havre":
filename = "le_havre-le_havre-france-refmar_SkewSurges.txt"
elif city == "Newlyn":
# Choose one of the following files
# filename = "newlyn,_cornwall-294a-united_kingdom-uhslc_SkewSurges.txt" # 1915-2010, 2 MB
# filename = "newlyn-newlyn-glossdm-bodc_SkewSurges.txt" # 1916-1944, 600 kB
filename = "newlyn-p001-uk-bodc_SkewSurges.txt" # 1915-2014, 1.7 MB
elif city == "Vigo":
filename = "vigo-vigo-spain-ieo_SkewSurges.txt"
elif city == "La Coruna":
filename = "la_coruna-830a-spain-uhslc_SkewSurges.txt"
elif city == "Santander":
filename = "santander-sant-spain-ieo_SkewSurges.txt"
else:
raise ValueError("unknown city: " + repr(city))
return os.path.join(GESLA_DIR, filename) | e08fde26c527b6126cf7b9a7eb3ae720abc8ff19 | 3,652,699 |