content (stringlengths 35-762k) | sha1 (stringlengths 40) | id (int64 0-3.66M) |
---|---|---|
def is_private_bool(script_dict):
""" Returns is_private boolean value from user dictionary object """
return script_dict['entry_data']['ProfilePage'][0]['graphql']['user']['is_private'] | 1e8b30a38dc527dc5e2ea73e75c253d8f1a59550 | 3,653,122 |
def manage_greylist(request):
"""
View for managing greylist.
"""
message = None
if request.method == 'POST':
form = GreylistForm(request.POST)
if form.is_valid():
            # Save with commit=False so the requesting user can be attached before saving
new_greylisted_guest = form.save(commit=False)
new_greylisted_guest.addedBy = request.user
new_greylisted_guest.save()
message = 'Successfully added entry to greylist'
else:
message = 'Error adding entry to greylist'
else:
form = GreylistForm()
context = {
'greylist': [
(
greylisting,
user_can_delete_greylisting(request.user, greylisting),
)
for greylisting in GreylistedGuest.objects.all().order_by('name')
],
'message': message,
'form': form,
}
return render(request, 'parties/greylist/manage.html', context) | eafbbf10b6150189d25c7d863cb00f6565648925 | 3,653,123 |
def get_regions():
"""Summary
Returns:
TYPE: Description
"""
client = boto3.client('ec2')
region_response = client.describe_regions()
regions = [region['RegionName'] for region in region_response['Regions']]
return regions | 700119f1c852ad9475823170388c062f62291637 | 3,653,124 |
def _is_ignored_read_event(request):
"""Return True if this read event was generated by an automated process, as
indicated by the user configurable LOG_IGNORE* settings.
See settings_site.py for description and rationale for the settings.
"""
if (
django.conf.settings.LOG_IGNORE_TRUSTED_SUBJECT
and d1_gmn.app.auth.is_trusted_subject(request)
):
return True
if (
django.conf.settings.LOG_IGNORE_NODE_SUBJECT
and d1_gmn.app.auth.is_client_side_cert_subject(request)
):
return True
if _has_regex_match(
request.META["REMOTE_ADDR"], django.conf.settings.LOG_IGNORE_IP_ADDRESS
):
return True
if _has_regex_match(
request.META.get("HTTP_USER_AGENT", "<not provided>"),
django.conf.settings.LOG_IGNORE_USER_AGENT,
):
return True
if _has_regex_match(
request.primary_subject_str, django.conf.settings.LOG_IGNORE_SUBJECT
):
return True
return False | f6f7417fe923ef6bd56a6d649ef302ed811185e8 | 3,653,125 |
def aten_embedding(mapper, graph, node):
""" 构造embedding的PaddleLayer。
TorchScript示例:
%inputs_embeds.1 : Tensor = aten::embedding(%57, %input_ids.1, %45, %46, %46)
参数含义:
%inputs_embeds.1 (Tensor): 输出,embedding后的结果。
%57 (Tensor): weights。
%input_ids.1 (Tensor): 需要进行embedding的特征层。
%45 (int): padding_idx。
%46 (bool): scale_grad_by_freq。
%46 (bool): sparse。
"""
scope_name = mapper.normalize_scope_name(node)
op_name = name_generator("embedding", mapper.nn_name2id)
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [op_name, output_name]
layer_inputs = {}
layer_attrs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
    # Get the list of outputs of the current node
current_outputs = [output_name]
    # Handle input 0, i.e. %57
weights = mapper.pytorch_params[inputs_name[0]]
mapper.paddle_params[op_name + ".weight"] = weights
layer_attrs["num_embeddings"] = weights.shape[0]
layer_attrs["embedding_dim"] = weights.shape[1]
    # Handle input 1, i.e. %input_ids.1
mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs, scope_name)
layer_inputs["input"] = inputs_name[1]
    # Get the list of inputs of the current node
current_inputs = list(layer_inputs.values())
    # Handle input 2, i.e. %45
if mapper.attrs[inputs_name[2]] == -1:
layer_attrs["padding_idx"] = None
else:
layer_attrs["padding_idx"] = mapper.attrs[inputs_name[2]]
    # Handle input 4, i.e. %46
layer_attrs["sparse"] = mapper.attrs[inputs_name[4]]
graph.add_layer(
"paddle.nn.Embedding",
inputs=layer_inputs,
outputs=layer_outputs,
scope_name=scope_name,
**layer_attrs)
return current_inputs, current_outputs | d174c7e551bb3db7e7dc5d9014de9edd48ee4032 | 3,653,126 |
def _validate_opts(opts):
"""
Check that all of the types of values passed into the config are
of the right types
"""
def format_multi_opt(valid_type):
try:
num_types = len(valid_type)
except TypeError:
# Bare type name won't have a length, return the name of the type
# passed.
return valid_type.__name__
else:
def get_types(types, type_tuple):
for item in type_tuple:
if isinstance(item, tuple):
get_types(types, item)
else:
try:
types.append(item.__name__)
except AttributeError:
log.warning(
"Unable to interpret type %s while validating "
"configuration",
item,
)
types = []
get_types(types, valid_type)
ret = ", ".join(types[:-1])
ret += " or " + types[-1]
return ret
errors = []
err = (
"Config option '{0}' with value {1} has an invalid type of {2}, a "
"{3} is required for this option"
)
for key, val in opts.items():
if key in VALID_OPTS:
if val is None:
if VALID_OPTS[key] is None:
continue
else:
try:
if None in VALID_OPTS[key]:
continue
except TypeError:
# VALID_OPTS[key] is not iterable and not None
pass
if isinstance(val, VALID_OPTS[key]):
continue
if hasattr(VALID_OPTS[key], "__call__"):
try:
VALID_OPTS[key](val)
if isinstance(val, (list, dict)):
# We'll only get here if VALID_OPTS[key] is str or
# bool, and the passed value is a list/dict. Attempting
# to run int() or float() on a list/dict will raise an
# exception, but running str() or bool() on it will
# pass despite not being the correct type.
errors.append(
err.format(
key, val, type(val).__name__, VALID_OPTS[key].__name__
)
)
except (TypeError, ValueError):
errors.append(
err.format(
key, val, type(val).__name__, VALID_OPTS[key].__name__
)
)
continue
errors.append(
err.format(
key, val, type(val).__name__, format_multi_opt(VALID_OPTS[key])
)
)
# Convert list to comma-delimited string for 'return' config option
if isinstance(opts.get("return"), list):
opts["return"] = ",".join(opts["return"])
for error in errors:
log.warning(error)
if errors:
return False
return True | cafd1048a7496728715a192a4f70c7d50ade3622 | 3,653,128 |
async def from_string(input, output_path=None, options=None):
"""
Convert given string or strings to PDF document
    :param input: string with the desired content; can be raw text or HTML markup
:param output_path: (optional) path to output PDF file. If not provided,
PDF will be returned as string
:param options: (optional) dict to configure pyppeteer page.pdf action
Returns: output_path if provided else PDF Binary
"""
sources = Source(input, 'string')
r = PDFMate(sources, options=options)
return await r.to_pdf(output_path) | 2b3b6d9523d516fd3d258a3f722655720f49d91b | 3,653,129 |
def parse_tuple(tuple_string):
"""
    Strip any whitespace, then the outer quote/bracket characters.
"""
return tuple_string.strip().strip("\"[]") | d0052dce0582ca04d70455f1833d98545792c8ac | 3,653,130 |
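A quick illustrative check of parse_tuple (hypothetical inputs): the surrounding whitespace, quotes and brackets are stripped, while the inner separators are kept.

assert parse_tuple('  "[1, 2, 3]"  ') == "1, 2, 3"
assert parse_tuple("[a, b]") == "a, b"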
def create_size():
"""Create a new size."""
in_out_schema = SizeSchema()
try:
new_size = in_out_schema.load(request.json)
except ValidationError as err:
abort(400, {'message': err.messages})
try:
db.session.add(new_size)
db.session.commit()
except IntegrityError as err:
db.session.rollback()
log.exception(err)
abort(400, {'message': 'Data integrity violated.'})
return in_out_schema.jsonify(new_size) | f85b339c5ec5c38b8778de25456caa6fb0680d76 | 3,653,131 |
import click
from typing import Optional
def inject_snakefmt_config(
ctx: click.Context, param: click.Parameter, config_file: Optional[str] = None
) -> Optional[str]:
"""
If no config file argument provided, parses "pyproject.toml" if one exists.
Injects any parsed configuration into the relevant parameters to the click `ctx`.
"""
if config_file is None:
config_file = find_pyproject_toml(ctx.params.get("src", ()))
config = read_snakefmt_config(config_file)
if ctx.default_map is None:
ctx.default_map = {}
ctx.default_map.update(config) # type: ignore # bad types in .pyi
return config_file | 4d1fc2996db4c63070f67ef6b19387b2b30ac5cd | 3,653,132 |
def sort_by_ctime(paths):
"""Sorts list of file paths by ctime in ascending order.
    Args:
        paths: iterable of filepaths.
Returns:
list: filepaths sorted by ctime or empty list if ctime is unavailable.
"""
ctimes = list(map(safe_ctime, paths))
if not all(ctimes) or len(set(ctimes)) <= 1:
return []
else:
return sorted(paths, key=lambda fp: safe_ctime(fp)) | 551b7bc1d2cdc416588cbd783c9b1ac3e5914077 | 3,653,133 |
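sort_by_ctime relies on a safe_ctime helper that is not part of the snippet. A minimal sketch of what such a helper might look like (an assumption, not the original implementation):

import os

def safe_ctime(path):
    # Hypothetical helper: return the file's ctime, or None when the file
    # cannot be stat'ed, so sort_by_ctime's all(ctimes) check can bail out.
    try:
        return os.path.getctime(path)
    except OSError:
        return None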
def get_ospf_metric(device,
destination_address):
"""Get OSPF metric
Args:
device (obj): Device object
        destination_address (str): Destination address
    Returns:
        Route metric if found, otherwise None
    """
out = device.parse('show route')
# Example dictionary
# "route-table": [
# {
# "active-route-count": "0",
# "destination-count": "0",
# "hidden-route-count": "0",
# "holddown-route-count": "0",
# "rt": [
# {
# "metric": "101",
# }
# },
rt_list = Dq(out).get_values('rt')
for rt_dict in rt_list:
rt_destination_ = Dq(rt_dict).get_values("rt-destination", 0)
if not isinstance(rt_destination_, list):
if rt_destination_.startswith(str(destination_address)):
metric_ = Dq(rt_dict).get_values('metric', 0)
if not metric_:
continue
return metric_
return None | f5cd44794389a28db647e815baac4e954d59757b | 3,653,134 |
def get_episode_url():
"""エピソードの配信URLを追加
Returns:
[type]: [description]
"""
    # Get the value submitted in the form
episode_num = "#"+request.form['episode_num'][0]
print(episode_num)
    # Get the list of distribution targets
podcasts = Podcast.query.all()
broadcasts = Broadcast.query.all()
    # Distribution target URLs
broadcast_urls = {}
for br in broadcasts:
broadcast_urls[br.broadcast_service] = br.broadcast_url
    # Get the URLs for the episode
episode_urls = get_episode_url_all(broadcast_urls, episode_num)
return render_template(
'podcasts.html',
podcasts=podcasts,
broadcasts=broadcasts,
episode_num=episode_num,
episode_urls=episode_urls
) | e27f0324fd8332aa0648d35630cbb88b2b36c721 | 3,653,135 |
def autofs():
"""Fixture data from /proc/mounts."""
data = "flux-support -rw,tcp,hard,intr,noacl,nosuid,vers=3,retrans=5 flux-support.locker.arc-ts.umich.edu:/gpfs/locker0/ces/g/nfs/f/flux-support\numms-remills -rw,tcp,hard,intr,noacl,nosuid,vers=3,retrans=5 umms-remills.locker.arc-ts.umich.edu:/gpfs/locker0/ces/g/nfs/u/umms-remills"
return data | ea53c34d863de69c15f1e1247b98599c5f365ab7 | 3,653,136 |
def flag_dims(flags):
"""Return flag names, dims, and initials for flags.
Only flag value that correspond to searchable dimensions are
returned. Scalars and non-function string values are not included
in the result.
"""
dims = {}
initials = {}
for name, val in flags.items():
try:
flag_dim, initial = _flag_dim(val, name)
except ValueError:
pass
else:
dims[name] = flag_dim
initials[name] = initial
names = sorted(dims)
return (names, [dims[name] for name in names], [initials[name] for name in names]) | 4cafd991e21facacf36423028288e4c5bb10c8d9 | 3,653,137 |
def to_stack(df, col, by, transform=None, get_cats=False):
""" Convert columns of a dataframe to a list of lists by 'by'
Args:
df:
col:
by:
transform:
Returns:
"""
g = df.groupby(by)
transform = _notransform if transform is None else transform
x_data = []
for gr in g.groups:
x_data.append(transform(g.get_group(gr)[col].values))
cats = np.array([gg for gg in g.groups])
x_len = np.array([len(x) for x in x_data])
inds = x_len.argsort()
# print(cats)
# print(inds)
if get_cats:
return [x_data[i] for i in inds], cats[inds]
return [x_data[i] for i in inds] | 7bbf0ff609aaf2a6f5b49f80128ad06c04f93b5c | 3,653,139 |
from typing import List
def entries_repr(entries: List[Metadata]) -> str:
"""
Generates a nicely formatted string repr from a list of Dropbox metadata.
:param entries: List of Dropbox metadata.
:returns: String representation of the list.
"""
str_reps = [
f"<{e.__class__.__name__}(path_display={e.path_display})>" for e in entries
]
return "[" + ",\n ".join(str_reps) + "]" | cc768a662ac6440ef7d5ca0eaddff5205a7c0a8c | 3,653,140 |
def frequency_encode(dftrain, dftest, columnlist, output_type="include"):
"""
Frequency encode columns in columnlist.
Parameters:
dftrain: [DataFrame] train set
dftest: [DataFrame] test set
columnlist: [list] columns to encode.
output_type: [str], default="include" will include the columns in the same dataframes.
If "separate", returns separate dataframes.
Returns:
dftrain_freq: [DataFrame] train
dftest_freq: [DataFrame] test
Author: kmp
"""
if output_type is "include":
for col in columnlist:
col_freqs = dftrain.fillna({col:'NA'})[col].value_counts(normalize=True)
dftrain[col+'_freq'] = dftrain.fillna({col:'NA'})[col].map(col_freqs)
dftest[col+'_freq'] = dftest.fillna({col:'NA'})[col].map(col_freqs).fillna(0)
dftrain_freq = dftrain
dftest_freq = dftest
else:
dftrain_freq = pd.DataFrame(index=dftrain.index)
dftest_freq = pd.DataFrame(index=dftest.index)
for col in columnlist:
col_freqs = dftrain.fillna({col:'NA'})[col].value_counts(normalize=True)
dftrain_freq[col+'_freq'] = dftrain.fillna({col:'NA'})[col].map(col_freqs)
dftest_freq[col+'_freq'] = dftest.fillna({col:'NA'})[col].map(col_freqs).fillna(0)
return dftrain_freq, dftest_freq | 3380853f0b5f88a6b2392a657424c4fc326876e2 | 3,653,141 |
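A small usage sketch for frequency_encode with made-up data (assumes pandas is imported as pd); categories unseen in the train set fall back to a frequency of 0 in the test set.

import pandas as pd

train = pd.DataFrame({"city": ["NY", "NY", "LA", None]})
test = pd.DataFrame({"city": ["LA", "SF"]})

train_out, test_out = frequency_encode(train, test, ["city"], output_type="include")
# train_out["city_freq"] -> 0.50, 0.50, 0.25, 0.25   (the missing value is counted as 'NA')
# test_out["city_freq"]  -> 0.25, 0.00               ('SF' never appears in train)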
def get_ranked_results(completed_rounds):
"""
For the rounds given in completed_rounds, calculate the total score for each team.
Then all teams are sorted on total score and are given a ranking to allow for ex aequo scores.
"""
results = []
for team in QTeam.objects.all():
teamtotal = 0
for a in team.qanswer_set.all():
# Only add results for complete rounds
if a.rnd in completed_rounds:
teamtotal += a.score
results.append((team.team_name, teamtotal))
# Sort the results
sorted_results = sorted(results, reverse=True, key=lambda tup: tup[1])
rank, count, previous, ranking = 0, 0, None, []
for key, num in sorted_results:
count += 1
if num != previous:
rank += count
previous = num
count = 0
ranking.append((rank, key, num))
return ranking | cea2afa2bb8de1db82450f323274af94ad3b633f | 3,653,142 |
def get_subgraphs():
"""
Returns a list of lists. Each list is a subgraph (represented as a list of dictionaries).
:return: A list of lists of dictionaries.
"""
subgraph_list = [c.get("color") for c in classes if c.get("color") is not None]
subgraphs = []
# Add to subgraphs all the lists of actual subgraphs
for c in subgraph_list:
sub = [cl for cl in classes if cl.get("color") == c and cl]
if sub not in subgraphs:
subgraphs.append(sub)
# Now add to subgraphs all the items (as lists) that don't belong to a subsystem
for c in classes:
if c.get("color") is None:
sub = [c]
subgraphs.append(sub)
return subgraphs | 5e9b766b2c7f58d71eac62d88be64096272d2511 | 3,653,143 |
def score(self, features):
""" return score from ML models"""
assert len(self._models) > 0, 'No valid prediction model'
scores = list()
for feature in features:
# when feature list extraction fails
if not feature:
scores.append(-float('inf'))
continue
item = list()
for ins in self._models:
item.append(ins.inference(feature))
pred = [i for i in item if i]
scores.append(float(sum(pred)/len(pred)))
return scores | 413eb4a0ecdcf0ac4b8f9cf9643b08a839c78b9a | 3,653,144 |
def fromRGB(rgb):
"""Convert tuple or list to red, green and blue values that can be accessed as follows:
a = fromRGB((255, 255, 255))
a["red"]
a["green"]
a["blue"]
"""
return {"red":rgb[0], "green":rgb[1], "blue":rgb[2]} | 205a8f189d177e7af5cdc686e7c52fd2053a3c87 | 3,653,145 |
import math
def computeTelescopeTransmission(pars, offAxis):
"""
Compute tel. transmission (0 < T < 1) for a given set of parameters
as defined by the MC model and for a given off-axis angle.
Parameters
----------
pars: list of float
        Parameters of the telescope transmission. len(pars) should be at least 5.
offAxis: float
Off-axis angle in deg.
Returns
-------
float
Telescope transmission.
"""
_degToRad = math.pi / 180.0
if pars[1] == 0:
return pars[0]
else:
t = math.sin(offAxis * _degToRad) / (pars[3] * _degToRad)
return pars[0] / (1.0 + pars[2] * t ** pars[4]) | 50b2e2908726b8a77bc83a2821cf760b7475300b | 3,653,146 |
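An illustrative call with a hypothetical parameter set (not taken from any real telescope model): on axis the transmission is simply pars[0], and it decreases as the off-axis angle grows.

pars = [0.9, 1, 0.1, 3.0, 2]  # hypothetical values: T0 = 0.9, radial dependence enabled
print(computeTelescopeTransmission(pars, 0.0))  # 0.9 exactly (t = 0 on axis)
print(computeTelescopeTransmission(pars, 2.0))  # ~0.86, slightly reduced off axis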
def mean_iou(
results,
gt_seg_maps,
num_classes,
ignore_index,
nan_to_num=None,
label_map=dict(),
reduce_zero_label=False,
):
"""Calculate Mean Intersection and Union (mIoU)
Args:
results (list[ndarray]): List of prediction segmentation maps.
gt_seg_maps (list[ndarray]): list of ground truth segmentation maps.
num_classes (int): Number of categories.
ignore_index (int): Index that will be ignored in evaluation.
nan_to_num (int, optional): If specified, NaN values will be replaced
by the numbers defined by the user. Default: None.
label_map (dict): Mapping old labels to new labels. Default: dict().
        reduce_zero_label (bool): Whether to ignore the zero label. Default: False.
Returns:
float: Overall accuracy on all images.
ndarray: Per category accuracy, shape (num_classes, ).
ndarray: Per category IoU, shape (num_classes, ).
"""
all_acc, acc, iou = eval_seg_metrics(
results=results,
gt_seg_maps=gt_seg_maps,
num_classes=num_classes,
ignore_index=ignore_index,
metrics=["mIoU"],
nan_to_num=nan_to_num,
label_map=label_map,
reduce_zero_label=reduce_zero_label,
)
return all_acc, acc, iou | a6d90cb4028c831db82b4dddb6a4c52a8fa4e1f0 | 3,653,147 |
def as_date_or_none(date_str):
"""
Casts a date string as a datetime.date, or None if it is blank.
>>> as_date_or_none('2020-11-04')
datetime.date(2020, 11, 4)
>>> as_date_or_none('')
None
>>> as_date_or_none(None)
None
"""
if not date_str:
return None
return dateutil_parser.parse(date_str).date() | bf01bd280526e7962e1b08aa0400d6ebadf8053f | 3,653,148 |
def guarantee_trailing_slash(directory_name: str) -> str:
"""Adds a trailling slash when missing
Params:
:directory_name: str, required
A directory name to add trailling slash if missing
Returns:
A post processed directory name with trailling slash
"""
if not directory_name.endswith('/'):
return directory_name + '/'
return directory_name | 38cfdf971262fceb4888277522b22ba7276fa9b7 | 3,653,149 |
def bc32encode(data: bytes) -> str:
"""
bc32 encoding
see https://github.com/BlockchainCommons/Research/blob/master/papers/bcr-2020-004-bc32.md
"""
dd = convertbits(data, 8, 5)
polymod = bech32_polymod([0] + dd + [0, 0, 0, 0, 0, 0]) ^ 0x3FFFFFFF
chk = [(polymod >> 5 * (5 - i)) & 31 for i in range(6)]
return "".join([BECH32_ALPHABET[d] for d in dd + chk]) | 46feb2b744089f5f4bf84cae6ff9d29623b3bba5 | 3,653,150 |
def read_all_reviews(current_user):
"""Reads all Reviews"""
reviews = Review.query.all()
if reviews:
return jsonify({'Reviews': [
{
'id': review.id,
'title': review.title,
'desc': review.desc,
'reviewer': review.reviewer.username,
'business': review.business.name,
'created_at': review.created_at,
'updated_at': review.updated_at
} for review in reviews
]}), 200
return jsonify({'warning': 'No Review, create one first'}), 200 | 78642f38dab8328c11445e67848b7f6d9583d892 | 3,653,151 |
def matches_filters(row, field_to_index, transformed_filters):
"""
Validate field name in transformed filter_expressions, return TRUE for rows matching all filters
Parameters
------------
    row : list
        a row of the registry table (manager.show())
field_to_index : dict
key = column names, val = column index, in registry table (or manager.show())
transformed_filters : list
transformed/formatted fields for filtering rows
Returns
--------
bool
return TRUE for rows matching all filters
"""
field_to_index_lower = dict(
(k.lower(), v) for k, v in field_to_index.items()
) # to accept case-insensitive comparison
for tfilter in transformed_filters:
[field, op, value] = tfilter
if field not in field_to_index_lower:
raise DSGInvalidParameter(
f"field='{field}' is not a valid column name, valid fields: {list(field_to_index.keys())}"
)
obj_val = row[field_to_index_lower[field]].lower() # to accept case-insensitive comparison
if not matches_filter(val=obj_val, op=op, required_value=value):
return False
return True | 119b5e7d7f7dfb72e1a66525d5bf84665cbbced0 | 3,653,152 |
def div(f, other):
"""Element-wise division applied to the `Functional` objects.
# Arguments
f: Functional object.
other: A python number or a tensor or a functional object.
# Returns
A Functional.
"""
validate_functional(f)
inputs = f.inputs.copy()
if is_functional(other):
inputs += to_list(other.inputs)
lmbd = [Lambda(lambda x: x[0]/x[1], name=graph_unique_name("div")) for X in f.outputs]
else:
_warn_for_ndarray(other)
lmbd = [Lambda(lambda x: x/other, name=graph_unique_name("div")) for X in f.outputs]
Functional = f.get_class()
res = Functional(
inputs = unique_tensors(inputs),
outputs = _apply_operation(lmbd, f, other),
layers = lmbd
)
return res | abfc7df85946cfcd5196dff58bec22ee237b590b | 3,653,153 |
def _gen_input(storyline, nsims, mode, site, chunks, current_c, nperc, simlen, swg_dir, fix_leap):
"""
:param storyline: loaded storyline
    :param swg_dir: path to the directory containing the files from the SWG
:param nsims: number of sims to run
:param mode: one of ['irrigated', 'dryland']
:param site: one of ['eyrewell', 'oxford']
:param chunks: the number of chunks
:param current_c: the current chunk (from range(chunks)
:param nperc: number of simulations that can be run per chunk
:return:
"""
# manage chunks
if chunks == 1:
num_to_pull = nsims
elif chunks > 1:
num_to_pull = nperc
if current_c + 1 == chunks:
# manage last chunk
num_to_pull = nsims - (current_c * nperc)
else:
        raise ValueError("shouldn't get here")
params, doy_irr = get_params_doy_irr(mode, site)
matrix_weathers = []
days_harvests = []
# get restriction data
if mode == 'dryland':
rest_data = np.repeat([None], num_to_pull)
elif mode == 'irrigated':
rest_data = get_irr_data(num_to_pull, storyline, simlen)
else:
raise ValueError('weird arg for mode: {}'.format(mode))
# get weather data
weather_data = _get_weather_data(storyline=storyline, nsims=num_to_pull, simlen=simlen, swg_dir=swg_dir, site=site,
fix_leap=fix_leap)
# make all the other data
for rest, weather in zip(rest_data, weather_data):
if rest is None:
rest_temp = None
else:
rest_temp = pd.DataFrame(data=rest, index=weather.index, columns=['frest'])
matrix_weather = create_matrix_weather(mode, weather_data=weather, restriction_data=rest_temp,
rest_key='frest', fix_leap=fix_leap)
matrix_weathers.append(matrix_weather)
days_harvests.append(create_days_harvest(mode, matrix_weather, site, fix_leap=fix_leap))
return params, doy_irr, matrix_weathers, days_harvests | d0594a3b986c1415202db5f894101537464355a8 | 3,653,155 |
def guess_mime_mimedb (filename):
"""Guess MIME type from given filename.
@return: tuple (mime, encoding)
"""
mime, encoding = None, None
if mimedb is not None:
mime, encoding = mimedb.guess_type(filename, strict=False)
if mime not in ArchiveMimetypes and encoding in ArchiveCompressions:
# Files like 't.txt.gz' are recognized with encoding as format, and
# an unsupported mime-type like 'text/plain'. Fix this.
mime = Encoding2Mime[encoding]
encoding = None
return mime, encoding | 8202551c81b25e9bb104ec82114a750a16556b23 | 3,653,156 |
def get_members():
"""
Get a list of all members in FreeIPA
"""
members = []
ldap_conn = ldap.get_con()
res = ldap_conn.search_s(
"cn=users,cn=accounts,dc=csh,dc=rit,dc=edu",
pyldap.SCOPE_SUBTREE,
"(uid=*)",
["uid", "displayName"],
)
for member in res:
members.append(
{
"value": member[1]["uid"][0].decode("utf-8"),
"display": member[1]
.get("displayName", member[1]["uid"])[0]
.decode("utf-8"),
}
)
return members | 2714bddf7554884fa638066f91aa489b497f6c15 | 3,653,158 |
def _unicode_decode_extracted_tb(extracted_tb):
"""Return a traceback with the string elements translated into Unicode."""
return [(_decode(file), line_number, _decode(function), _decode(text))
for file, line_number, function, text in extracted_tb] | bbe020daecf6dc7021ff38dfac6869646120be5d | 3,653,159 |
def load_table(source, version):
"""Load synth table from file
"""
filepath = get_table_filepath(source, version=version)
return pd.read_table(filepath, delim_whitespace=True) | b95d35a6f297e0f73fee3652a0c9c6942b792451 | 3,653,160 |
def single_spaces(string: str) -> str:
"""Replaces all instances of whitespace-like chars with single spaces
Args:
string (str): The string to modify
Returns:
str: The cleaned string
"""
return UGLY_SPACES_RE.sub(" ", string) | eb37ae691f7fb54b6a23a5fd6d2cdd3edf8ebf57 | 3,653,161 |
def create_group(api_key: str, board_id: str, group_name: str, *args, **kwargs):
"""Creates a new group in a specific board.
__________
Parameters
api_key : `str`
The monday.com v2 API user key.
board_id : `str`
The board's unique identifier.
group_name : `str`
The name of the new group.
args : `tuple`
The list of group return fields.
kwargs : `dict`
Optional arguments for querying assets.
_______
Returns
data : `dict`
A monday.com group in item form.
_____________
Return Fields
archived : `bool`
Is the group archived or not.
color : `str`
The group's color.
deleted : `bool`
Is the group deleted or not.
id : `str`
The group's unique identifier.
items : `list[moncli.entities.Item]`
The items in the group.
position : `str`
The group's position in the board.
title : `str`
The group's title.
"""
args = get_field_list(constants.DEFAULT_GROUP_QUERY_FIELDS, *args)
kwargs = get_method_arguments(constants.CREATE_GROUP_OPTIONAL_PARAMS, **kwargs)
kwargs['board_id'] = util.IntValue(board_id)
kwargs['group_name'] = util.StringValue(group_name)
return execute_mutation(api_key, constants.CREATE_GROUP, *args, **kwargs) | b591fe000718615f44954e488d4e3c46b9cf0123 | 3,653,163 |
import cvxopt
def _solve_qp_ik_vel(vel, jac, joint_pos, joint_lims=None, duration=None, margin=0.2):
"""
Solves the IK for a given pusher velocity using a QP solver, imposing joint limits.
If the solution is optimal, it is guaranteed that the resulting joint velocities will not
cause the joints to reach their limits (minus the margin) in the specified duration of time
:param vel: desired EE velocity (6 values)
:param jac: jacobian
:param joint_pos: current joint positions
:param joint_lims: matrix of joint limits; if None, limits are not imposed
:param duration: how long the specified velocity will be kept (in seconds); if None, 2.0 is used
:param margin: maximum absolute distance to be kept from the joint limits
    :return: tuple with the solution (as a numpy array) and a boolean indicating whether the result is optimal
:type vel: np.ndarray
:type jac: np.ndarray
:type joint_pos: np.ndarray
:type joint_lims: np.ndarray
:type duration: float
:type margin: float
:rtype: (np.ndarray, bool)
"""
x_len = len(joint_pos)
P = cvxopt.matrix(np.identity(x_len))
A = cvxopt.matrix(jac)
b = cvxopt.matrix(vel)
q = cvxopt.matrix(np.zeros(x_len))
if duration is None:
duration = 2.
if joint_lims is None:
G, h = None, None
else:
G = duration * np.identity(x_len)
h = np.zeros(x_len)
for i in range(x_len):
dist_up = abs(joint_lims[i, 1] - joint_pos[i])
dist_lo = abs(joint_lims[i, 0] - joint_pos[i])
if dist_up > dist_lo:
# we are closer to the lower limit
# => must bound negative angular velocity, i.e. G_ii < 0
h[i] = dist_lo
G[i, i] *= -1
else:
# we are closer to the upper limit
# => must bound positive angular velocity, i.e. G_ii > 0
h[i] = dist_up
h = cvxopt.matrix(h - margin)
G = cvxopt.matrix(G)
# sol = cvxopt.solvers.qp(P, q, A=A, b=b, G=G, h=h, options={'show_progress': False, 'kktreg': 1e-9}, kktsolver='ldl')
sol = cvxopt.solvers.qp(P, q, A=A, b=b, G=G, h=h, options={'show_progress': False, 'refinement': 5})
x = np.array(sol['x']).reshape(-1)
optimal = sol['status'] == 'optimal'
return x, optimal | 25bd82403421f936d81d1a5c3090c1fbb1a964c1 | 3,653,164 |
def channel_will_be_next(crontab: str):
"""Checks if the given notification channel will be activated on the
next channel, in an hour."""
return pycron.is_now(crontab, now + timedelta(hours=1)) | b5505d7e27d70377cfb58acab8a38d9bd12d9351 | 3,653,165 |
def hospital_resident(residents, hospitals, optimal="resident"):
"""Solve an instance of HR using an adapted Gale-Shapley algorithm
:cite:`Rot84`. A unique, stable and optimal matching is found for the given
set of residents and hospitals. The optimality of the matching is found with
respect to one party and is subsequently the worst stable matching for the
other.
Parameters
----------
residents : list of Player
The residents in the game. Each resident must rank a non-empty subset
of the elements of ``hospitals``.
hospitals : list of Hospital
The hospitals in the game. Each hospital must rank all the residents
that have ranked them.
optimal : str, optional
Which party the matching should be optimised for. Must be one of
``"resident"`` and ``"hospital"``. Defaults to the former.
Returns
-------
matching : Matching
A dictionary-like object where the keys are the members of
``hospitals``, and the values are their matches ranked by preference.
"""
if optimal == "resident":
return resident_optimal(residents, hospitals)
if optimal == "hospital":
return hospital_optimal(hospitals) | e666b502a2e74f5c4628108397a82977b7da5b7f | 3,653,166 |
def log_request(response):
"""Log request.
:param response:
:return:
"""
ip = request.headers.get('X-Forwarded-For', request.remote_addr)
host = request.host.split(':', 1)[0]
app.logger.info(f"method={request.method}, path={request.path}, "
f"status={response.status_code}, "
f"ip={ip}, host={host}, params={dict(request.args)},"
f"headers={request.headers}, "
f"body={request.data}")
return response | 838df023329b8b49c2349e58d02b44ef51ef7213 | 3,653,167 |
def reduce(path, n_procs, column, function):
""" Calculate an aggregate value from IMB output.
Args:
path: str, path to file
n_procs: int, number of processes
column: str, column name
function: callable to apply to specified `column` of table for `n_procs` in `path`
"""
tables = read_imb_out(path)
table = tables[n_procs] # separate lines here for more useful KeyError if missing:
col = table[column]
result = function(col)
return result | e2892b862f02ca11acaa180e24d390804441f0db | 3,653,168 |
from pathlib import Path
def output_file_path(status_id, phase):
"""
"""
BASE_DIR = Path(__file__).resolve().parent.parent
return f"%s/logs/stage/{status_id}-{phase}.txt" %str(BASE_DIR) | 3bcbd80ad95389b9cf37fa66923bacb819ede710 | 3,653,169 |
def clean(some_string, uppercase=False):
"""
helper to clean up an input string
"""
if uppercase:
return some_string.strip().upper()
else:
return some_string.strip().lower() | cdc4587b762625e00c91189950bd45840861c93f | 3,653,170 |
import re
def to_title(value):
"""Converts a string into titlecase."""
t = re.sub("\s+", ".", value)
t = filter(LETTER_SET.__contains__, t)
t = re.sub("([a-z])'\W([A-Z])", lambda m: m.group(0).lower(), t.title())
return re.sub("\d([A-Z])", lambda m: m.group(0).lower(), t) | a88c9559abeab7426fa874e66c9e81a75138c0cd | 3,653,171 |
import yaml
def parse_config_or_kwargs(config_file, **kwargs):
"""parse_config_or_kwargs
:param config_file: Config file that has parameters, yaml format
:param **kwargs: Other alternative parameters or overwrites for config
"""
with open(config_file) as con_read:
yaml_config = yaml.load(con_read, Loader=yaml.FullLoader)
# values from config file are all possible params
arguments = dict(yaml_config, **kwargs)
# In case some arguments were not passed, replace with default ones
for key, value in DEFAULT_ARGS.items():
arguments.setdefault(key, value)
return arguments | f36946ed3a05f32057786ddf8e4194b935b4c129 | 3,653,172 |
def sig_generacion(m):
"""Devuelve la matriz resultante de aplicar las reglas del juego a cada celda"""
FILAS = len(m)
COLUMNAS = len(m[0]) if len(m) else 0
    new_m = []  # result matrix
for i in range(FILAS):
        l = []  # list used to build up one row
for j in range(COLUMNAS):
vec = num_vecinos(m, j, i)
if vec < 2 or vec > 3:
                l.append(0)  # dies
elif vec == 3:
                l.append(1)  # is born
else:
                l.append(m[i][j])  # survives if it was alive
new_m.append(l)
return new_m | 09da2baede2eef22179218f267bc2325d72822ee | 3,653,173 |
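sig_generacion calls a num_vecinos helper that is not shown. Below is a hypothetical sketch of that neighbor-counting helper plus a classic "blinker" check from Conway's Game of Life:

def num_vecinos(m, x, y):
    # Hypothetical helper: count live neighbors of the cell at column x, row y,
    # ignoring positions outside the grid (matches the call num_vecinos(m, j, i)).
    total = 0
    for dy in (-1, 0, 1):
        for dx in (-1, 0, 1):
            if dx == 0 and dy == 0:
                continue
            ny, nx = y + dy, x + dx
            if 0 <= ny < len(m) and 0 <= nx < len(m[0]):
                total += m[ny][nx]
    return total

blinker = [[0, 0, 0],
           [1, 1, 1],
           [0, 0, 0]]
print(sig_generacion(blinker))  # [[0, 1, 0], [0, 1, 0], [0, 1, 0]]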
import hmac
import hashlib
import base64
def calc_file_signature(data: str, password: str = None) -> str:
"""
    Compute the signature hash of a file's contents.
    @param data: the string to sign
    @param password: signature password (optional)
    @return: signature hash
"""
if (password):
        digest = hmac.new(password.encode(), msg=data.encode(), digestmod=hashlib.sha256).digest()
res_hash = base64.b64encode(digest).decode()
else:
hash = hashlib.sha256()
        hash.update(data.encode())
res_hash = hash.hexdigest()
return res_hash | 1422b8058a6eb7995558b3e0a7fa5f33f6cfd134 | 3,653,174 |
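Illustrative calls for calc_file_signature: with a password the result is a base64-encoded HMAC-SHA256 of the data, without one it is the plain hex SHA-256 digest.

print(calc_file_signature("hello world", "secret"))  # base64 HMAC-SHA256 string
print(calc_file_signature("hello world"))            # 64-character hex SHA-256 digest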
def get_angle_from_coordinate(lat1, long1, lat2, long2):
"""https://stackoverflow.com/questions/3932502/calculate-angle-between-two-latitude-longitude-points"""
dLon = (long2 - long1)
y = np.sin(dLon) * np.cos(lat2)
x = np.cos(lat1) * np.sin(lat2) - np.sin(lat1) * np.cos(lat2) * np.cos(dLon)
brng = np.arctan2(y, x)
brng = np.degrees(brng)
brng = (brng + 360) % 360
brng = 360 - brng
return brng | a1ad7ffe1e63197cc5f70b2ce2f343078fd9b5e7 | 3,653,175 |
import json
def get_predictions():
"""Return the list of predications as a json object"""
results = []
conn = None
columns = ("pid", "name", "location", "latitude", "longitude", "type", "modtime")
try:
conn = psycopg2.connect(db_conn)
# create a cursor
cur = conn.cursor()
cur.execute(
"SELECT pid, name, location, latitude, longitude, type, modtime FROM predictions"
)
for row in cur.fetchall():
results.append(dict(zip(columns, row)))
cur.close()
except (Exception, psycopg2.DatabaseError) as error:
print(error)
finally:
if conn is not None:
conn.close()
print("Database connection closed.")
json_str = json.dumps(results, indent=2, sort_keys=True, default=json_serial)
return Response(json_str, mimetype="application/json") | 6afb9d703f4dbeff81d4369f9096d577dcafc993 | 3,653,176 |
def parse_packageset(packageset):
"""
Get "input" or "output" packages and their repositories from each PES event.
:return: set of Package tuples
"""
return {parse_package(p) for p in packageset.get('package', packageset.get('packages', []))} | ff8af3423c0fda993cfa88be16142520e29b999e | 3,653,177 |
def pretty_print_large_number(number):
"""Given a large number, it returns a string of the sort: '10.5 Thousand' or '12.3 Billion'. """
s = str(number).ljust(12)
if number > 0 and number < 1e3:
pass
elif number >= 1e3 and number < 1e6:
s = s + " (%3.1f Thousand)" % (number * 1.0 / 1e3)
elif number >= 1e6 and number < 1e9:
s = s + " (%3.1f Million)" % (number * 1.0 / 1e6)
elif number >= 1e9 and number < 1e12:
s = s + " (%3.1f Billion)" % (number * 1.0 / 1e9)
elif number >= 1e12 and number < 1e15:
s = s + " (%3.1f Trillion)" % (number * 1.0 / 1e12)
return s | 6762f34744da360b36d4a4fc0659fcf7d3fb0465 | 3,653,179 |
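Two illustrative calls for pretty_print_large_number (the left-hand padding comes from ljust(12)):

print(pretty_print_large_number(1234567))     # 1234567      (1.2 Million)
print(pretty_print_large_number(4500000000))  # 4500000000   (4.5 Billion)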
def find_aligning_transformation(skeleton, euler_frames_a, euler_frames_b):
"""
performs alignment of the point clouds based on the poses at the end of
euler_frames_a and the start of euler_frames_b
Returns the rotation around y axis in radians, x offset and z offset
"""
point_cloud_a = convert_euler_frame_to_cartesian_frame(skeleton, euler_frames_a[-1])
point_cloud_b = convert_euler_frame_to_cartesian_frame(skeleton, euler_frames_b[0])
weights = skeleton.get_joint_weights()
theta, offset_x, offset_z = _align_point_clouds_2D(point_cloud_a, point_cloud_b, weights)
return theta, offset_x, offset_z | 1d323fcb0af73aacbc57e5cf57f0b9875375b98d | 3,653,180 |
def find_all_visit(tx):
"""
Method that queries the database to find all VISIT relationships
:param tx: session
:return: nodes of Person , Location
"""
query = (
"""
MATCH (p:Person)-[r:VISIT]->(l:Location)
RETURN p , ID(p) , r , r.start_hour , r.end_hour , r.date , l , ID(l)
"""
)
result = tx.run(query).data()
return result | 851d790b16f9db285a6d09b5cabc4e12ad364484 | 3,653,181 |
def read_vectors(filename):
"""Reads measurement vectors from a space or comma delimited file.
:param filename: path of the file
:type filename: str
:return: array of vectors
:rtype: numpy.ndarray
:raises: ValueError
"""
vectors = []
data = read_csv(filename)
expected_size = len(data[0])
if expected_size % 3 != 0:
raise ValueError('Column size of vector data must be a multiple of 3')
for row in data:
if len(row) == expected_size:
vectors.append(row)
else:
raise ValueError('Inconsistent column size of vector data')
result = np.array(vectors, np.float32)
if not np.isfinite(result).all():
raise ValueError('Non-finite value present in vector data')
return result | a772c4185d55543e0c641271a5af699f91e81b95 | 3,653,182 |
def get_scoring_algorithm():
""" Base scoring algorithm for index and search """
return scoring.BM25F() | 78fe59d02071ce000262208f4c228566e0747857 | 3,653,183 |
def _make_augmentation_pipeline(augmentation_list):
"""Buids an sklearn pipeline of augmentations from a tuple of strings.
Parameters
----------
augmentation_list: list of strings, A list of strings that determine the
augmentations to apply, and in which order to apply them (the first
string will be applied first). Possible augmentation strings are
['leadlag', 'ir', 'addtime', 'cumsum', 'basepoint']
Returns
-------
sklearn.Pipeline
The transforms, in order, as an sklearn pipeline.
Examples
--------
    augmentations = ('leadlag', 'ir', 'addtime')
_make_augmentation_pipeline(augmentations)
# Will return
Pipeline([
('leadlag', LeadLag()),
('ir', InvisibilityReset()),
('addtime', AddTime())
])
"""
# Dictionary of augmentations
AUGMENTATIONS = {
"leadlag": _LeadLag(),
"ir": _InvisibilityReset(),
"addtime": _AddTime(),
"cumsum": _CumulativeSum(),
"basepoint": _BasePoint(),
}
# Assertions, check we have a tuple/list
if augmentation_list is not None:
if isinstance(augmentation_list, str):
augmentation_list = (augmentation_list,)
assert all(
[x in list(AUGMENTATIONS.keys()) for x in augmentation_list]
), "augmentation_list must only contain string elements from {}. Given: {}.".format(
list(AUGMENTATIONS.keys()), augmentation_list
)
# Setup pipeline
if augmentation_list is not None:
pipeline = Pipeline(
[(tfm_str, AUGMENTATIONS[tfm_str]) for tfm_str in augmentation_list]
)
else:
pipeline = None
return pipeline | e53f4d198e6781c5eaf6ce6c0a453801f4ceb0d7 | 3,653,184 |
def ctg_path(event_name,sc_reform,path_cache,var_map,model,prev_events):
"""
    Recursively computes the controllable and contingent events that influence
the schedule of a given event.
"""
if event_name in path_cache:#If solution has been already computed, use it
return path_cache[event_name]
else:
if event_name in sc_reform: #End point of uncontrollable duration
if event_name in prev_events:
                raise RuntimeError('Contingent duration loop detected!')
else:
prev_events.add(event_name)
path_ref = ctg_path(sc_reform[event_name]['ref'],sc_reform,path_cache,var_map,model,prev_events)
path = [event_name]+path_ref
else: #Controllable event
if not event_name in var_map:#1-to-1 mapping between events and variables
var_map[event_name]=model.addVar(vtype=GRB.CONTINUOUS,lb=0.0)
model.update()
path = [event_name]
path_cache[event_name]=path #Caches solution for future use
return path | 5de8eb6fe3be991da3f4af37b6e81990aa8cb34f | 3,653,185 |
def _setup_mock_socket_file(mock_socket_create_conn, resp):
"""Sets up a mock socket file from the mock connection.
Args:
mock_socket_create_conn: The mock method for creating a socket connection.
resp: iterable, the side effect of the `readline` function of the mock
socket file.
Returns:
The mock socket file that will be injected into the code.
"""
fake_file = mock.Mock()
fake_file.readline.side_effect = resp
fake_conn = mock.Mock()
fake_conn.makefile.return_value = fake_file
mock_socket_create_conn.return_value = fake_conn
return fake_file | 5b70c73bb948211919065298a01a48d927e64482 | 3,653,186 |
def get_defense_type(action: int, game_config) -> int:
"""
Utility method for getting the defense type of action-id
:param action: action-id
:param game_config: game configuration
:return: action type
"""
defense_type = action % (game_config.num_attack_types+1) # +1 for detection
return defense_type | 68a05cf15bd833fb24aa448b8be2d08c1a949d12 | 3,653,187 |
def color_box(
colors, border="#000000ff", border2=None, height=32, width=32,
border_size=1, check_size=4, max_colors=5, alpha=False, border_map=0xF
):
"""Color box."""
return colorbox.color_box(
colors, border, border2, height, width,
border_size, check_size, max_colors, alpha, border_map
) | 6f8a98743c11985529afd5ad0c04a64c1301f85a | 3,653,188 |
def get_performance_of_lstm_classifier(X, y, n_epochs, verbose=1, final_score=False):
"""
Reshapes feature matrix X, applies LSTM and returns the performance of the neural network
:param X: List of non-reshaped/original feature matrices (one per logfile)
:param y: labels
:param n_epochs: Number of epochs the model should be trained
:param verbose: verbose mode of keras_model.fit
:param final_score: If final score should be printed, then don't use a validation set
    :return rocs, recalls, specificities, precisions, f1s
"""
X_list, y_list = _get_splitted_up_feature_matrix_and_labels(X, y)
globals()["_maxlen"] = max(len(fm) for fm in X_list)
if final_score:
X_train_list, y_train_list, X_test_list, y_test_list, X_val, y_val = \
_split_into_train_test_val_data(X_list, y_list, size_test_set=3, size_val_set=0)
X_lstm, y_lstm = _get_reshaped_matrices(X_train_list, y_train_list)
model = _generate_lstm_classifier((X_lstm.shape[1], X_lstm.shape[2]))
trained_model = _fit_lstm(model, X_lstm, y_lstm, n_epochs, verbose)
else:
X_train_list, y_train_list, X_test_list, y_test_list, X_val, y_val = \
_split_into_train_test_val_data(X_list, y_list, size_test_set=3, size_val_set=2)
X_lstm, y_lstm = _get_reshaped_matrices(X_train_list, y_train_list)
X_val, y_val = _get_reshaped_matrices(X_val, y_val)
model = _generate_lstm_classifier((X_lstm.shape[1], X_lstm.shape[2]))
trained_model = _fit_lstm(model, X_lstm, y_lstm, n_epochs, verbose, val_set=(X_val, y_val))
print('Performance training set: ')
_calculate_performance(X_lstm, y_lstm, trained_model)
print('Performance test set: ')
    rocs, recalls, specificities, precisions, f1s = _calculate_performance(X_test_list, y_test_list, trained_model)
    return rocs, recalls, specificities, precisions, f1s | 13a494f9aca643ff23ce6954471ef007df96f9e8 | 3,653,189 |
def worker(data):
"""Thread function."""
width, column = data
queen = Queen(width)
queen.run(column)
return queen.solutions | ef0f3c6410885ac2e20b28f009085d92b6fca22b | 3,653,190 |
def eitem(self, key, value):
"""Translate included eitems."""
_eitem = self.get("_eitem", {})
urls = []
for v in force_list(value):
urls.append(
{
"description": "E-book by EbookCentral",
"value": clean_val("u", v, str),
}
)
_eitem.update({"urls": urls})
return _eitem | d9a5d3f9dc29baa15d9df6b4fe32c7f20151316c | 3,653,191 |
def annotate_group(groups, ax=None, label=None, labeloffset=30):
"""Annotates the categories with their parent group and add x-axis label"""
def annotate(ax, name, left, right, y, pad):
"""Draw the group annotation"""
arrow = ax.annotate(name, xy=(left, y), xycoords="data",
xytext=(right, y - pad), textcoords="data",
annotation_clip=False, verticalalignment="top",
horizontalalignment="center", linespacing=2.0,
arrowprops={'arrowstyle': "-", 'shrinkA': 0, 'shrinkB': 0,
'connectionstyle': "angle,angleB=90,angleA=0,rad=5"}
)
return arrow
if ax is None:
ax = plt.gca()
level = 0
for level in range(len(groups)):
grp = groups[level]
for name, coord in list(grp.items()):
ymin = ax.get_ylim()[0] - np.ptp(ax.get_ylim()) * 0.12 - np.ptp(ax.get_ylim()) * 0.05 * (level)
ypad = 0.01 * np.ptp(ax.get_ylim())
xcenter = np.mean(coord)
annotate(ax, name, coord[0], xcenter, ymin, ypad)
annotate(ax, name, coord[1], xcenter, ymin, ypad)
if label is not None:
# Define xlabel and position it according to the number of group levels
ax.annotate(label,
xy=(0.5, 0), xycoords="axes fraction",
xytext=(0, -labeloffset - (level + 1) * 15), textcoords="offset points",
verticalalignment="top", horizontalalignment="center")
return | 33f57ccf96b4b0907ea8c2ea161e19b0e6e536d2 | 3,653,192 |
def background_schwarzfischer(fluor_chan, bin_chan, div_horiz=7, div_vert=5, mem_lim=None, memmap_dir=None):
"""Perform background correction according to Schwarzfischer et al.
Arguments:
fluor_chan -- (frames x height x width) numpy array; the fluorescence channel to be corrected
bin_chan -- boolean numpy array of same shape as `fluor_chan`; segmentation map (background=False, cell=True)
div_horiz -- int; number of (non-overlapping) tiles in horizontal direction
div_vert -- int; number of (non-overlapping) tiles in vertical direction
mem_lim -- max number of bytes for temporary data before switching to memmap;
if in (0,1], max percentage of free memory to be used;
if non-positive, always use memory; if None, decide automatically
memmap_dir -- str; directory for creating memmap
Returns:
Background-corrected fluorescence channel as numpy array (dtype single) of same shape as `fluor_chan`
"""
n_frames, height, width = fluor_chan.shape
# Allocate arrays
if np.can_cast(fluor_chan, np.float16):
dtype_interp = np.float16
elif np.can_cast(fluor_chan, np.float32):
dtype_interp = np.float32
else:
dtype_interp = np.float64
dtype_interp = np.dtype(dtype_interp)
bg_mean = np.empty((n_frames, 1, 1), dtype=dtype_interp)
# Create large arrays in memory or as memmap
if mem_lim is None or mem_lim > 0:
bg_interp, arr_temp, iter_temp = _get_arr(fluor_chan.shape, dtype_interp, mem_lim, memmap_dir)
else:
bg_interp, arr_temp, iter_temp = np.empty(shape=fluor_chan.shape, dtype=dtype_interp)
# Construct tiles for background interpolation
# Each pair of neighboring tiles is overlapped by a third tile, resulting in a total tile number
# of `2 * div_i - 1` tiles for each direction `i` in {`horiz`, `vert`}.
# Due to integer rounding, the sizes may slightly vary between tiles.
tiles_vert = _make_tiles(height, div_vert)
tiles_horiz = _make_tiles(width, div_horiz)
supp = np.empty((tiles_horiz.size, tiles_vert.size))
# Interpolate background as cubic spline with each tile’s median as support point at the tile center
for t in range(n_frames):
print(f"Interpolating background in frame {t:3d} …")
masked_frame = ma.masked_array(fluor_chan[t, ...], mask=bin_chan[t, ...])
for iy, (y, sy) in enumerate(tiles_vert):
for ix, (x, sx) in enumerate(tiles_horiz):
supp[ix, iy] = ma.median(masked_frame[sy, sx])
bg_spline = scint.RectBivariateSpline(x=tiles_horiz['center'], y=tiles_vert['center'], z=supp)
patch = bg_spline(x=range(width), y=range(height)).T
bg_interp[t, ...] = patch
bg_mean[t, ...] = patch.mean()
# Correct for background using Schwarzfischer’s formula:
# corrected_image = (raw_image - interpolated_background) / gain
# wherein, in opposite to Schwarzfischer, the gain is approximated as
# median(interpolated_background / mean_background)
# This “simple” calculation may consume more memory than available.
# Therefore, a less readable but more memory-efficient command flow is used.
for st, sl in iter_temp:
np.divide(bg_interp[:, sl, :], bg_mean, out=arr_temp[:, :st, :])
np.subtract(fluor_chan[:, sl, :], bg_interp[:, sl, :], out=bg_interp[:, sl, :])
np.divide(bg_interp[:, sl, :], np.median(arr_temp[:, :st, :], axis=0, keepdims=True), out=bg_interp[:, sl, :])
# `bg_interp` now holds the corrected image
return bg_interp | 512d1721dc14a4f7a09843603b8700360f97fd37 | 3,653,193 |
def get_output_data_path(extension, suffix=None):
"""Return full path for data file with extension, generated by a test script"""
name = get_default_test_name(suffix)
return osp.join(TST_PATH[0], f"{name}.{extension}") | ce5437c23061df490a31ac11f26f72e5935f0fd7 | 3,653,195 |
def _plot(self, **kwargs) -> tp.BaseFigure: # pragma: no cover
"""Plot `close` and overlay it with the heatmap of `labels`."""
if self.wrapper.ndim > 1:
raise TypeError("Select a column first. Use indexing.")
return self.close.rename('close').vbt.overlay_with_heatmap(self.labels.rename('labels'), **kwargs) | eaa6df4f29db8d1ab6dc0ffd1b9ecf8804f6aac9 | 3,653,196 |
def _set_global_vars(metadata):
"""Identify files used multiple times in metadata and replace with global variables
"""
fnames = collections.defaultdict(list)
for sample in metadata.keys():
for k, v in metadata[sample].items():
            print(k, v)
if os.path.isfile(v):
v = _expand_file(v)
metadata[sample][k] = v
fnames[v].append(k)
loc_counts = collections.defaultdict(int)
global_vars = {}
global_var_sub = {}
for fname, locs in fnames.items():
if len(locs) > 1:
loc_counts[locs[0]] += 1
name = "%s%s" % (locs[0], loc_counts[locs[0]])
global_var_sub[fname] = name
global_vars[name] = fname
for sample in metadata.keys():
for k, v in metadata[sample].items():
if v in global_var_sub:
metadata[sample][k] = global_var_sub[v]
return metadata, global_vars | 23caefdf0f999a9b60649c85278edb8498b771b3 | 3,653,197 |
def user_get(context, id):
"""Get user by id."""
return IMPL.user_get(context, id) | b3108b4627751d5dfef1b42b8ccad0295b33cc99 | 3,653,198 |
def unpack_singleton(x):
"""
>>> unpack_singleton([[[[1]]]])
1
>>> unpack_singleton(np.array(np.datetime64('2000-01-01')))
array('2000-01-01', dtype='datetime64[D]')
"""
while isinstance(x, (list, tuple)):
try:
x = x[0]
except (IndexError, TypeError, KeyError):
break
return x | f6f55ff17ba29aab5946c682b825c72eb70324dd | 3,653,199 |
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
# print('correct shape:', correct.shape)
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
if len(res) == 1:
return res[0]
else:
return (res[0], res[1], correct[0], pred[0]) | a5b2c3d97c839e0ae9954ce48889d5b46966b3cb | 3,653,202 |
def yyyydoy2jd(year,doy,hh=0,mm=0,ss=0.0):
"""
yyyydoy2jd Take a year, day-of-year, etc and convert it into a julian day
Usage: jd = yyyydoy2jd(year,doy,hh,mm,ss)
Input: year - 4 digit integer
doy - 3 digit, or less integer, (1 <= doy <= 366)
hh - 2 digit, or less int, (0 <= hh < 24) (not required)
mm - 2 digit, or less int,(0 <= ss < 60) (not required)
ss - float (not required)
Output: 'jd' (float)
"""
#
# need to split seconds into two components
# sec => 2 digit, or less int, (0 <= ss < 60)
# ms => int 0 <= ms < 1,000,000
#
ms,sec = modf(float(ss))
ms = ms * 10e5
    dto = dt.datetime(int(year), 1, 1, int(hh), int(mm), int(sec), int(ms))
dto = dto + dt.timedelta(days=(int(doy) - 1))
mn = dto.month
dy = dto.day
jd = cal2jd(int(year),int(mn),int(dy))
jd = jd + float(hh)/24. + float(mm)/60./24. + float(sec)/3600./24.
return jd - 2400000.5 | 7e0579197146435d4c3e5031de962b758555846f | 3,653,203 |
def lon2index(lon, coords, corr=True):
"""convert longitude to index for OpenDAP request"""
if corr:
if lon < 0:
lon += 360
lons = coords.lon.values
return np.argmin(np.abs(lons - lon)) | 3fd3571ab221533708c32c9e28293a90ee9f30cd | 3,653,204 |
def get_dynamic_call_address(ea):
"""Find all dynamic calls e.g call eax"""
dism_addr_list = list(FuncItems(ea))
return [addr for addr in dism_addr_list if print_insn_mnem(addr) == 'call' and get_operand_type(addr, 0)==1] | 1f4d0eb3bcfdf0728d12efdfd151246f0497c8dd | 3,653,205 |
def iwbo_nats(model, x, k, kbs=None):
"""Compute the IWBO in nats."""
if kbs: return - iwbo_batched(model, x, k, kbs).mean()
else: return - iwbo(model, x, k).mean() | 5620e60710e6c25804d66f4c668f4670e033fdbe | 3,653,206 |
def ko_json(queryset, field_names=None, name=None, safe=False):
"""
Given a QuerySet, return just the serialized representation
based on the knockout_fields. Useful for middleware/APIs.
Convenience method around ko_data.
"""
return ko_data(queryset, field_names, name, safe, return_json=True) | 25d3b433ffec6eb4e6bb8c0d39a9080692dee4f2 | 3,653,207 |
def delete_demo(guid):
"""
Delete a demo object and all its children.
:param guid: The demo's guid
:return:
"""
web_utils.check_null_input((guid, 'demo to delete'))
demo_service.delete_demo_by_guid(guid)
return '', 204 | eb0a205e4279003a99159b2aeb4b8caefd47c2be | 3,653,209 |
def return_json():
"""
Sample function that has been given a different name
"""
print("Tooler should render out the JSON value returned")
return {"one": 1, "deep": {"structure": ["example"]}} | bf28fab61cabfc3a4f30736e58490d5df6702dc2 | 3,653,210 |
def get(url) -> str:
"""Send an http GET request.
:param str url:
The URL to perform the GET request for.
:rtype: str
:returns:
UTF-8 encoded string of response
"""
return _execute_request(url).read().decode("utf-8") | 2f0b6ed542f75f83478f672ef1f39f192dddbf66 | 3,653,211 |
def train_step(model_optimizer, game_board_log, predicted_action_log,
action_result_log):
"""Run one training step."""
def loss_fn(model_params):
logits = PolicyGradient().apply({'params': model_params}, game_board_log)
loss = compute_loss(logits, predicted_action_log, action_result_log)
return loss
grad_fn = jax.grad(loss_fn)
grads = grad_fn(model_optimizer.target)
model_optimizer = model_optimizer.apply_gradient(grads)
return model_optimizer | 628742cb6d2fe19d25b5e283c7bec6f5189fc7b5 | 3,653,214 |
def make_static_rnn_with_control_flow_v2_tests(options):
"""Make a set of tests to do basic Lstm cell."""
test_parameters = [
{
"dtype": [tf.float32],
"num_batches": [4],
"time_step_size": [4],
"input_vec_size": [3],
"num_cells": [4],
"use_sequence_length": [True, False],
},
]
def build_graph(parameters):
"""Build a simple graph with BasicLSTMCell."""
num_batches = parameters["num_batches"]
time_step_size = parameters["time_step_size"]
input_vec_size = parameters["input_vec_size"]
num_cells = parameters["num_cells"]
inputs_after_split = []
for i in range(time_step_size):
one_timestamp_input = tf.placeholder(
dtype=parameters["dtype"],
name="split_{}".format(i),
shape=[num_batches, input_vec_size])
inputs_after_split.append(one_timestamp_input)
lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(
num_cells, activation=tf.nn.relu, state_is_tuple=True)
sequence_length = None
if parameters["use_sequence_length"]:
      # Using different sequence lengths in each batch, like [1, 2, 3, 3...].
sequence_length = [
min(i + 1, time_step_size) for i in range(num_batches)
]
cell_outputs, _ = rnn.static_rnn(
lstm_cell,
inputs_after_split,
dtype=tf.float32,
sequence_length=sequence_length)
out = cell_outputs[-1]
return inputs_after_split, [out]
def build_inputs(parameters, sess, inputs, outputs):
"""Feed inputs, assign variables, and freeze graph."""
with tf.variable_scope("", reuse=True):
kernel = tf.get_variable("rnn/basic_lstm_cell/kernel")
bias = tf.get_variable("rnn/basic_lstm_cell/bias")
kernel_values = create_tensor_data(parameters["dtype"],
[kernel.shape[0], kernel.shape[1]], -1,
1)
bias_values = create_tensor_data(parameters["dtype"], [bias.shape[0]], 0,
1)
sess.run(tf.group(kernel.assign(kernel_values), bias.assign(bias_values)))
num_batches = parameters["num_batches"]
time_step_size = parameters["time_step_size"]
input_vec_size = parameters["input_vec_size"]
input_values = []
for _ in range(time_step_size):
tensor_data = create_tensor_data(parameters["dtype"],
[num_batches, input_vec_size], 0, 1)
input_values.append(tensor_data)
out = sess.run(outputs, feed_dict=dict(zip(inputs, input_values)))
return input_values, out
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
use_frozen_graph=True) | aa29c5eddab46624c36be29ee9ce1e6a83efbd7a | 3,653,215 |
def jaccard(structured_phrases, phrases_to_score, partial=False, status_callback=None, status_increment=None, pmd_class=PartialMatchDict):
""" calculate jaccard similarity between phrases_to_score, using
structured_phrases to determine cooccurrences. For phrases `a' and `b', let
A be the set of documents `a' appeared in, and B be the set of documents
`b' appeared in. Then the Jaccard similarity of `a' and `b' is Similarity
value of two phrases is |A intersect B| / |A union B|.
Setting partial to true allows partial phrase matching: two phrases are the
same if they have any common subsequence of words. Very slow.
"""
    # indices will index into our union and intersection arrays
phrases = {}
if partial:
indices = pmd_class()
else:
indices = {}
for i, phrase in enumerate(phrases_to_score):
indices[phrase] = i
phrases[i] = phrase
N = len(phrases_to_score)
phrase_count = np.zeros(N)
if partial:
intersection = np.zeros((N, N), dtype=np.uint32)
else:
intersection = dok_matrix((N, N), dtype=np.uint32)
count = 0
if status_callback and not status_increment:
length = len(structured_phrases)
status_increment = length / 100
# take each document
for doc_phrases in structured_phrases:
if status_callback and status_increment > 0 and count % status_increment == 0:
try:
status_callback(status_format(float(count) / length))
except:
status_callback("%d processed" % count)
count += 1
# take all phrases within this document
for i in range(len(doc_phrases)):
np1 = tuple(doc_phrases[i])
if np1 in indices:
# this phrase is important enough to count
if partial:
matches1 = indices[np1]
else:
matches1 = set()
matches1.add(indices[np1])
for index1 in matches1:
phrase_count[index1] += 1
for k in range(i + 1, len(doc_phrases)):
np2 = tuple(doc_phrases[k])
if np2 in indices:
# this np is important enough to count
if partial:
matches2 = indices[np2]
else:
matches2 = set()
matches2.add(indices[np2])
for index1 in matches1:
for index2 in matches2:
if index2 != index1:
intersection[index1,index2] += 1
intersection[index2,index1] += 1
# use inclusion exclusion
if partial:
tiled_phrase_count = np.lib.stride_tricks.as_strided(phrase_count,
(N, phrase_count.size),
(0, phrase_count.itemsize))
union = tiled_phrase_count + tiled_phrase_count.T - intersection
jaccard = intersection / union
else:
jaccard = dok_matrix((N, N))
for coords, intersection_count in intersection.items():  # dok_matrix supports dict-style iteration
jaccard[coords] = intersection_count / (phrase_count[coords[0]] + phrase_count[coords[1]] - intersection_count)
jaccard = np.asarray(jaccard.todense())
return jaccard, phrases | c7af246028f59b2375974390f337063d740d2f53 | 3,653,216 |
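A minimal usage sketch for the jaccard() function above, with made-up data; it assumes the surrounding module (including the PartialMatchDict default and the numpy/scipy imports) is available. Documents are lists of phrases, and each phrase is a tuple of words.
docs = [
    [("machine", "learning"), ("neural", "network")],
    [("machine", "learning"), ("deep", "learning")],
    [("neural", "network"), ("deep", "learning")],
]
vocab = [("machine", "learning"), ("neural", "network"), ("deep", "learning")]
sim, index_to_phrase = jaccard(docs, vocab)
# sim[i, j] = |A intersect B| / |A union B| over the documents containing each phrase
print(index_to_phrase[0], index_to_phrase[1], sim[0, 1])  # -> 1 / 3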
import ast
import black
def print_python(node: ast.AST) -> str:
    """Takes an AST and produces a string containing a human-readable
    Python expression that builds the AST node."""
    return black.format_str(ast.dump(node), mode=black.FileMode()) | 06281c4622d2b13008c17763bb59f93dfc44527c | 3,653,217
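A quick round-trip example, assuming the black package is installed:
tree = ast.parse("x + 1", mode="eval")
print(print_python(tree.body))  # pretty-printed ast.BinOp(...) constructor expression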
def reg2deg(reg):
    """
    Converts phase register values into degrees.
    :param reg: Phase register value (32-bit unsigned integer)
    :type reg: int
    :return: Number of degrees
    :rtype: float
    """
    return reg * 360 / 2**32 | c7dbd6119ad3bce9261fb3d78a369251ade2d8af | 3,653,218
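A small sanity check, plus a hypothetical inverse helper (deg2reg is not part of the original code):
assert reg2deg(2**30) == 90.0  # a quarter turn of the 32-bit phase register
def deg2reg(deg):
    """Hypothetical inverse: map degrees back to a 32-bit register value."""
    return int(round(deg * 2**32 / 360)) % 2**32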
import pathlib
def load_config_at_path(path: Pathy) -> Dynaconf:
"""Load config at exact path
Args:
path: path to config file
Returns:
Dynaconf: the loaded configuration settings
Raises:
ConfigurationError: if no config file exists at the given path
"""
path = pathlib.Path(path)
if path.exists() and path.is_file():
options = DYNACONF_OPTIONS.copy()
options.update({
'root_path': str(path.parent),
'settings_file': str(path.name),
})
return Dynaconf(**options)
else:
raise ConfigurationError(
f'Couldn\'t find ballet.yml config file at {path!s}') | da5cc4b830ad3a50ec6713bb509d3db0862963bf | 3,653,221 |
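A sketch of the expected call pattern; the file name ballet.yml follows the error message above, and the fallback behaviour shown here is only illustrative:
try:
    config = load_config_at_path("ballet.yml")
except ConfigurationError:
    config = None  # no config file at that path; the caller decides how to fall back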
def _build_target(action, original_target, plugin, context):
"""Augment dictionary of target attributes for policy engine.
This routine adds to the dictionary attributes belonging to the
"parent" resource of the targeted one.
"""
target = original_target.copy()
resource, _w = _get_resource_and_action(action)
hierarchy_info = attributes.RESOURCE_HIERARCHY_MAP.get(resource, None)
if hierarchy_info and plugin:
# use the 'singular' version of the resource name
parent_resource = hierarchy_info['parent'][:-1]
parent_id = hierarchy_info['identified_by']
f = getattr(plugin, 'get_%s' % parent_resource)
# f *must* exist, if not found it is better to let quantum explode
# Note: we do not use admin context
data = f(context, target[parent_id], fields=['tenant_id'])
target['%s_tenant_id' % parent_resource] = data['tenant_id']
return target | e3c62944d7083ee96ad510fff0807db50aed9602 | 3,653,222 |
async def async_setup_entry(opp: OpenPeerPower, entry: ConfigEntry):
"""Configure Gammu state machine."""
device = entry.data[CONF_DEVICE]
config = {"Device": device, "Connection": "at"}
gateway = await create_sms_gateway(config, opp)
if not gateway:
return False
opp.data[DOMAIN][SMS_GATEWAY] = gateway
opp.config_entries.async_setup_platforms(entry, PLATFORMS)
return True | c0a14f2a92d06e814728ff0ceed05bff17acb66a | 3,653,223 |
def grep_response_body(regex_name, regex, owtf_transaction):
"""Grep response body
:param regex_name: Regex name
:type regex_name: `str`
:param regex: Regex
:type regex:
:param owtf_transaction: OWTF transaction
:type owtf_transaction:
:return: Output
:rtype: `dict`
"""
return grep(regex_name, regex, owtf_transaction.get_raw_response_body) | b5e9899675a63fe9ede9a9cf612b2004d52bb364 | 3,653,224 |
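A minimal sketch of the expected transaction interface: grep_response_body only reads get_raw_response_body, so a stub object is enough to illustrate the call. The regex name is hypothetical and the underlying grep() helper is assumed to come from the surrounding OWTF module.
class FakeTransaction:
    get_raw_response_body = '<form action="/login" method="post"></form>'
output = grep_response_body("RESPONSE_FORM_REGEX", r"<form[^>]*>", FakeTransaction())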
def link(f, search_range, pos_columns=None, t_column='frame', verbose=True, **kwargs):
"""
link(f, search_range, pos_columns=None, t_column='frame', memory=0,
predictor=None, adaptive_stop=None, adaptive_step=0.95,
neighbor_strategy=None, link_strategy=None, dist_func=None,
to_eucl=None)
Link a DataFrame of coordinates into trajectories.
Parameters
----------
f : DataFrame
The DataFrame must include any number of column(s) for position and a
column of frame numbers. By default, 'x' and 'y' are expected for
position, and 'frame' is expected for frame number. See below for
options to use custom column names.
search_range : float or tuple
the maximum distance features can move between frames,
optionally per dimension
pos_columns : list of str, optional
Default is ['y', 'x'], or ['z', 'y', 'x'] when 'z' is present in f
t_column : str, optional
Default is 'frame'
memory : integer, optional
the maximum number of frames during which a feature can vanish,
then reappear nearby, and be considered the same particle. 0 by default.
predictor : function, optional
Improve performance by guessing where a particle will be in
the next frame.
For examples of how this works, see the "predict" module.
adaptive_stop : float, optional
If not None, when encountering an oversize subnet, retry by progressively
reducing search_range until the subnet is solvable. If search_range
becomes <= adaptive_stop, give up and raise a SubnetOversizeException.
adaptive_step : float, optional
Reduce search_range by multiplying it by this factor.
neighbor_strategy : {'KDTree', 'BTree'}
algorithm used to identify nearby features. Default 'KDTree'.
link_strategy : {'recursive', 'nonrecursive', 'numba', 'hybrid', 'drop', 'auto'}
algorithm used to resolve subnetworks of nearby particles
'auto' uses hybrid (numba+recursive) if available
'drop' causes particles in subnetworks to go unlinked
dist_func : function, optional
a custom distance function that takes two 1D arrays of coordinates and
returns a float. Must be used with the 'BTree' neighbor_strategy.
to_eucl : function, optional
function that transforms a N x ndim array of positions into coordinates
in Euclidean space. Useful for instance to link by Euclidean distance
starting from radial coordinates. If search_range is anisotropic, this
parameter cannot be used.
Returns
-------
DataFrame with added column 'particle' containing trajectory labels.
The t_column (by default: 'frame') will be coerced to integer.
See also
--------
link_iter
Notes
-----
This is an implementation of the Crocker-Grier linking algorithm.
[1]_
References
----------
.. [1] Crocker, J.C., Grier, D.G. http://dx.doi.org/10.1006/jcis.1996.0217
"""
if pos_columns is None:
pos_columns = guess_pos_columns(f)
# copy the dataframe
f = f.copy()
# coerce t_column to integer type
if not np.issubdtype(f[t_column].dtype, np.integer):
f[t_column] = f[t_column].astype(np.int64)  # np.integer is abstract and not a valid target dtype
# sort on the t_column
pandas_sort(f, t_column, inplace=True)
coords_iter = coords_from_df(f, pos_columns, t_column)
ids = []
for i, _ids in link_iter(coords_iter, search_range, verbose=verbose, **kwargs):
ids.extend(_ids)
f['particle'] = ids
return f | 425f7ffe9bcda4700bc77e74c2e956f27f22d521 | 3,653,225 |
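A typical call, using a small hand-made feature table; two particles drift slowly over three frames and are joined into two trajectories:
import pandas as pd
f = pd.DataFrame({
    "x": [10.0, 11.0, 12.0, 40.0, 41.5, 43.0],
    "y": [20.0, 20.5, 21.0, 80.0, 79.0, 78.5],
    "frame": [0, 1, 2, 0, 1, 2],
})
linked = link(f, search_range=5)
print(linked.groupby("particle").size())  # two trajectories of three points each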
def get_classifier(opt, input_dim):
"""
Return a tuple with the ML classifier to be used and its hyperparameter
options (in dict format)."""
if opt == 'RF':
ml_algo = RandomForestClassifier
hyperparams = {
'n_estimators': [100],
'max_depth': [None, 10, 30, 50, 100],
'min_samples_split': [2, 10, 50, 100],
'random_state': [42],
'n_jobs': [-1],
}
elif opt == 'GBDT':
ml_algo = LGBMClassifier
hyperparams = {
'boosting_type': ['gbdt'],
'n_estimators': [100],
'max_depth': [-1, 10, 30, 50, 100],
'num_leaves': [2, 3, 5, 10, 50],
'learning_rate': [0.001, 0.01, 0.1],
'class_weight': [None, 'balanced'],
'random_state': [42],
'n_jobs': [-1],
}
elif opt == 'LR':
ml_algo = LogisticRegression
hyperparams = {
'solver': ['newton-cg', 'lbfgs', 'saga'],
'C': [0.0001, 0.001, 0.01],
'class_weight': [None, 'balanced'],
'random_state': [42],
'n_jobs': [-1],
}
elif opt == 'GNB':
ml_algo = GaussianNB
hyperparams = {
'var_smoothing': [10**-i for i in range(2, 15)],
}
elif opt == 'SVM':
ml_algo = SVC
hyperparams = {
'probability': [True],
'C': [0.01, 0.1, 1, 10],
'gamma': [0.001, 0.01, 0.1, 1],
}
elif opt == 'NN':
ml_algo = KerasClassifier(get_nn_model(input_dim), epochs=30, verbose=0)
hyperparams = {}
else:
raise ValueError(f'{opt} is an invalid classifier name.')
return ml_algo, hyperparams | a522cab05958023dd4239e4ec2b136d2510aec1b | 3,653,226 |
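A sketch of how the (estimator, grid) pair might be consumed downstream with scikit-learn's GridSearchCV; the toy dataset is made up. Note that the 'NN' branch returns an already-built instance rather than a class, so it would be wired up differently.
from sklearn.datasets import make_classification
from sklearn.model_selection import GridSearchCV
X, y = make_classification(n_samples=200, n_features=10, random_state=42)
algo, grid = get_classifier("LR", input_dim=X.shape[1])
search = GridSearchCV(algo(), grid, cv=3, scoring="roc_auc")
search.fit(X, y)
print(search.best_params_)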
def list_spiders_endpoint():
"""It returns a list of spiders available in the SPIDER_SETTINGS dict
.. version 0.4.0:
endpoint returns the spidername and endpoint to run the spider from
"""
spiders = {}
for item in app.config['SPIDER_SETTINGS']:
spiders[item['endpoint']] = 'URL: ' + request.url_root + 'run-spider/' + item['endpoint']
return jsonify(endpoints=spiders) | 71e7448a621565b540c8ade1dae04d8ef88d5fd2 | 3,653,227 |
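The view assumes each entry in app.config['SPIDER_SETTINGS'] carries an 'endpoint' key; a hypothetical configuration and the resulting response shape might look like this (the other keys are illustrative only):
app.config["SPIDER_SETTINGS"] = [
    {"endpoint": "quotes", "location": "spiders.quotes", "spider": "QuotesSpider"},
    {"endpoint": "news", "location": "spiders.news", "spider": "NewsSpider"},
]
# GET on this view then returns JSON such as:
# {"endpoints": {"quotes": "URL: http://localhost:5000/run-spider/quotes", ...}}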
def plot3dOnFigure(ax, pixels, colors_rgb, axis_labels=list("RGB"), axis_limits=((0, 255), (0, 255), (0, 255))):
"""Plot pixels in 3D."""
# Set axis limits
ax.set_xlim(*axis_limits[0])
ax.set_ylim(*axis_limits[1])
ax.set_zlim(*axis_limits[2])
# Set axis labels and sizes
ax.tick_params(axis='both', which='major', labelsize=14, pad=8)
ax.set_xlabel(axis_labels[0], fontsize=16, labelpad=16)
ax.set_ylabel(axis_labels[1], fontsize=16, labelpad=16)
ax.set_zlabel(axis_labels[2], fontsize=16, labelpad=16)
# Plot pixel values with colors given in colors_rgb
ax.scatter(
pixels[:, :, 0].ravel(),
pixels[:, :, 1].ravel(),
pixels[:, :, 2].ravel(),
c=colors_rgb.reshape((-1, 3)), edgecolors='none')
return ax | 067219abba7f77f7c4fbb4404ff16a3f5192f7cd | 3,653,228 |
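A usage sketch: scatter the pixels of a small random image in RGB space. The colors passed to scatter must be scaled to [0, 1].
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401  (registers the 3d projection on older matplotlib)
img = (np.random.rand(32, 32, 3) * 255).astype(np.uint8)
fig = plt.figure(figsize=(8, 8))
ax = fig.add_subplot(111, projection="3d")
plot3dOnFigure(ax, img, img / 255.0)
plt.show()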
import numpy
def ellipse(a, b, center=(0.0, 0.0), num=50):
"""Return the coordinates of an ellipse.
Parameters
----------
a : float
The semi-major axis of the ellipse.
b : float
The semi-minor axis of the ellipse.
center : 2-tuple of floats, optional
The position of the center of the ellipse;
default: (0.0, 0.0)
num : integer, optional
The number of points on the upper side of the ellipse.
The number includes the leading and trailing edges.
Thus, the total number of points will be 2 * (num - 1);
default: 50.
Returns
-------
x : numpy.ndarray
The x-coordinates of the ellipse as a 1D array of floats.
y : numpy.ndarray
The y-coordinates of the ellipse as a 1D array of floats.
"""
xc, yc = center
x_upper = numpy.linspace(xc + a, xc - a, num=num)
y_upper = b / a * numpy.sqrt(a**2 - x_upper**2)
x_lower = numpy.linspace(xc - a, xc + a, num=num)[1:-1]
y_lower = -b / a * numpy.sqrt(a**2 - x_lower**2)
x = numpy.concatenate((x_upper, x_lower))
y = numpy.concatenate((y_upper, y_lower))
return x, y | bd4d4663981a0431e40b20d38cc48a7f2476c13b | 3,653,230 |
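Example: a 2:1 ellipse centred at the origin, ready to be written out as a closed body contour.
x, y = ellipse(2.0, 1.0, num=100)
# 2 * (num - 1) = 198 points: the upper arc from (+a, 0) to (-a, 0), then the lower arc back.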
from typing import List
def get_trade_factors(name: str,
mp: float,
allow_zero: bool,
long_open_values: List,
long_close_values: List,
short_open_values: List = None,
short_close_values: List = None) -> dict:
"""获取指定 name 下的交易因子
:param allow_zero: 是否使用基础型
:param name: 因子系统的名称
:param mp: 单个标的最大允许持仓,小于0表示仓位百分比,大于0表示手数
:param long_open_values: 开多因子值
:param long_close_values: 平多因子值
:param short_open_values: 开空因子值
:param short_close_values: 平空因子值
:return: 因子交易系统
example:
===================
>>> factors = get_trade_factors(name="日线笔结束", long_open_values=['BDE'], long_close_values=['BUE'])
"""
if not short_close_values:
short_close_values = []
if not short_open_values:
short_open_values = []
def __is_match(v, x):
if allow_zero:
if v in x.name:
return 1
else:
return 0
else:
if v in x.name and "0" not in x.name:
return 1
else:
return 0
long_open_factors = ["{}@{}".format(name, x.value) for x in Factors.__members__.values()
if sum([__is_match(v, x) for v in long_open_values]) > 0]
long_close_factors = ["{}@{}".format(name, x.value) for x in Factors.__members__.values()
if sum([__is_match(v, x) for v in long_close_values]) > 0]
short_open_factors = ["{}@{}".format(name, x.value) for x in Factors.__members__.values()
if sum([__is_match(v, x) for v in short_open_values]) > 0]
short_close_factors = ["{}@{}".format(name, x.value) for x in Factors.__members__.values()
if sum([__is_match(v, x) for v in short_close_values]) > 0]
factors_ = {
"name": name,
"version": factors_all[name].__name__,
"mp": mp,
"long_open_factors": long_open_factors,
"long_close_factors": long_close_factors,
"short_open_factors": short_open_factors,
"short_close_factors": short_close_factors,
}
return factors_ | 14a7a8c0968e85f996e9c1e8f473be142c66759b | 3,653,233 |
def mbstrlen(src):
    """Return the character length of 'src', a UTF-8 encoded multibyte string.
    :param src: the source string
    """
    try:
        return len(src.decode("utf8", errors="replace"))
    except Exception as err:
        LOG.error("String convert issue %s", err)
        return len(src) | 8b2f64b2791eebf898d3bf8104d93d86dcdd53a3 | 3,653,234
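mbstrlen() counts characters rather than bytes for UTF-8 input:
s = "héllo".encode("utf8")
assert len(s) == 6       # byte length
assert mbstrlen(s) == 5  # character length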
def adapted_border_postprocessing(border_prediction, cell_prediction):
"""
:param border_prediction: three-channel softmax border prediction (background, cell, border)
:param cell_prediction: cell probability map with values in [0, 1]
:return: instance segmentation (uint16), its RGB overlay (uint8), and the binary border class map (uint8)
"""
prediction_border_bin = np.argmax(border_prediction, axis=-1)
cell_prediction = cell_prediction > 0.5
seeds = border_prediction[:, :, 1] * (1 - border_prediction[:, :, 2]) > 0.5 # Subtract borders from cell seed
seeds = measure.label(seeds, background=0)
prediction_instance = watershed(image=cell_prediction,
markers=seeds,
mask=cell_prediction,
watershed_line=False,
)
prediction_instance = measure.label(prediction_instance, background=0)
colors = get_colors()
prediction_instance_rgb = label2rgb(prediction_instance, colors=colors, kind='overlay', bg_label=0)
prediction_instance = np.expand_dims(prediction_instance, axis=-1)
prediction_border_bin = np.expand_dims(prediction_border_bin, axis=-1)
return prediction_instance.astype(np.uint16), prediction_instance_rgb.astype(np.uint8), prediction_border_bin.astype(np.uint8) | 4e74c1a71fb5c5f90d54735fa3af241461b48ebb | 3,653,235 |
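A smoke-test sketch with random data; it assumes the skimage-based helpers used by the module (measure, watershed, label2rgb, get_colors) are importable, and that the three prediction channels are ordered background / cell / border, as the code above implies.
import numpy as np
h, w = 256, 256
border_pred = np.random.dirichlet([1, 1, 1], size=(h, w)).astype(np.float32)  # (h, w, 3), channels sum to 1
cell_pred = border_pred[:, :, 1] + border_pred[:, :, 2]  # crude cell probability map
instances, instances_rgb, border_bin = adapted_border_postprocessing(border_pred, cell_pred)
print(instances.shape, int(instances.max()))  # (256, 256, 1) and the number of labelled objects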
def calc_bonding_volume(rc_klab, dij_bar, rd_klab=None, reduction_ratio=0.25):
"""
Calculate the association site bonding volume matrix
Dimensions of (ncomp, ncomp, nbeads, nbeads, nsite, nsite)
Parameters
----------
rc_klab : numpy.ndarray
Matrix of cutoff distances for the association sites of each site type in each group type
dij_bar : numpy.ndarray
Component averaged hard sphere diameter
rd_klab : numpy.ndarray, Optional, default=None
Position of association site in each group (nbead, nbead, nsite, nsite)
reduction_ratio : float, Optional, default=0.25
Reduced distance of the sites from the center of the sphere of interaction. This value is used when the site position, rd_klab, is None.
Returns
-------
Kijklab : numpy.ndarray
Matrix of binding volumes
"""
ncomp = len(dij_bar)
nbead, _, nsite, _ = np.shape(rc_klab)
Kijklab = np.zeros((ncomp, ncomp, nbead, nbead, nsite, nsite))
for i in range(ncomp):
for j in range(ncomp):
for k in range(nbead):
for l in range(nbead):
for a in range(nsite):
for b in range(nsite):
if rc_klab[k, l, a, b] != 0:
if rd_klab is None:
rd = reduction_ratio * dij_bar[i, j]
else:
rd = rd_klab[k, l, a, b]
tmp0 = np.pi * dij_bar[i, j] ** 2 / (18 * rd ** 2)
tmp11 = np.log(
(rc_klab[k, l, a, b] + 2 * rd) / dij_bar[i, j]
)
tmp12 = (
6 * rc_klab[k, l, a, b] ** 3
+ 18 * rc_klab[k, l, a, b] ** 2 * rd
- 24 * rd ** 3
)
tmp21 = rc_klab[k, l, a, b] + 2 * rd - dij_bar[i, j]
tmp22 = (
22 * rd ** 2
- 5 * rd * rc_klab[k, l, a, b]
- 7 * rd * dij_bar[i, j]
- 8 * rc_klab[k, l, a, b] ** 2
+ rc_klab[k, l, a, b] * dij_bar[i, j]
+ dij_bar[i, j] ** 2
)
Kijklab[i, j, k, l, a, b] = tmp0 * (
tmp11 * tmp12 + tmp21 * tmp22
)
return Kijklab | cf154af6287286c19d606a2324c548f70f90121b | 3,653,236 |
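A minimal call with one component, one bead type and a single site; the numbers are illustrative only, not physically meaningful.
import numpy as np
rc_klab = np.array([[[[1.0]]]])  # (nbead, nbead, nsite, nsite) cutoff distances
dij_bar = np.array([[3.0]])      # (ncomp, ncomp) averaged hard-sphere diameters
K = calc_bonding_volume(rc_klab, dij_bar)
print(K.shape)  # (1, 1, 1, 1, 1, 1)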
def scale_enum(anchor, scales):
"""Enumerate a set of anchors for each scale wrt an anchor.
"""
w_w, h_h, x_ctr, y_ctr = genwhctrs(anchor)
w_s = w_w * scales
h_s = h_h * scales
anchors = makeanchors(w_s, h_s, x_ctr, y_ctr)
return anchors | 8de95fc6966133a74f10318f23e97babcb36d5cd | 3,653,237 |
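Typical Faster R-CNN style anchor generation, assuming the companion helpers genwhctrs and makeanchors are defined elsewhere in the same module:
import numpy as np
base_anchor = np.array([0, 0, 15, 15])  # (x1, y1, x2, y2) of a 16x16 base box
anchors = scale_enum(base_anchor, np.array([8, 16, 32]))
# -> three anchors of side 128, 256 and 512 pixels sharing the same centre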
def L1():
"""
Graph for computing 'L1'.
"""
graph = beamline(scatter=True)
for node in ['scattered_beam', 'two_theta', 'L2', 'Ltotal']:
del graph[node]
return graph | 1bd17365107740a41d88ac3825ef2aca412bb616 | 3,653,238 |