content (string, 35 to 762k chars) | sha1 (string, 40 chars) | id (int64, 0 to 3.66M) |
---|---|---|
import random
def describe_current_subtask(subtask, prefix=True):
"""
Make a 'natural' language description of subtask name
"""
to_verb = {"AnswerQuestion": "answering a question",
"ArmGoal": "moving my arm",
"DemoPresentation": "giving a demo",
"Find": "finding",
"Follow": "following",
"Guide": "guiding",
"GripperGoal": "moving my gripper",
"HandOver": "handing something over",
"Inspect": "inspecting",
"LookAt": "looking",
"NavigateTo": "navigating",
"PickUp": "picking up",
"Place": "placing",
"ResetWM": "resetting my world model",
"Say": "speaking",
"SendPicture": "sending a picture",
"TurnTowardSound": "turning towards a sound"}
description = to_verb.get(subtask, subtask + "ing")
if prefix:
description = random.choice(["I'm busy", "I'm"]) + " " + description
return description | 628c699201c26242bd72c6066cba07cce54b14ca | 6,165 |
def addprint(x: int, y: int):
"""Print and "added" representation of `x` and `y`."""
expr = x + y
return "base addprint(x=%r, y=%r): %r" % (x, y, expr) | e3f735afc1d4826a1af7210c3cec88c8b8c87dfe | 6,166 |
import re
def parse_date(deadline_date):
"""
Given a date in the form MM/DD/YY or MM/DD/YYYY, returns
the integers MM, DD, and YYYY (or YY) in this order.
"""
deadline_split = re.split('\\/|\\-', deadline_date)
return int(deadline_split[0]), int(deadline_split[1]), int(deadline_split[2]) | 0ded6bccce8437aad61cfa5ff121c5ed0595849b | 6,167 |
import requests
import pandas as pd
def jyfm_tools_position_fund_direction(
trade_date="2020-02-24", indicator="期货品种资金流向排名", headers=""
):
"""
交易法门-工具-资金分析-资金流向
https://www.jiaoyifamen.com/tools/position/fund/?day=2020-01-08
:param trade_date: 指定交易日
:type trade_date: str
:param indicator: "期货品种资金流向排名" or "期货主力合约资金流向排名"
:type indicator: str
:param headers: headers with cookies
:type headers: dict
:return: 指定交易日的资金流向数据
:rtype: pandas.DataFrame
"""
params = {
"day": trade_date,
}
url = "https://www.jiaoyifamen.com/tools/position/fund/"
r = requests.get(url, params=params, headers=headers)
data_json = r.json()
if indicator == "期货品种资金流向排名":
return pd.DataFrame(
[
[data_json["tradingDay"]] * len(data_json["flowCategory"]),
data_json["flowCategory"],
data_json["flowValue"],
],
index=["date", "symbol", "fund"],
).T
else:
return pd.DataFrame(
[
[data_json["tradingDay"]] * len(data_json["dominantFlowCategory"]),
data_json["dominantFlowCategory"],
data_json["dominantFlowValue"],
],
index=["date", "symbol", "fund"],
).T | 10cfb29f1705460916fa93542ba72a22b3cdbf70 | 6,168 |
import geopandas as gpd
import pandas as pd
def generate_points_in_areas(gdf, values, points_per_unit=1, seed=None):
"""
Create a GeoSeries of random points in polygons.
Parameters
----------
gdf : GeoDataFrame
The areas in which to create points
values : str or Series
The [possibly scaled] number of points to create in each area
points_per_unit : numeric, optional
The rate to scale the values in point generation.
seed : int, optional
A random seed
Returns
-------
GeoSeries
"""
geometry = gdf.geometry
if isinstance(values, str) and values in gdf.columns:
values = gdf[values]
new_values = (values / points_per_unit).astype(int)
g = gpd.GeoDataFrame(data={'vals': new_values}, geometry=geometry)
a = g.apply(lambda row: tuple(generate_random_points_in_polygon(row['geometry'], row['vals'], seed)), 1)
b = gpd.GeoSeries(a.apply(pd.Series).stack(), crs=geometry.crs)
b.name = 'geometry'
return b | 14232540c4bee8c9863b2af4f3f2f200bb261098 | 6,169 |
def template_dict(input_dict_arg, params_dict_arg):
"""function to enable templating a dictionary"""
output_dict = input_dict_arg
for key, value in output_dict.items():
if isinstance(value, str):
output_dict[key] = params_re_str(value, params_dict_arg)
elif isinstance(value, dict):
output_dict[key] = template_dict(value, params_dict_arg)
elif isinstance(value, list):
output_dict[key] = template_list(value, params_dict_arg)
return output_dict | 3a9e2df200f52f9ec320ab3900653851dfb77fcc | 6,171 |
def _traverse_dictionaries(instance, parent="spin_systems"):
"""Parses through the instance object contained within the parent object and return
a list of attributes that are populated.
Args:
instance: An instance object from the parent object.
parent: a string object used to create the addresses of the SpinSystem
attributes.
Returns:
List Object.
"""
if isinstance(instance, list):
return [
value
for i, obj in enumerate(instance)
for value in _traverse_dictionaries(obj, _str_encode(f"{parent}[{i}]"))
]
if isinstance(instance, dict):
return [
item
for key, value in instance.items()
if key not in EXCLUDE and value is not None
for item in (
_traverse_dictionaries(value, _str_encode(f"{parent}.{key}"))
if isinstance(value, (dict, list))
else [_str_encode(f"{parent}.{key}")]
)
]
return [] | 9ecf8050e7c4d9c4f8e84f04303f0be186f594d5 | 6,172 |
def getSingleChildTextByName(rootNode, name):
"""Returns the text of a child node found by name.
Only one such named child is expected.
"""
try:
nodeList = [e.firstChild.data for e in rootNode.childNodes if e.localName == name]
if len(nodeList) > 0:
return nodeList[0]
else:
return None
except AttributeError:
return None | 48a8a4b2c3c95cac944bcb96e33e602d62499f19 | 6,173 |
import numpy as np
def _get_energy_ratio_single_wd_bin_bootstrapping(
df_binned,
df_freq,
N=1,
percentiles=[5.0, 95.0],
return_detailed_output=False,
):
"""Get the energy ratio for one particular wind direction bin and
an array of wind speed bins. This function also includes bootstrapping
functionality by increasing the number of bootstrap evaluations (N) to
larger than 1. The bootstrap percentiles default to 5 % and 95 %.
"""
# Get results excluding uncertainty
if return_detailed_output:
energy_ratio_nominal, dict_info = _get_energy_ratio_single_wd_bin_nominal(
df_binned=df_binned,
df_freq=df_freq,
return_detailed_output=return_detailed_output,
)
else:
energy_ratio_nominal = _get_energy_ratio_single_wd_bin_nominal(
df_binned=df_binned,
df_freq=df_freq,
return_detailed_output=return_detailed_output,
)
# Add bootstrapping results, if necessary
if N <= 1:
results_array = np.array([energy_ratio_nominal] * 3, dtype=float)
else:
# Get a bootstrap sample of range
bootstrap_results = np.zeros(N)
bootstrap_results[0] = energy_ratio_nominal
for i in range(1, N):
df_randomized = df_binned.sample(frac=1, replace=True).copy()
bootstrap_results[i] = _get_energy_ratio_single_wd_bin_nominal(
df_binned=df_randomized,
df_freq=df_freq,
return_detailed_output=False,
)
# Return the results in the order used in previous versions
results_array = np.array(
[
energy_ratio_nominal,
np.nanpercentile(bootstrap_results, percentiles)[0],
np.nanpercentile(bootstrap_results, percentiles)[1],
]
)
if return_detailed_output:
return results_array, dict_info
else:
return results_array | a29e1ebaa9994148e473d61d7881737b62a9082e | 6,175 |
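The bootstrap above resamples the binned dataframe with replacement and reports percentiles of the re-evaluated metric. A minimal self-contained sketch of the same idea, assuming only NumPy and with a plain mean standing in for the energy-ratio evaluation:

import numpy as np

rng = np.random.default_rng(0)
sample = rng.normal(loc=1.0, scale=0.2, size=200)            # stand-in data
boot = np.array([rng.choice(sample, size=sample.size, replace=True).mean()
                 for _ in range(1000)])                       # N bootstrap re-evaluations
lower, upper = np.nanpercentile(boot, [5.0, 95.0])            # default percentiles used above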
import re
def get_file_name(part):
"""get file name using regex from fragment ID"""
return re.findall(r"='(.*\-[a-z]+).*", part)[0] | 30c8867d8e14b04c593359f1c16d9bf324711ba0 | 6,177 |
def get_helping_materials(project_id, limit=100, offset=0, last_id=None):
"""Return a list of helping materials for a given project ID.
:param project_id: PYBOSSA Project ID
:type project_id: integer
:param limit: Number of returned items, default 100
:type limit: integer
    :param offset: Offset for the query, default 0
    :type offset: integer
    :param last_id: id of the last helping material, used for pagination. If provided, offset is ignored
    :type last_id: integer
    :returns: list of HelpingMaterial objects, or the error response on failure
"""
if last_id is not None:
params = dict(limit=limit, last_id=last_id)
else:
params = dict(limit=limit, offset=offset)
print(OFFSET_WARNING)
params['project_id'] = project_id
try:
res = _pybossa_req('get', 'helpingmaterial',
params=params)
if type(res).__name__ == 'list':
return [HelpingMaterial(helping) for helping in res]
else:
return res
except: # pragma: no cover
raise | 163436a9a09816bc18b31c9911b87db74b8aefbd | 6,178 |
import math
def generate_sphere_points(n):
"""
Returns list of 3d coordinates of points on a sphere using the
Golden Section Spiral algorithm.
"""
points = []
inc = math.pi * (3 - math.sqrt(5))
offset = 2 / float(n)
for k in range(int(n)):
y = k * offset - 1 + (offset / 2)
r = math.sqrt(1 - y*y)
phi = k * inc
points.append([math.cos(phi)*r, y, math.sin(phi)*r])
return points | bd6c7624220f7928a44f6dcb24b7112e8d803eb4 | 6,179 |
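A quick sanity check for the generator above: every returned point should lie on the unit sphere up to floating-point error.

pts = generate_sphere_points(100)
assert len(pts) == 100
assert all(abs(x * x + y * y + z * z - 1.0) < 1e-9 for x, y, z in pts)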
def svn_repos_dir_delta2(*args):
"""
svn_repos_dir_delta2(svn_fs_root_t src_root, char src_parent_dir, char src_entry,
svn_fs_root_t tgt_root, char tgt_path,
svn_delta_editor_t editor, void edit_baton,
svn_repos_authz_func_t authz_read_func, svn_boolean_t text_deltas,
svn_depth_t depth, svn_boolean_t entry_props,
svn_boolean_t ignore_ancestry,
apr_pool_t pool) -> svn_error_t
"""
    return _repos.svn_repos_dir_delta2(*args) | c972237fee8c76a24fb9443a9607931566b642ff | 6,180 |
import numpy as np
def linear_r2_points(points: np.ndarray, coef: tuple, r2: R2 = R2.classic) -> float:
"""Computes the coefficient of determination (R2).
Args:
points (np.ndarray): numpy array with the points (x, y)
coef (tuple): the coefficients from the linear fit
r2 (R2): select the type of coefficient of determination
Returns:
float: coefficient of determination (R2)
"""
x = points[:, 0]
y = points[:, 1]
return linear_r2(x, y, coef, r2) | 98c33ba3354ed22ddf3ab718f2f41967c2555f18 | 6,181 |
from typing import List
from datetime import datetime
def _show_tournament_list() -> List:
"""
Функция возвращает список предстоящих турниров
"""
tournaments = []
for tournament in loop.run_until_complete(get_request('https://codeforces.com/api/contest.list?gym=false')):
if tournament['phase'] != 'BEFORE':
break
tournaments.append(tournament)
for tournament in range(len(tournaments)):
tournaments[tournament]['durationSeconds'] = datetime.utcfromtimestamp(tournaments[tournament]['durationSeconds']).strftime("%H:%M:%S")
tournaments[tournament]['startTimeSeconds'] = datetime.utcfromtimestamp(tournaments[tournament]['startTimeSeconds']).strftime("%d.%m.%Y %H:%M:%S")
return tournaments | 0815ae126671a8c85bb3311e900db48ce87fa1f0 | 6,182 |
def less_goals_scored():
"""
returns the lowest number of goals scored during one week
"""
return goals_scored('min') | fda281196148370d4639aef9dabc6ad1cb4fd339 | 6,183 |
from typing import Sequence
from typing import Union
from typing import Tuple
def compute_avgpool_output_shape(input_shape:Sequence[Union[int, None]],
kernel_size:Union[Sequence[int], int]=1,
stride:Union[Sequence[int], int]=1,
padding:Union[Sequence[int], int]=0,
channel_last:bool=False) -> Tuple[Union[int, None]]:
""" finished, cheched,
compute the output shape of a avgpool layer
input_shape: sequence of int or None,
shape of an input Tensor,
the first dimension is the batch dimension, which is allowed to be `None`
kernel_size: int, or sequence of int, default 1,
kernel size (filter size) of the layer, should be compatible with `input_shape`
stride: int, or sequence of int, default 1,
stride (down-sampling length) of the layer, should be compatible with `input_shape`
padding: int, or sequence of int, default 0,
padding length(s) of the layer, should be compatible with `input_shape`
channel_last: bool, default False,
channel dimension is the last dimension,
or the second dimension (the first is the batch dimension by convention)
Returns:
--------
output_shape: tuple,
shape of the output Tensor
"""
output_shape = compute_output_shape(
'avgpool',
input_shape, 1, kernel_size, stride, padding, 0, 1,
channel_last,
)
return output_shape | 5116f6fdb95c1cf07d34c2193e6e08eee47a06da | 6,184 |
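The helper compute_output_shape is external here; per spatial dimension it is assumed to apply the standard pooling arithmetic, illustrated below.

# out_len = floor((in_len + 2*padding - kernel_size) / stride) + 1
in_len, kernel_size, stride, padding = 100, 3, 2, 1
out_len = (in_len + 2 * padding - kernel_size) // stride + 1   # -> 50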
def _obs_intersect(line1, line2):
    """Check if two lines intersect. The boundaries don't count as
    intersection."""
    (x0, y0), (x1, y1) = line1
    (x2, y2), (x3, y3) = line2
    base1 = (x0, y0)
    base2 = (x2, y2)
    dir1 = (x1 - x0, y1 - y0)
    dir2 = (x3 - x2, y3 - y2)
    t1, t2 = _intersect(base1, dir1, base2, dir2)
    eps = 0.00001
    return -eps < t1 < 1.0 + eps and -eps < t2 < 1.0 + eps | ea2b268adac5fc1156b566ea0c6cabdd2f4fe94e | 6,185 |
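The helper _intersect is external to this snippet; as a hedged sketch, it presumably solves base1 + t1*dir1 == base2 + t2*dir2 for the two line parameters, e.g. via Cramer's rule (the name _intersect_sketch below is hypothetical):

def _intersect_sketch(base1, dir1, base2, dir2):
    # Solve [dir1, -dir2] @ [t1, t2]^T = base2 - base1 with Cramer's rule.
    det = dir1[0] * (-dir2[1]) - dir1[1] * (-dir2[0])
    if det == 0:                        # parallel lines: no unique solution
        return float('inf'), float('inf')
    dx = base2[0] - base1[0]
    dy = base2[1] - base1[1]
    t1 = (dx * (-dir2[1]) - dy * (-dir2[0])) / det
    t2 = (dir1[0] * dy - dir1[1] * dx) / det
    return t1, t2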
import json
import re
from subprocess import Popen, PIPE
from django.forms.models import model_to_dict
from django.http import JsonResponse
def project_configure(request, project_name):
"""
get configuration
:param request: request object
:param project_name: project name
:return: json
"""
# get configuration
if request.method == 'GET':
project = Project.objects.get(name=project_name)
project = model_to_dict(project)
project['configuration'] = json.loads(project['configuration']) if project['configuration'] else None
return JsonResponse(project)
# update configuration
elif request.method == 'POST':
project = Project.objects.filter(name=project_name)
data = json.loads(request.body)
configuration = json.dumps(data.get('configuration'), ensure_ascii=False)
project.update(**{'configuration': configuration})
# for safe protection
project_name = re.sub('[\!\@\#\$\;\&\*\~\"\'\{\}\]\[\-\+\%\^]+', '', project_name)
# execute generate cmd
cmd = ' '.join(['gerapy', 'generate', project_name])
p = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE)
stdout, stderr = bytes2str(p.stdout.read()), bytes2str(p.stderr.read())
if not stderr:
return JsonResponse({'status': '1'})
else:
return JsonResponse({'status': '0', 'message': stderr}) | a033d7d1810cee5e5370d8d9f6562f23e3e7e64a | 6,186 |
import time
import numpy as np
def run_epoch(session, model, eval_op=None, verbose=False):
"""Runs the model on the given data."""
start_time = time.time()
costs = 0.0
iters = 0
state = session.run(model.initial_state)
fetches = {
"cost": model.cost,
"final_state": model.final_state,
}
if eval_op is not None:
fetches["eval_op"] = eval_op
for step in range(model.input.epoch_size):
feed_dict = {}
for i, h in enumerate(model.initial_state):
feed_dict[h] = state[i]
vals = session.run(fetches, feed_dict)
cost = vals["cost"]
state = vals["final_state"]
costs += cost
iters += model.input.num_steps
if verbose and step % (model.input.epoch_size // 10) == 10:
print("%.3f perplexity: %.3f speed: %.0f wps" %
(step * 1.0 / model.input.epoch_size, np.exp(costs / iters),
iters * model.input.batch_size / (time.time() - start_time)))
return np.exp(costs / iters) | 641100d0789c3841a4b3cb67e42963387d0f888d | 6,187 |
def unemployment(
token="", version="stable", filter="", format="json", **timeseries_kwargs
):
"""Economic data
https://iexcloud.io/docs/api/#economic-data
Args:
token (str): Access token
version (str): API version
filter (str): filters: https://iexcloud.io/docs/api/#filter-results
format (str): return format, defaults to json
Supports all kwargs from `pyEX.timeseries.timeSeries`
Returns:
dict or DataFrame: result
"""
_timeseriesWrapper(timeseries_kwargs)
return timeSeries(
id="ECONOMIC",
key="UNRATE",
token=token,
version=version,
filter=filter,
format=format,
**timeseries_kwargs
) | a5412d78673f639e0d10a95bb91138da1b432221 | 6,188 |
import warnings
def splitunc(p):
"""Deprecated since Python 3.1. Please use splitdrive() instead;
it now handles UNC paths.
Split a pathname into UNC mount point and relative path specifiers.
Return a 2-tuple (unc, rest); either part may be empty.
If unc is not empty, it has the form '//host/mount' (or similar
using backslashes). unc+rest is always the input path.
Paths containing drive letters never have an UNC part.
"""
warnings.warn("ntpath.splitunc is deprecated, use ntpath.splitdrive instead",
DeprecationWarning, 2)
drive, path = splitdrive(p)
if len(drive) == 2:
# Drive letter present
return p[:0], p
return drive, path | d9748b551e6a9ba101b3817ab22c74dd30cf89d1 | 6,189 |
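As the deprecation notice says, splitdrive now covers the UNC case directly, for example:

import ntpath
ntpath.splitdrive(r'\\host\mount\dir\file')   # -> ('\\\\host\\mount', '\\dir\\file')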
def expand_locations(ctx, input, targets = []):
"""Expand location templates.
Expands all `$(execpath ...)`, `$(rootpath ...)` and deprecated `$(location ...)` templates in the
given string by replacing with the expanded path. Expansion only works for labels that point to direct dependencies
of this rule or that are explicitly listed in the optional argument targets.
See https://docs.bazel.build/versions/main/be/make-variables.html#predefined_label_variables.
Use `$(rootpath)` and `$(rootpaths)` to expand labels to the runfiles path that a built binary can use
to find its dependencies. This path is of the format:
- `./file`
- `path/to/file`
- `../external_repo/path/to/file`
Use `$(execpath)` and `$(execpaths)` to expand labels to the execroot (where Bazel runs build actions).
This is of the format:
- `./file`
- `path/to/file`
- `external/external_repo/path/to/file`
- `<bin_dir>/path/to/file`
- `<bin_dir>/external/external_repo/path/to/file`
    The deprecated `$(location)` and `$(locations)` expansions return either the execpath or rootpath depending on the context.
Args:
ctx: context
input: String to be expanded
targets: List of targets for additional lookup information.
Returns:
The expanded path or the original path
"""
return ctx.expand_location(input, targets = targets) | efa482d928484b7d6f9c8acbf81e0a3d5b4cd50f | 6,190 |
import requests
import json
import bs4
def scrape_db(test=False, write_file=True):
    """
    Function to scrape the bodybuilding.com recipe database and save results as json.
    Parameters:
    -----------
    test : bool, optional
        If True, only the single-recipe probe request is used (no full scrape).
    write_file : bool, optional
        If True, dump the results to 'bodybuilding_recipes.json' in the data folder.
    """
# Hacky way to get all recipes - you have to request the number. Luckily,
# this is listed at the beginning of any result you pull from DB.
# We want all of the recipes, so we'll do a quick request of one recipe to
# get the 'total' number in the DB
url_request = 'https://cms-api.bodybuilding.com/BbcomRecipe'
url_parameters = {'sort': 'publishDate', 'order': 'desc', 'limit': '1'}
fake_recipes_list = requests.get(url_request, params=url_parameters)
fake_recipes = bs4.BeautifulSoup(fake_recipes_list.content, features='html.parser')
fake = json.loads(str(fake_recipes))
# Get the total number of recipes in the db
total_recipes = fake['total']
if test == True:
all_recipes = fake_recipes
else:
# Change the 'limit' on the url to the total number of recipes
url_parameters['limit'] = str(total_recipes)
all_recipes_list = requests.get(url_request, params=url_parameters)
all_recipes = bs4.BeautifulSoup(all_recipes_list.content, features='html.parser')
# Just get search results and get rid of data before.
all_recipes_list = json.loads(str(all_recipes))['_embedded']['bb-cms:search-results']
# Dump to json file - results will always be saved in 'data' folder
if write_file:
save_path = _DATA_DIR.joinpath('bodybuilding_recipes.json')
rf = open(save_path, 'w')
json.dump(all_recipes_list, rf)
rf.close()
return all_recipes_list | d9883058ac434fca861168625493467bfbcafaed | 6,191 |
import functools
def require(required):
""" Decorator for checking the required values in state.
It checks the required attributes in the passed state and stop when
any of those is missing. """
def decorator(function):
@functools.wraps(function)
def wrapper(*args, **kwargs):
for key in required:
if key not in args[0]:
raise KeyError('{0} not passed to {1}'.format(
key, function.__name__))
return function(*args, **kwargs)
return wrapper
return decorator | 9bf04a95d39b89fd10c9872dd7fe29c5c10f06a1 | 6,192 |
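A small usage sketch of the decorator above (the state mapping is assumed to be the first positional argument, which is what the args[0] lookup implies):

@require(['user_id', 'token'])
def handle(state):
    return state['user_id']

handle({'user_id': 1, 'token': 'abc'})      # returns 1
# handle({'user_id': 1}) raises KeyError: 'token not passed to handle'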
import re
def simplify_unicode(sentence):
"""
Most accented Latin characters are pronounced just the same as the base character.
    Shrink as much of the extended Unicode repertoire into the Estonian alphabet as possible.
    It is GOOD for machine learning to have a smaller orthographic repertoire.
It is a BAD idea if we start using any proper name dictionaries for morph analysis
or pronunciations later on. You are warned.
:param sentence:
:return: str
"""
sentence = sentence.replace("Ð", "D").replace("Þ", "Th")
sentence = sentence.replace("ð", "d").replace("þ", "th")
sentence = sentence.replace("ø", "ö").replace("Ø", "Ö")
sentence = sentence.replace("ß", "ss").replace("ẞ", "Ss")
sentence = re.sub(r'S(c|C)(h|H)', r'Š', sentence)
sentence = re.sub(r'sch', r'š', sentence)
sentence = re.sub(r'[ĆČ]', r'Tš', sentence)
sentence = re.sub(r'[ćč]', r'tš', sentence)
sentence = re.sub(r'[^A-ZÄÖÜÕŽŠa-zäöüõšž ,]+', lambda m: r'{}'.format( strip_combining(m.group(0)) ), sentence)
return sentence | 291a1e002d4d428697d7b892291ad314f0000a2a | 6,193 |
import pickle
def read_file(pickle_file_name):
"""Reads composite or non-composite novelty results from Pickle file.
:param pickle_file_name: Path to input file (created by
`write_standard_file` or `write_pmm_file`).
:return: novelty_dict: Has the following keys if not a composite...
novelty_dict['denorm_radar_matrix_baseline']: See doc for
`write_standard_file`.
novelty_dict['denorm_radar_matrix_trial']: Same.
novelty_dict['novel_indices']: Same.
novelty_dict['denorm_radar_matrix_upconv']: Same.
novelty_dict['denorm_radar_matrix_upconv_svd']: Same.
novelty_dict['percent_variance_to_keep']: Same.
novelty_dict['cnn_feature_layer_name']: Same.
novelty_dict['multipass']: Same.
novelty_dict['baseline_full_id_strings']: Same.
novelty_dict['baseline_times_unix_sec']: Same.
novelty_dict['trial_full_id_strings']: Same.
novelty_dict['trial_times_unix_sec']: Same.
novelty_dict['cnn_file_name']: Same.
novelty_dict['upconvnet_file_name']: Same.
...or the following keys if composite...
novelty_dict['mean_denorm_radar_matrix_baseline']:
See doc for `write_pmm_file`.
novelty_dict['mean_denorm_radar_matrix_novel']: Same.
novelty_dict['mean_denorm_radar_matrix_upconv']: Same.
novelty_dict['mean_denorm_radar_matrix_upconv_svd']: Same.
novelty_dict['cnn_file_name']: Same.
novelty_dict['non_pmm_file_name']: Same.
novelty_dict['pmm_max_percentile_level']: Same.
:return: pmm_flag: Boolean flag. True if `novelty_dict` contains composite,
False otherwise.
:raises: ValueError: if dictionary does not contain expected keys.
"""
pickle_file_handle = open(pickle_file_name, 'rb')
novelty_dict = pickle.load(pickle_file_handle)
pickle_file_handle.close()
pmm_flag = MEAN_BASELINE_MATRIX_KEY in novelty_dict
if pmm_flag:
missing_keys = list(
set(PMM_FILE_KEYS) - set(novelty_dict.keys())
)
else:
missing_keys = list(
set(STANDARD_FILE_KEYS) - set(novelty_dict.keys())
)
if len(missing_keys) == 0:
return novelty_dict, pmm_flag
error_string = (
'\n{0:s}\nKeys listed above were expected, but not found, in file '
'"{1:s}".'
).format(str(missing_keys), pickle_file_name)
raise ValueError(error_string) | fcc4976648bafc7e845a22552965e1f65e3ddc85 | 6,194 |
import re
def AutoscalersForMigs(migs, autoscalers, project):
"""Finds Autoscalers with target amongst given IGMs.
Args:
migs: List of triples (IGM name, scope type, scope name).
autoscalers: A list of Autoscalers to search among.
project: Project owning resources.
Returns:
A list of all Autoscalers with target on mig_names list.
"""
igm_url_regexes = []
for (name, scope_type, scope_name) in migs:
igm_url_regexes.append(
'/projects/{project}/{scopeType}/{scopeName}/'
'instanceGroupManagers/{name}$'
.format(project=project,
scopeType=(scope_type + 's'),
scopeName=scope_name,
name=name))
igm_url_regex = re.compile('(' + ')|('.join(igm_url_regexes) + ')')
result = [
autoscaler for autoscaler in autoscalers
if igm_url_regex.search(autoscaler.target)
]
return result | 12b6e10c16c7ea5324f5090cdc3027a38e1247c1 | 6,195 |
def log_loss(
predictions: ArrayLike,
targets: ArrayLike,
) -> ArrayLike:
"""Calculates the log loss of predictions wrt targets.
Args:
predictions: a vector of probabilities of arbitrary shape.
targets: a vector of probabilities of shape compatible with predictions.
Returns:
a vector of same shape of `predictions`.
"""
base.type_assert([predictions, targets], float)
return -jnp.log(likelihood(predictions, targets)) | a3d27b0229b287e32701fa80822ad1025e875a62 | 6,196 |
import json
def GetAccessTokenOrDie(options):
"""Generates a fresh access token using credentials passed into the script.
Args:
options: Flag values passed into the script.
Returns:
A fresh access token.
Raises:
ValueError: response JSON could not be parsed, or has no access_token.
"""
cred = GetDSApiCredOrDie(options)
[cid, csc, refresh_token] = cred.split(",")
query_string_template = (
"refresh_token=%s&client_id=%s&client_secret=%s"
"&grant_type=refresh_token"
)
output = RunCommand(
[
"curl",
"--data",
query_string_template % (refresh_token, cid, csc),
"https://accounts.google.com/o/oauth2/token",
]
)
json_output = json.loads(output)
if "access_token" in json_output:
return json_output["access_token"]
else:
raise ValueError("missing access_token in response: %s" % output) | 6ecbd6875931c6ef139da52578050380da4e62bd | 6,197 |
def remove_whitespace(tokens):
"""Remove any top-level whitespace and comments in a token list."""
return tuple(
token for token in tokens
if token.type not in ('whitespace', 'comment')) | 5ed78f38277487d2e05e20e10e25413b05cab8e5 | 6,198 |
import cython
import numpy as np
def update(args):
"""
For LdaCgsMulti
"""
(docs, doc_indices, mtrand_state, dtype) = args
start, stop = docs[0][0], docs[-1][1]
global Ktype
if _K.value < 2 ** 8:
Ktype = np.uint8
elif _K.value < 2 ** 16:
Ktype = np.uint16
else:
raise NotImplementedError("Invalid Ktype. k={}".format(_K))
corpus = np.frombuffer(_corpus, dtype=dtype)[start:stop]
Z = np.frombuffer(_Z, dtype=Ktype)[start:stop].copy()
gbl_word_top = np.frombuffer(_word_top, dtype=np.float32)
gbl_word_top = gbl_word_top.reshape(_V.value, _K.value)
loc_word_top = gbl_word_top.copy()
inv_top_sums = np.frombuffer(_inv_top_sums, dtype=np.float32).copy()
top_doc = np.frombuffer(_top_doc, dtype=np.float32)
top_doc = top_doc.reshape(_K.value, int(top_doc.size/_K.value))
top_doc = top_doc[:, doc_indices[0]:doc_indices[1]].copy()
log_p = 0
log_wk = np.log(gbl_word_top * inv_top_sums[np.newaxis, :])
log_kc = np.log(top_doc / top_doc.sum(0)[np.newaxis, :])
indices = np.array([(j - start) for (i,j) in docs], dtype='i')
if dtype == np.uint16 and Ktype == np.uint8:
update_fn = cgs_update[cython.ushort,cython.uchar]
elif dtype == np.uint16 and Ktype == np.uint16:
update_fn = cgs_update[cython.ushort,cython.ushort]
elif dtype == np.uint32 and Ktype == np.uint8:
update_fn = cgs_update[cython.uint,cython.uchar]
elif dtype == np.uint32 and Ktype == np.uint16:
update_fn = cgs_update[cython.uint,cython.ushort]
else:
raise NotImplementedError
results = update_fn(_iteration.value,
corpus,
loc_word_top,
inv_top_sums,
top_doc,
Z,
indices,
mtrand_state[0],
mtrand_state[1],
mtrand_state[2],
mtrand_state[3],
mtrand_state[4])
#final_results = [np.asarray(result, dtype=dtype)
# for result,dtype in zip(results[:4],
# [Ktype, np.float32, np.float32, np.float32])]
#final_results.extend(results[4:])
(loc_word_top, inv_top_sums, top_doc, Z, log_p, mtrand_str, mtrand_keys,
mtrand_pos, mtrand_has_gauss, mtrand_cached_gaussian) = results
loc_word_top -= gbl_word_top
return (Z, top_doc, loc_word_top, log_p,
mtrand_str, mtrand_keys, mtrand_pos,
mtrand_has_gauss, mtrand_cached_gaussian) | 2dd014472c77e363fafab1f9dc22ce0267d3e3df | 6,199 |
def warn(string: str) -> str:
"""Add warn colour codes to string
Args:
string (str): Input string
Returns:
str: Warn string
"""
return "\033[93m" + string + "\033[0m" | 0bdbe5e7052e1994d978e45273baef75a1b72d89 | 6,200 |
import tensorflow as tf
def normalized_mean_square_error(logits, labels, axis = [0,1,2,3]):
"""
logits : [batch_size, w, h, num_classes]
labels : [batch_size, w, h, 1]
"""
with tf.name_scope("normalized_mean_square_error"):
nmse_a = tf.sqrt(tf.reduce_sum(tf.squared_difference(logits, labels), axis=[1,2,3]))
nmse_b = tf.sqrt(tf.reduce_sum(tf.square(labels), axis=[1,2,3]))
nmse = tf.reduce_mean(nmse_a / nmse_b)
return nmse | 0aee175ed0be3132d02018961265461e4880221b | 6,201 |
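For reference, a NumPy equivalent of the TF expression above (a sketch on small constant arrays: per-sample L2 error norm divided by the label norm, then averaged over the batch):

import numpy as np
x_hat = np.ones((2, 4, 4, 1))
x = np.full((2, 4, 4, 1), 2.0)
per_sample = np.sqrt(((x_hat - x) ** 2).sum(axis=(1, 2, 3))) / np.sqrt((x ** 2).sum(axis=(1, 2, 3)))
nmse = per_sample.mean()   # 0.5 for these arrays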
def get_partition_to_num_rows(
namespace, tablename, partition_column, partition_column_values
):
"""
Helper function to get total num_rows in hive for given
partition_column_values.
"""
partitions = {
"{0}={1}".format(partition_column, partition_column_value)
for partition_column_value in partition_column_values
}
# Setting higher number of retries, as during testing, sometimes default
# "retries" values didn't seem enough in some cases.
ms = metastore.metastore(
namespace=namespace,
meta_only=True,
retries=10,
# timeout in milliseconds.
timeout=1800000,
)
partition_to_num_rows = {}
all_partitions = ms.get_partitions(tablename)
for hive_partition in all_partitions:
assert "numRows" in hive_partition.parameters, (
"numRows not in hive_partition.parameters,"
"Do not use Presto tables, only Hive tables!')"
)
if hive_partition.partitionName in partitions:
            partition_column_value = hive_partition.partitionName.split("=")[1]
            partition_to_num_rows[partition_column_value] = int(
                hive_partition.parameters["numRows"]
)
return partition_to_num_rows | 305d40fd326bc45e906925b94077182584ffe3be | 6,203 |
def get_welcome_response():
""" If we wanted to initialize the session to have some attributes we could
add those here
"""
session_attributes = initialize_game()
card_title = "Welcome"
speech_output = "Hello! I am Cookoo. Let's play a game. " \
"Are you ready to play?"
return build_response(session_attributes, build_speechlet_response(
card_title, speech_output)) | 9c28194575013e98d1d6130a956714f65ebe3764 | 6,204 |
import tensorflow as tf
def kl_divergence_with_logits(logits_a, logits_b):
"""
Compute the per-element KL-divergence of a batch.
Args:
logits_a: tensor, model outputs of input a
logits_b: tensor, model outputs of input b
Returns:
Tensor of per-element KL-divergence of model outputs a and b
"""
a = tf.nn.softmax(logits_a, axis=1)
a_loga = tf.reduce_sum(a * log_softmax(logits_a), 1)
a_logb = tf.reduce_sum(a * log_softmax(logits_b), 1)
return a_loga - a_logb | 7df5976287edf5de37291db653a4334ed046a2f3 | 6,205 |
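The two reduced terms implement KL(a||b) = sum(a * (log a - log b)) per example; a quick NumPy check of the same identity, with the softmax written out explicitly:

import numpy as np
logits_a = np.array([[2.0, 0.5, -1.0]])
logits_b = np.array([[0.0, 1.0, 0.0]])
def _softmax(z):
    e = np.exp(z - z.max(axis=1, keepdims=True))
    return e / e.sum(axis=1, keepdims=True)
a, b = _softmax(logits_a), _softmax(logits_b)
kl = (a * (np.log(a) - np.log(b))).sum(axis=1)   # matches a_loga - a_logb above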
import csv
def load_labels(abs_path):
"""
loads relative path file as dictionary
Args:
abs_path: absolute path
Returns dictionary of mappings
"""
label_tsv = open(abs_path, encoding="utf-8")
labels = list(csv.reader(label_tsv, delimiter="\t"))
return labels | 8ded58965dcc98b7a0aaa6614cbe4b66722dc76b | 6,206 |
import numpy as np
from scipy.cluster.hierarchy import cut_tree
def cut_tree_balanced(linkage_matrix_Z, max_cluster_size, verbose=False):
"""This function performs a balanced cut tree of a SciPy linkage matrix built using any linkage method
(e.g. 'ward'). It builds upon the SciPy and Numpy libraries.
The function looks recursively along the hierarchical tree, from the root (single cluster gathering
all the samples) to the leaves (i.e. the clusters with only one sample), retrieving the biggest
possible clusters containing a number of samples lower than a given maximum. In this way, if a
cluster at a specific tree level contains a number of samples higher than the given maximum, it is
ignored and its offspring (smaller) sub-clusters are taken into consideration. If the cluster contains
a number of samples lower than the given maximum, it is taken as result and its offspring sub-clusters
not further processed.
Input parameters:
linkage_matrix_Z: linkage matrix resulting from calling the method scipy.cluster.hierarchy.ward()
I.e. it contains the hierarchical clustering encoded as a linkage matrix.
max_cluster_size: maximum number of data samples contained within the resulting clusters. Thus, all
resulting clusters will contain a number of data samples <= max_cluster_size.
Note that max_cluster_size must be >= 1.
verbose: activates (True) / deactivates (False) some output print commands, which can be useful to
test and understand the proposed tree cut method.
Returns:
vec_cluster_id: one-dimensional numpy array of integers containing for each input sample its corresponding
cluster id. The cluster id is an integer which is higher for deeper tree levels.
vec_last_cluster_level: one-dimensional numpy array of arrays containing for each input sample its
corresponding cluster tree level, i.e. a sequence of 0s and 1s. Note that the cluster level is longer for
deeper tree levels, being [0] the root cluster, [0, 0] and [0, 1] its offspring, and so on. Also note that
in each cluster splitting, the label 0 denotes the bigger cluster, while the label 1 denotes the smallest.
"""
try:
# Assert that the input max_cluster_size is >= 1
assert max_cluster_size >= 1
# Perform a full cut tree of the linkage matrix, i.e. containing all tree levels
full_cut = cut_tree(linkage_matrix_Z)
if verbose:
print("Interim full cut tree (square matrix)")
print("Shape = " + str(full_cut.shape))
print(full_cut)
print('')
        # Initialize the variable containing the current cluster id (it will be higher for each newly
        # found valid cluster, i.e. for each found cluster with <= max_cluster_size data samples)
last_cluster_id = 1
# Initialize the resulting cluster id vector (containing for each row in input_data_x_sample
# its corresponding cluster id)
vec_cluster_id = np.zeros(full_cut.shape[1], dtype=int)
# Initialize the resulting cluster level vector (containing for each data sample its
# corresponding cluster tree level, i.e. a string of '0's and '1's separated by '.')
vec_last_cluster_level = np.empty((full_cut.shape[1],), dtype=object)
for i in range(full_cut.shape[1]): vec_last_cluster_level[i] = np.array([0],int)
# Scan the full cut matrix from the last column (root tree level) to the first column (leaves tree level)
if verbose:
print("Note about columns: within the full cut tree, the column " + str(full_cut.shape[1]-1) +
" represents the root, while 0 represent the leaves.")
print("We now scan the full cut tree from the root (column " + str(full_cut.shape[1]-1) + ") "
"to the leaves (column 0).")
print('')
for curr_column in range(full_cut.shape[1]-1,-1,-1):
# Get a list of unique group ids and their count within the current tree level
values, counts = np.unique(full_cut[:,curr_column], return_counts=True)
# Stop if all samples have been already selected (i.e. if all data samples have been already clustered)
if (values.size==1) and (values[0]==-1):
break
# For each group id within the current tree level
for curr_elem_pos in range(values.size):
# If it is a valid group id (i.e. not yet marked as processed with -1) ...
                # Note: data samples which were already included in a valid cluster id (i.e. at a higher tree level)
# are marked with the group id -1 (see below)
if (values[curr_elem_pos] >= 0):
# Select the current group id
selected_curr_value = values[curr_elem_pos]
# Look for the vector positions (related to rows in input_data_x_sample) belonging to
# the current group id
selected_curr_elems = np.where(full_cut[:,curr_column]==selected_curr_value)
# Major step #1: Populate the resulting vector of cluster levels for each data sample
# If we are not at the root level (i.e. single cluster gathering all the samples) ...
if curr_column < (full_cut.shape[1]-1):
# Get the ancestor values and element positions
selected_ancestor_value = full_cut[selected_curr_elems[0][0],curr_column+1]
selected_ancestor_elems = np.where(full_cut[:,curr_column+1]==selected_ancestor_value)
# Compute the values and counts of the offspring (i.e. curr_elem + brothers) and sort them
# by their count (so that the biggest cluster gets the offspring_elem_label = 0, see below)
offspring_values, offspring_counts = np.unique(full_cut[selected_ancestor_elems,curr_column],
return_counts=True)
count_sort_ind = np.argsort(-offspring_counts)
offspring_values = offspring_values[count_sort_ind]
offspring_counts = offspring_counts[count_sort_ind]
# If the number of descendants is > 1 (i.e. if the curr_elem has at least one brother)
if (offspring_values.shape[0] > 1):
# Select the position of the current value (i.e. 0 or 1) and append it to the cluster level
offspring_elem_label = np.where(offspring_values==selected_curr_value)[0][0]
for i in selected_curr_elems[0]:
vec_last_cluster_level[i] = np.hstack((vec_last_cluster_level[i], offspring_elem_label))
# Major step #2: Populate the resulting vector of cluster ids for each data sample,
# and mark them as already clustered (-1)
# If the number of elements is below max_cluster_size ...
if (counts[curr_elem_pos] <= max_cluster_size):
if verbose:
print("Current column in full cut tree = " + str(curr_column))
print("list_group_ids: " + str(values))
print("list_count_samples: " + str(counts))
print("selected_curr_value: " + str(selected_curr_value) + ", count_samples = " +
str(counts[curr_elem_pos]) + ", marked as result")
print('')
# Relate these vector positions to the current cluster id
vec_cluster_id[selected_curr_elems] = last_cluster_id
# Delete these vector positions at the lower tree levels for further processing
# (i.e. mark these elements as already clustered)
full_cut[selected_curr_elems,0:curr_column] = -1
# Update the cluster id
last_cluster_id = last_cluster_id + 1
# Return the resulting clustering array (containing for each row in input_data_x_sample its
# corresponding cluster id) and the clustering level
return vec_cluster_id, vec_last_cluster_level
except AssertionError:
print("Please use a max_cluster_size >= 1") | 53290f432b9ad7404760e124ffe6d03e95e5d529 | 6,207 |
from typing import Callable
def len_smaller(length: int) -> Callable:
"""Measures if the length of a sequence is smaller than a given length.
>>> len_smaller(2)([0, 1, 2])
False
"""
def len_smaller(seq):
return count(seq) < length
return len_smaller | a43f1344a46a57d443d267de99ba7db08b9bf911 | 6,208 |
import numpy as np
def e_2e_fun(theta, e_init=e_1f):
"""
Electron energy after Compton scattering, (using energy e_1f)
:param theta: angle for scattered photon
:param e_init: initial photon energy
:return:
"""
return e_init / (((m_e * c ** 2) / e_init) * (1 / (1 - np.cos(theta))) + 1) | 8785f6dfbb4226df88e6ab2b883a989ff799d240 | 6,209 |
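For reference, the return expression is algebraically the electron kinetic energy from the standard Compton formula, assuming e_init is the incident photon energy E_\gamma:

E'_\gamma = \frac{E_\gamma}{1 + \frac{E_\gamma}{m_e c^2}(1-\cos\theta)},
\qquad
T_e = E_\gamma - E'_\gamma = \frac{E_\gamma}{\frac{m_e c^2}{E_\gamma}\cdot\frac{1}{1-\cos\theta} + 1}.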
from typing import List
def interval_list_intersection(A: List[List], B: List[List], visualization: bool = True) -> List[List]:
"""
    LeetCode 986: Interval List Intersections
Given two lists of closed intervals, each list of intervals is pairwise disjoint and in sorted order.
Return the intersection of these two interval lists.
Examples:
1. A: [[0, 2], [5, 10], [13, 23], [24, 25]], B: [[1, 5], [8, 12], [15, 24], [25, 26]]
return: [[1, 2], [5, 5], [8, 10], [15, 23], [24, 24], [25, 25]]
"""
res = []
i = j = 0
while i < len(A) and j < len(B):
s = max(A[i][0], B[j][0])
e = min(A[i][1], B[j][1])
if s <= e:
res.append([s, e])
if A[i][1] < B[j][1]:
i += 1
else:
j += 1
if visualization:
interval_list_intersection_visualization(A, B, res)
return res | 722902e4c4c076a1dc25d07cc3253b2ec9f3d110 | 6,212 |
import nltk
def tokenize_query(query):
""" Tokenize a query """
tokenized_query = tokenizer.tokenize(query)
stop_words = set(nltk.corpus.stopwords.words("english"))
tokenized_query = [
word for word in tokenized_query if word not in stop_words]
tokenized_query = [stemmer.stem(word) for word in tokenized_query]
tokenized_query = [word.lower() for word in tokenized_query]
return tokenized_query | 422d59dc95661496dcfac83f142190a94127ae68 | 6,214 |
def rewrite_return(func):
"""Rewrite ret ops to assign to a variable instead, which is returned"""
ret_normalization.run(func)
[ret] = findallops(func, 'ret')
[value] = ret.args
ret.delete()
return value | d141ae9d2f36f4f3e41da626ed43a3902e43c267 | 6,215 |
def get_loss_fn(loss_factor=1.0):
"""Gets a loss function for squad task."""
def _loss_fn(labels, model_outputs):
start_positions = labels['start_positions']
end_positions = labels['end_positions']
start_logits, end_logits = model_outputs
return squad_loss_fn(
start_positions,
end_positions,
start_logits,
end_logits,
loss_factor=loss_factor)
return _loss_fn | ad07afbd39aa1338a0aeb3c1398aefacebceffa3 | 6,216 |
import asyncio
async def run_command(*args):
"""
https://asyncio.readthedocs.io/en/latest/subprocess.html
"""
# Create subprocess
process = await asyncio.create_subprocess_exec(
*args,
# stdout must a pipe to be accessible as process.stdout
stdout=asyncio.subprocess.PIPE)
# Wait for the subprocess to finish
stdout, stderr = await process.communicate()
# Return stdout
return stdout.decode().strip() | a0071a1bb8ba169179c67d22f5c8caca717697b3 | 6,217 |
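Driving the coroutine from synchronous code is just:

import asyncio
output = asyncio.run(run_command('echo', 'hello'))   # -> 'hello'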
def get_variants_in_region(db, chrom, start, stop):
"""
Variants that overlap a region
Unclear if this will include CNVs
"""
xstart = get_xpos(chrom, start)
xstop = get_xpos(chrom, stop)
variants = list(db.variants.find({ 'xpos': {'$lte': xstop, '$gte': xstart}
}, projection={'_id': False}, limit=SEARCH_LIMIT))
#add_consequence_to_variants(variants)
return list(variants) | 5665f4ff65832449c2dd7edb182fc3bd0707d189 | 6,218 |
def get_business(bearer_token, business_id):
"""Query the Business API by a business ID.
    Args:
        bearer_token (str): Bearer token used to authorize the request.
        business_id (str): The ID of the business to query.
Returns:
dict: The JSON response from the request.
"""
business_path = BUSINESS_PATH + business_id
return request(API_HOST, business_path, bearer_token) | 982eb518b7d9f94b7208fb68ddbc9f6607d9be9a | 6,219 |
from keras.layers import Conv2D, Input, MaxPooling2D, ZeroPadding2D
from keras.layers.normalization import BatchNormalization
from keras.layers.merge import concatenate
from keras.models import Model
from keras.regularizers import l2
def AlexNet_modified(input_shape=None, regularize_weight=0.0001):
"""
Alexnet convolution layers with added batch-normalization and regularization
:param input_shape:
:param regularize_weight:
:return:
"""
img_input = Input(shape=input_shape)
#Branch A (mimic the original alexnet)
x = Conv2D(48, (11, 11), strides=(4,4), activation='relu', padding='same', kernel_regularizer=l2(regularize_weight))(img_input)
x = MaxPooling2D((3,3), strides=(2, 2))(x)
x = BatchNormalization(axis=-1)(x)
x = ZeroPadding2D((2, 2))(x)
x = Conv2D(128, (5, 5), strides=(1,1), activation='relu', padding='same', kernel_regularizer=l2(regularize_weight))(x)
x = MaxPooling2D((3, 3), strides=(2, 2))(x)
x = BatchNormalization(axis=-1)(x)
x = ZeroPadding2D((1, 1))(x)
x = Conv2D(192, (3, 3), strides=(1, 1), activation='relu', padding='same', kernel_regularizer=l2(regularize_weight))(x)
x = BatchNormalization(axis=-1)(x)
x = ZeroPadding2D((1, 1))(x)
x = Conv2D(192, (3, 3), strides=(1, 1), activation='relu', padding='same', kernel_regularizer=l2(regularize_weight))(x)
x = BatchNormalization(axis=-1)(x)
x = ZeroPadding2D((1, 1))(x)
x = Conv2D(128, (3, 3), strides=(1, 1), activation='relu', padding='same', kernel_regularizer=l2(regularize_weight))(x)
x = MaxPooling2D((3, 3), strides=(2, 2))(x)
x = ZeroPadding2D((1, 1))(x)
# Branch B (mimic the original alexnet)
y = Conv2D(48, (11, 11), strides=(4, 4), activation='relu', padding='same', kernel_regularizer=l2(regularize_weight))(img_input)
y = MaxPooling2D((3, 3), strides=(2, 2))(y)
y = BatchNormalization(axis=-1)(y)
y = ZeroPadding2D((2, 2))(y)
y = Conv2D(128, (5, 5), strides=(1, 1), activation='relu', padding='same', kernel_regularizer=l2(regularize_weight))(y)
y = MaxPooling2D((3, 3), strides=(2, 2))(y)
y = BatchNormalization(axis=-1)(y)
y = ZeroPadding2D((1, 1))(y)
y = Conv2D(192, (3, 3), strides=(1, 1), activation='relu', padding='same', kernel_regularizer=l2(regularize_weight))(y)
y = BatchNormalization(axis=-1)(y)
y = ZeroPadding2D((1, 1))(y)
y = Conv2D(192, (3, 3), strides=(1, 1), activation='relu', padding='same', kernel_regularizer=l2(regularize_weight))(y)
y = BatchNormalization(axis=-1)(y)
y = ZeroPadding2D((1, 1))(y)
y = Conv2D(128, (3, 3), strides=(1, 1), activation='relu', padding='same', kernel_regularizer=l2(regularize_weight))(y)
y = MaxPooling2D((3, 3), strides=(2, 2))(y)
y = ZeroPadding2D((1, 1))(y)
out = concatenate([x,y], axis=-1)
inputs = img_input
model = Model(inputs, out, name='alexnet')
return model | b4bf37200a2bf429fe09eb9893b673e381ce0b36 | 6,220 |
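A usage sketch (the 227x227 RGB input size is an assumption, chosen to mirror the original AlexNet):

model = AlexNet_modified(input_shape=(227, 227, 3), regularize_weight=1e-4)
model.summary()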
import re
import numpy as np
def readblock(fileObj):
"""
parse the block of data like below
ORDINATE ERROR ABSCISSA
2.930E-06 1.8D-07 5.00E+02 X.
8.066E-06 4.8D-07 6.80E+02 .X.
1.468E-05 8.3D-07 9.24E+02 ..X.
2.204E-05 1.2D-06 1.26E+03 ...X...
"""
data = []
p = re.compile('ORDINATE')
q = re.compile('0LINEAR COEFFICIENTS')
for line in fileObj:
if q.search(line) is not None:
break
if p.search(line) is None:
dataContent = line[0:31]
dataContent = dataContent.replace('D', 'E')
datarow = list(map(float, dataContent.split()))
data.append(datarow)
return np.array(data) | 838adc5e4efc4f97c255917e8d51b5da398718bd | 6,221 |
import numpy as np
def as_scalar(scalar):
"""Check and return the input if it is a scalar.
If it is not scalar, raise a ValueError.
Parameters
----------
scalar : Any
the object to check
Returns
-------
float
the scalar if x is a scalar
"""
if isinstance(scalar, np.ndarray):
assert scalar.size == 1
return scalar[0]
elif np.isscalar(scalar):
return scalar
else:
raise ValueError('expected scalar, got %s' % scalar) | ca5dd15eb2672ec61785dd2a36495d61ad4a3f9f | 6,222 |
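Both accepted input forms behave the same way:

as_scalar(3.5)               # -> 3.5
as_scalar(np.array([3.5]))   # -> 3.5
# as_scalar(np.array([1, 2])) would fail the size == 1 assertion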
import itertools
import numpy as np
def evaluate_dnf( # pylint: disable=too-many-arguments,too-many-locals
num_objects: int,
num_vars: int,
nullary: np.ndarray,
unary: np.ndarray,
binary: np.ndarray,
and_kernel: np.ndarray,
or_kernel: np.ndarray,
target_arity: int,
) -> np.ndarray:
"""Evaluate given batch of interpretations."""
# nullary (B, numNullary)
# unary (B, O, numUnary)
# binary (B, O, O-1, numBinary)
# and_kernel (H, IN)
# or_kernel (H,)
# ---------------------------
# We need a binding / permutation matrix that binds every object to every
# variable, so we can evaluate the rule. The following list of tuples,
# tells us which constant each variable is for each permutation
perm_idxs = np.array(
list(itertools.permutations(range(num_objects), num_vars))
) # (K, V)
# ---
# Binary comparison indices for variables, XY XZ YX YZ ...
var_bidxs = np.stack(np.nonzero(1 - np.eye(num_vars))).T # (V*(V-1), 2)
perm_bidxs = perm_idxs[:, var_bidxs] # (K, V*(V-1), 2)
obj_idxs = np.stack(np.nonzero(1 - np.eye(num_objects))).T # (O*(O-1), 2)
    # The following matrix tells which variable binding pair is actually the
    # object pair we're looking for
var_obj_pairs = (perm_bidxs[..., None, :] == obj_idxs).all(-1)
# (K, V*(V-1), O*(O-1))
# We are guaranteed to have 1 matching pair due to unique bindings, so the
# non-zero elements in the last dimension encode the index we want
var_obj_pairs = np.reshape(np.nonzero(var_obj_pairs)[-1], var_obj_pairs.shape[:2])
# (K, V*(V-1))
# ---------------------------
batch_size = nullary.shape[0] # B
# Take the permutations
perm_unary = unary[:, perm_idxs] # (B, K, V, numUnary)
perm_binary = binary.reshape(
(batch_size, -1, binary.shape[-1])
) # (B, O*(O-1), numBinary)
perm_binary = perm_binary[:, var_obj_pairs] # (B, K, V*(V-1), numBinary)
perm_binary = perm_binary.reshape(
(
batch_size,
var_obj_pairs.shape[0],
num_vars,
num_vars - 1,
perm_binary.shape[-1],
)
)
# (B, K, V, V-1, numBinary)
# ---------------------------
# Merge different arities
flat_nullary = np.repeat(
nullary[:, None], perm_unary.shape[1], axis=1
) # (B, K, numNullary)
interpretation = flatten_interpretation(flat_nullary, perm_unary, perm_binary)
# (B, K, IN)
# ---------------------------
# Evaluate
and_eval = np.min(
interpretation[:, :, None] * and_kernel + (and_kernel == 0), -1
) # (B, K, H)
# ---
# Reduction of existential variables if any, K actually expands to O, O-1 etc numVars many times
# If the arity of the target predicate is 0, then we can reduce over K. If
# it is 1, then expand once then reduce over remaining variables, i.e. O, K//O, H -> (O, H)
shape_range = num_objects - np.arange(num_objects) # [numObjs, numObjs-1, ...]
new_shape = np.concatenate(
[[batch_size], shape_range[:target_arity], [-1, and_eval.shape[-1]]]
) # [B, O, K//O,, H]
and_eval = np.reshape(and_eval, new_shape)
# (B, O, K//0, H)
perm_eval = np.max(and_eval, -2) # (B, H,) if arity 0, (B, O, H) if 1 etc.
# ---
or_eval = np.max(
or_kernel * perm_eval - (or_kernel == 0), -1
) # (B,) if arity 0, (B, O) if 1 etc.
# ---------------------------
return or_eval | 2a73f917594361ba4837e7e1d5f45398b3b0eb8d | 6,223 |
def black_color_func(word, font_size, position, orientation,
random_state=None, **kwargs):
"""Make word cloud black and white."""
return("hsl(0,100%, 1%)") | d5e874a4f62d30abcba29476d0ba7fc3a31b0ca6 | 6,224 |
import re
def setup(hass, config):
""" Setup history hooks. """
hass.http.register_path(
'GET',
re.compile(
r'/api/history/entity/(?P<entity_id>[a-zA-Z\._0-9]+)/'
r'recent_states'),
_api_last_5_states)
hass.http.register_path('GET', URL_HISTORY_PERIOD, _api_history_period)
return True | c87ddf7d7473d49b142a866043c0adee216aed39 | 6,225 |
import itertools
def fitallseq(digitslist, list):
"""if there is repeating digits, itertools.permutations() is still usable
if fail, still print some print, if i >= threshold, served as start point for new searching """
for p in itertools.permutations(digitslist):
#print "".join(pw)
i=0
pw="".join(p)
for seq in list:
if seqfit(seq,pw):
i=i+1
continue
else:
break
if i==nlines:
print("password sequence is found as:", pw)
return True
print("password is not found in all %d digits permutations", len(digitslist))
return False | 069c9a2038593e7146558a53ac86c8fe877b44d3 | 6,227 |
import sys
def adduser(args):
    """Add or update a user to the database: <username> <password> [[role] [role] ...]"""
    try:
        username, password = args[0:2]
    except (IndexError, ValueError) as exc:
        print("you must include at least a username and password: %s" % exc, file=sys.stderr)
        usage()
    roles = args[2:]
    try:
        store = _store()
        user = User(username)
        user.set_password(password)
        for role in roles:
            user.add_role(role)
        store.put(user)
    except Exception as exc:
        print('unable to create or update user: %s' % exc, file=sys.stderr)
        raise
    return True | 7522753dff0647ac0764078902bf87c888f5a817 | 6,228 |
import numpy as np
def check_linear_dependence(matrix: np.ndarray) -> bool:
    """
    Checks, via the Cauchy-Schwarz inequality, whether the columns of a matrix are linearly dependent.
    :param matrix: 2x2 matrix to be processed.
    :return: Boolean.
    """
for i in range(matrix.shape[0]):
for j in range(matrix.shape[0]):
if i != j:
inner_product = np.inner(matrix[:, i], matrix[:, j])
norm_i = np.linalg.norm(matrix[:, i])
norm_j = np.linalg.norm(matrix[:, j])
print("I: ", matrix[:, i])
print("J: ", matrix[:, j])
print("Prod: ", inner_product)
print("Norm i: ", norm_i)
print("Norm j: ", norm_j)
if np.abs(inner_product - norm_j * norm_i) < 1e-5:
print("Dependent")
return True
else:
print("Independent")
return False | 1b962afc16c135c49409a1cfb1f4c2b6a5695c75 | 6,229 |
import json
def cors_400(details: str = None) -> cors_response:
"""
Return 400 - Bad Request
"""
errors = Model400BadRequestErrors()
errors.details = details
error_object = Model400BadRequest([errors])
return cors_response(
req=request,
status_code=400,
body=json.dumps(delete_none(error_object.to_dict()), indent=_INDENT, sort_keys=True)
if _INDENT != 0 else json.dumps(delete_none(error_object.to_dict()), sort_keys=True),
x_error=details
) | 1f775db943ed0989da49d1b7a6952d7614ace982 | 6,230 |
def detect_label_column(column_names):
""" Detect the label column - which we display as the label for a joined column.
If a table has two columns, one of which is ID, then label_column is the other one.
"""
if (column_names and len(column_names) == 2 and "id" in column_names):
return [c for c in column_names if c != "id"][0]
return None | 40524e7ed0878316564ad8fd66a2c09fc892e979 | 6,231 |
import glob
def sorted_files(pattern):
"""Return files matching glob pattern, *effectively* sorted by date
"""
return sort_files(glob.glob(pattern)) | 4fb2ad9f6396cb844320e4e3aeb2941567d8af4a | 6,233 |
import torch
def random_float_tensor(seed, size, a=22695477, c=1, m=2 ** 32, requires_grad=False):
""" Generates random tensors given a seed and size
https://en.wikipedia.org/wiki/Linear_congruential_generator
X_{n + 1} = (a * X_n + c) % m
Using Borland C/C++ values
The tensor will have values between [0,1)
Inputs:
seed (int): an int
size (Tuple[int]): the size of the output tensor
a (int): the multiplier constant to the generator
c (int): the additive constant to the generator
m (int): the modulus constant to the generator
"""
num_elements = 1
for s in size:
num_elements *= s
arr = [(a * seed + c) % m]
for i in range(num_elements - 1):
arr.append((a * arr[i] + c) % m)
return torch.tensor(arr, requires_grad=requires_grad).float().view(size) / m | c6c8ce42b2774204c3156bdd7b545b08315d1606 | 6,234 |
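A quick check of the generator above: the same seed reproduces the same tensor, and all values fall in [0, 1).

t1 = random_float_tensor(42, (2, 3))
t2 = random_float_tensor(42, (2, 3))
assert torch.equal(t1, t2)
assert bool((t1 >= 0).all()) and bool((t1 < 1).all())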
def derivable_rng(spec, *, legacy=False):
"""
Get a derivable RNG, for use cases where the code needs to be able to reproducibly derive
sub-RNGs for different keys, such as user IDs.
Args:
spec:
Any value supported by the `seed` parameter of :func:`seedbank.numpy_rng`, in addition
to the following values:
* the string ``'user'``
* a tuple of the form (``seed``, ``'user'``)
Either of these forms will cause the returned function to re-derive new RNGs.
Returns:
function:
A function taking one (or more) key values, like :func:`derive_seed`, and
returning a random number generator (the type of which is determined by
the ``legacy`` parameter).
"""
if spec == 'user':
return DerivingRNG(derive_seed(), legacy)
elif isinstance(spec, tuple):
seed, key = spec
if key != 'user':
            raise ValueError('unrecognized key %s' % key)
return DerivingRNG(seed, legacy)
else:
return FixedRNG(rng(spec, legacy=legacy)) | 0772c9d27ba166f0981b3eb1da359a3ebb973322 | 6,235 |
def table(custom_headings, col_headings_formatted, rows, spec):
"""
Create a LaTeX table
Parameters
----------
custom_headings : None, dict
optional dictionary of custom table headings
col_headings_formatted : list
formatted column headings
rows : list of lists of cell-strings
Data in the table, pre-formatted
spec : dict
options for the formatter
Returns
-------
dict : contains key 'latex', which corresponds to a latex string representing the table
"""
longtables = spec['longtables']
table = "longtable" if longtables else "tabular"
if custom_headings is not None \
and "latex" in custom_headings:
latex = custom_headings['latex']
else:
latex = "\\begin{%s}[l]{%s}\n\hline\n" % \
(table, "|c" * len(col_headings_formatted) + "|")
latex += ("%s \\\\ \hline\n"
% (" & ".join(col_headings_formatted)))
for formatted_rowData in rows:
if len(formatted_rowData) > 0:
formatted_rowData_latex = [
(formatted_cell['latex'] if isinstance(formatted_cell, dict)
else formatted_cell) for formatted_cell in formatted_rowData]
latex += " & ".join(formatted_rowData_latex)
#MULTI-ROW support for *data* (non-col-header) rows of table. Currently
# unused (unneeded) - see multirow formatter that is commented out in formatters.py
#multirows = [ ("multirow" in el) for el in formatted_rowData_latex ]
#if any(multirows):
# latex += " \\\\ "
# last = True; lineStart = None; col = 1
# for multi,data in zip(multirows,formatted_rowData_latex):
# if last == True and multi == False:
# lineStart = col #line start
# elif last == False and multi == True:
# latex += "\cline{%d-%d} " % (lineStart,col) #line end
# last=multi
# res = _re.search("multicolumn{([0-9])}",data)
# if res: col += int(res.group(1))
# else: col += 1
# if last == False: #need to end last line
# latex += "\cline{%d-%d} "%(lineStart,col-1)
# latex += "\n"
#else:
latex += " \\\\ \hline\n"
latex += "\end{%s}\n" % table
return {'latex': latex} | 0ca28fce26fc7476aa5b88a621c5476ae8d381ce | 6,236 |
from functools import wraps
import pytest
def skipIfNoDB(test):
"""Decorate a test to skip if DB ``session`` is ``None``."""
@wraps(test)
def wrapper(self, db, *args, **kwargs):
if db.session is None:
pytest.skip('Skip because no DB.')
else:
return test(self, db, *args, **kwargs)
return wrapper | a75cc067679aaab3fec78c2310cbc2e34a19cee7 | 6,238 |
import numpy as np
def rboxes2quads_numpy(rboxes):
"""
:param rboxes: ndarray, shape = (*, h, w, 5=(4=(t,r,b,l) + 1=angle))
Note that angle is between [-pi/4, pi/4)
:return: quads: ndarray, shape = (*, h, w, 8=(x1, y1,... clockwise order from top-left))
"""
# dists, shape = (*, h, w, 4=(t,r,b,l))
# angles, shape = (*, h, w)
h, w, _ = rboxes.shape[-3:]
dists, angles = rboxes[..., :4], rboxes[..., 4]
# shape = (*, h, w, 5=(t,r,b,l,offset), 2=(x,y))
pts = np.zeros(list(dists.shape[:-1]) + [5, 2], dtype=np.float32)
# assign pts for angle >= 0
dists_pos = dists[angles >= 0]
if dists_pos.size > 0:
# shape = (*, h, w)
tops, rights, bottoms, lefts = np.rollaxis(dists_pos, axis=-1)
shape = tops.shape
pts[angles >= 0] = np.moveaxis(np.array([[np.zeros(shape), -(tops+bottoms)],
[lefts+rights, -(tops+bottoms)],
[lefts+rights, np.zeros(shape)],
[np.zeros(shape), np.zeros(shape)],
[lefts, -bottoms]]), [0, 1], [-2, -1])
# assign pts for angle < 0
dists_neg = dists[angles < 0]
if dists_neg.size > 0:
# shape = (*, h, w)
tops, rights, bottoms, lefts = np.rollaxis(dists_neg, axis=-1)
shape = tops.shape
pts[angles < 0] = np.moveaxis(np.array([[-(lefts+rights), -(tops+bottoms)],
[np.zeros(shape), -(tops+bottoms)],
[np.zeros(shape), np.zeros(shape)],
[-(lefts+rights), np.zeros(shape)],
[-rights, -bottoms]]), [0, 1], [-2, -1])
# note that rotate clockwise is positive, otherwise, negative
angles *= -1
# rotate
# shape = (*, h, w, 2, 2)
R = np.moveaxis(np.array([[np.cos(angles), -np.sin(angles)],
[np.sin(angles), np.cos(angles)]]), [0, 1], [-2, -1])
# shape = (*, h, w, 2=(x, y), 5=(t,r,b,l,offset))
pts = np.swapaxes(pts, -1, -2)
# shape = (*, h, w, 2=(x, y), 5=(t,r,b,l,offset))
rotated_pts = R @ pts
# quads, shape = (*, h, w, 2=(x, y), 4=(t,r,b,l))
# offsets, shape = (*, h, w, 2=(x, y), 1=(offset))
quads, offsets = rotated_pts[..., :4], rotated_pts[..., 4:5]
# align
widths, heights = np.meshgrid(np.arange(w), np.arange(h))
# shape = (h, w, 2)
origins = np.concatenate((np.expand_dims(widths, -1), np.expand_dims(heights, -1)), axis=-1)
# shape = (*, h, w, 2=(x,y), 1)
origins = np.expand_dims(origins, axis=tuple(i for i in range(-1, rboxes.ndim - 3)))
quads += origins - offsets
quads[..., 0, :] = np.clip(quads[..., 0, :], 0, w)
quads[..., 1, :] = np.clip(quads[..., 1, :], 0, h)
# reshape
quads = np.swapaxes(quads, -1, -2).reshape(list(rboxes.shape[:-1]) + [8])
return quads | a5c48d48444f3c063fe912e2c6e76de373f7a1fc | 6,239 |
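# Hedged usage sketch: a 2x2 grid of axis-aligned rboxes (angle 0) with unit
# distances on every side; the values are illustrative only.
rboxes = np.zeros((2, 2, 5), dtype=np.float32)
rboxes[..., :4] = 1.0  # t, r, b, l distances
quads = rboxes2quads_numpy(rboxes)
print(quads.shape)  # (2, 2, 8)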
from typing import Callable
from typing import Optional
from typing import Mapping
from typing import Any
import reprlib
from typing import List
import inspect
from typing import cast
from typing import MutableMapping
import icontract._recompute
def repr_values(condition: Callable[..., bool], lambda_inspection: Optional[ConditionLambdaInspection],
resolved_kwargs: Mapping[str, Any], a_repr: reprlib.Repr) -> List[str]:
"""
Represent function arguments and frame values in the error message on contract breach.
:param condition: condition function of the contract
:param lambda_inspection:
inspected lambda AST node corresponding to the condition function (None if the condition was not given as a
lambda function)
:param resolved_kwargs: arguments put in the function call
:param a_repr: representation instance that defines how the values are represented.
:return: list of value representations
"""
# Hide _ARGS and _KWARGS if they are not part of the condition for better readability
if '_ARGS' in resolved_kwargs or '_KWARGS' in resolved_kwargs:
parameters = inspect.signature(condition).parameters
malleable_kwargs = cast(
MutableMapping[str, Any],
resolved_kwargs.copy() # type: ignore
)
if '_ARGS' not in parameters:
malleable_kwargs.pop('_ARGS', None)
if '_KWARGS' not in parameters:
malleable_kwargs.pop('_KWARGS', None)
selected_kwargs = cast(Mapping[str, Any], malleable_kwargs)
else:
selected_kwargs = resolved_kwargs
# Don't use ``resolved_kwargs`` from this point on.
# ``selected_kwargs`` is meant to be used instead for better readability of error messages.
if is_lambda(a_function=condition):
assert lambda_inspection is not None, "Expected a lambda inspection when given a condition as a lambda function"
else:
assert lambda_inspection is None, "Expected no lambda inspection in a condition given as a non-lambda function"
reprs = None # type: Optional[MutableMapping[str, Any]]
if lambda_inspection is not None:
variable_lookup = collect_variable_lookup(condition=condition, resolved_kwargs=selected_kwargs)
recompute_visitor = icontract._recompute.Visitor(variable_lookup=variable_lookup)
recompute_visitor.visit(node=lambda_inspection.node.body)
recomputed_values = recompute_visitor.recomputed_values
repr_visitor = Visitor(
recomputed_values=recomputed_values, variable_lookup=variable_lookup, atok=lambda_inspection.atok)
repr_visitor.visit(node=lambda_inspection.node.body)
reprs = repr_visitor.reprs
# Add original arguments from the call unless they shadow a variable in the re-computation.
#
# The condition arguments are often not sufficient to figure out the error. The user usually needs
# more context which is captured in the remainder of the call arguments.
if reprs is None:
reprs = dict()
for key in sorted(selected_kwargs.keys()):
val = selected_kwargs[key]
if key not in reprs and _representable(value=val):
reprs[key] = val
parts = [] # type: List[str]
# We need to sort in order to present the same violation error on repeated violations.
# Otherwise, the order of the reported arguments may be arbitrary.
for key in sorted(reprs.keys()):
value = reprs[key]
if isinstance(value, icontract._recompute.FirstExceptionInAll):
writing = ['{} was False, e.g., with'.format(key)]
for input_name, input_value in value.inputs:
writing.append('\n')
writing.append(' {} = {}'.format(input_name, a_repr.repr(input_value)))
parts.append(''.join(writing))
else:
parts.append('{} was {}'.format(key, a_repr.repr(value)))
return parts | d7218029fd387bae108eedf49c9eef14d98e3c70 | 6,240 |
def human_permissions(permissions, short=False):
"""Get permissions in readable form.
"""
try:
permissions = int(permissions)
except ValueError:
return None
if permissions > sum(PERMISSIONS.values()) or permissions < min(
PERMISSIONS.values()
):
return ""
rez = []
for k, v in PERMISSIONS.items():
if permissions & v == v:
rez.append(k)
if short:
return "".join(((x.split("_")[1][:1]).lower() for x in rez))
else:
return " | ".join(rez) | 0d9c15659c93833042f44a0a96746e2f1dd9d307 | 6,241 |
import pandas as pd
from flask import request, jsonify
def predict():
    """
    Prediction end point
    Post a JSON holding the features and expect a prediction
    Returns
    -------
    JSON
        The field `prediction` will hold a list of 0 and 1's corresponding
        to the predictions.
    """
logger.info('Starting prediction')
json_ = request.get_json()
query_df = pd.DataFrame(json_)
query = tm.prepare_data(query_df, train=False)
prediction = clf.predict(query)
prediction = [int(x) for x in prediction]
logger.info("Prediction is ready")
return jsonify({'prediction': prediction}) | 6899725edff8d2536c4a97018a5c6c7a4e0d416e | 6,242 |
from subprocess import call
from os import getcwd
def run_program(program, cmdargs, stdin_f, stdout_f, stderr_f,
run=True, cmd_prepend="", run_from_cmd=True,
**kwargs):
"""Runs `program` with `cmdargs` using `subprocess.call`.
:param str stdin_f: File from which to take standard input
:param str stdout_f: File in which to put standard output
:param str stderr_f: File in which to put standard error
:param bool run: Whether to actually run `program`
If `True` the program return code is returned.
If false a string pointing to the script which will run
the program is returned
:param str cmd_prepend: Put in the beginning of the bash script
:param bool run_from_cmd: Run `program` using the generated bash
script instead of running it directly
"""
time_file_name = '.'.join(stdout_f.split('.')[:-1])+'.time'
cmd_file_name = '.'.join(stdout_f.split('.')[:-1])+'.sh'
with open(cmd_file_name, 'w') as cmd_file:
cmd = ' '.join([program]+cmdargs)
time_cmd = "/usr/bin/time -o {time_file}".format(time_file=time_file_name)
cmd = "{time_cmd} {cmd} 1> {stdout} 2> {stderr} \n".format(time_cmd=time_cmd,
cmd=cmd,
stdout=stdout_f,
stderr=stderr_f)
cmd = cmd_prepend + cmd
cmd_file.write(cmd)
if run:
with OpenWithNone(stdin_f, 'r') as input_file, open(stdout_f, 'w') as stdout_file, open(stderr_f, 'w') as stderr_file:
if run_from_cmd:
retcode = call(["bash", cmd_file_name], **kwargs)
else:
try:
with open(time_file_name, 'w') as time_file:
with print_time(time_file):
retcode = call([program]+cmdargs, stdin=input_file,
stdout=stdout_file, stderr=stderr_file, **kwargs)
except Exception as e:
print(e)
print('program ', program)
print('cmdargs', cmdargs)
print('stdin ', stdin_f)
print('stdout ', stdout_f)
print('stderr ', stderr_f)
# print 'kwargs ', kwargs
print(getcwd())
raise
replace_string_in_file(stdout_f, '\r', '\n')
return retcode
else:
return cmd_file_name | aea74ec8ac296567b16e6f76eed1360e8bc76f69 | 6,243 |
import numpy as np
from numba import prange
def second_step_red(x: np.array, y: np.array, z: np.array,
px: np.array, py: np.array, pz: np.array,
Fx: np.array, Fy: np.array, Fz: np.array,
z_start: float, z_stop: float) -> (np.array, np.array, np.array,
np.array, np.array, np.array):
""" Second step for Relativictic Difference Scheme
"""
n = int(len(x))
for i in prange(n):
if z[i] >= z_start and z[i] <= z_stop:
gamma = (1 + px[i]**2 + py[i]**2 + pz[i]**2)**(1/2)
vx = px[i]/gamma
vy = py[i]/gamma
vz = pz[i]/gamma
b2 = 1 + Fx[i]**2 + Fy[i]**2 + Fz[i]**2
b1 = 2 - b2
b3 = 2 * (vx*Fx[i] + vy*Fy[i] + vz*Fz[i])
fx = 2 * (vy*Fz[i] - vz*Fy[i])
fy = 2 * (vz*Fx[i] - vx*Fz[i])
fz = 2 * (vx*Fy[i] - vy*Fx[i])
vx = (vx*b1 + fx + Fx[i]*b3)/b2
vy = (vy*b1 + fy + Fy[i]*b3)/b2
vz = (vz*b1 + fz + Fz[i]*b3)/b2
x[i] += vx
y[i] += vy
z[i] += vz
px[i] = vx*gamma
py[i] = vy*gamma
pz[i] = vz*gamma
else:
gamma = (1 + px[i]**2 + py[i]**2 + pz[i]**2)**(1/2)
vz = pz[i]/gamma
z[i] += vz
return x, y, z, px, py, pz | 909f16a51074ca0c52641d3539509e513ca4ac80 | 6,244 |
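# Hedged usage sketch: advance two particles by one step with a small uniform
# field; only the first particle lies inside [z_start, z_stop].
n = 2
x, y = np.zeros(n), np.zeros(n)
z = np.array([0.5, 2.0])
px, py = np.zeros(n), np.zeros(n)
pz = np.full(n, 0.1)
Fx, Fy, Fz = np.full(n, 0.01), np.zeros(n), np.zeros(n)
x, y, z, px, py, pz = second_step_red(x, y, z, px, py, pz, Fx, Fy, Fz,
                                      z_start=0.0, z_stop=1.0)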
import numpy as np
def drop_tabu_points(xf, tabulist, tabulistsize, tabustrategy):
"""Drop a point from the tabu search list."""
if len(tabulist) < tabulistsize:
return tabulist
if tabustrategy == 'oldest':
tabulist.pop(0)
else:
distance = np.sqrt(np.sum((tabulist - xf)**2, axis=1))
index = np.argmax(distance)
tabulist.pop(index)
return tabulist | 4cd8887bdd77bb001635f0fba57f5908f3451642 | 6,245 |
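# Hedged usage sketch: once the tabu list is full, any strategy other than
# 'oldest' drops the stored point farthest from the new point xf.
tabulist = [np.array([0.0, 0.0]), np.array([5.0, 5.0]), np.array([1.0, 1.0])]
tabulist = drop_tabu_points(np.array([0.5, 0.5]), tabulist,
                            tabulistsize=3, tabustrategy='farthest')
print(len(tabulist))  # 2 -- the point (5.0, 5.0) was removed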
def get_atom_feature_dims(list_acquired_feature_names):
""" tbd
"""
return list(map(len, [CompoundKit.atom_vocab_dict[name] for name in list_acquired_feature_names])) | 575de38dc0fdd198f6a6eb5cbb972063260bc4d4 | 6,246 |
def parse_selector(selector):
"""Parses a block of selectors like div .name #tag to class=.name, selector=div and id=#tag.
Returns (selector, id, class[]) """
m_class, m_id, m_selector, m_attr = [], None, None, {}
if selector is not None and type(selector) == str:
selector_labels = selector.split()
for label in selector_labels:
if label.startswith("."):
m_class.append(label)
elif label.startswith("#"):
if m_id is not None:
raise ValueError("Multiple id's are declared in block "+str(selector))
m_id = label
elif label.startswith("@@"):
attribute_block = str(label).split('=')
if len(attribute_block) < 2:
                    raise ValueError('Attribute does not match the format '
                                     '@@<attribute_name>=<attribute_value> without space')
                attr = attribute_block[0]
                value = attribute_block[1]
                m_attr[attr] = value
else:
if m_selector is not None:
raise ValueError("Multiple selectors are declared in block "+str(selector))
m_selector = label
        if m_attr and not m_selector:
            raise AssertionError('If selection is done with attribute @@<attr_name>=<attr_value>, '
                                 'then it is a must to have a selector.\n Eg: <selector> @@<attr_name>=<attr_value>')
    return m_selector, m_id, m_class, m_attr
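# Hedged usage sketch of parse_selector on a combined selector block.
sel, sel_id, classes, attrs = parse_selector("div .primary #main @@href=/home")
print(sel, sel_id, classes, attrs)
# div #main ['.primary'] {'@@href': '/home'}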
def is_batch_enabled(release_id):
"""
Check whether batching is enabled for a release.
"""
details = get_release_details_by_id(release_id)
return details['data']['attributes']['enable_batching'] | e22965166b35584e172e775b16a9d84affe5868f | 6,248 |
import contextlib
import numpy as np
import rasterio
from concurrent import futures
from rasterio.io import MemoryFile
from rasterio.merge import merge
def create(tiles):
"""Handler."""
with futures.ThreadPoolExecutor(max_workers=8) as executor:
responses = executor.map(worker, tiles)
with contextlib.ExitStack() as stack:
sources = [
stack.enter_context(rasterio.open(tile)) for tile in responses if tile
]
dest, output_transform = merge(sources, nodata=-32767)
meta = {
"driver": "GTiff",
"count": 1,
"dtype": np.int16,
"nodata": -32767,
"height": dest.shape[1],
"width": dest.shape[2],
"compress": "DEFLATE",
"crs": "epsg:4326",
"transform": output_transform,
}
memfile = MemoryFile()
with memfile.open(**meta) as dataset:
dataset.write(dest)
return memfile | cd080b0df34b12f8045420ac076f8e9ee6bc7c15 | 6,249 |
import cupy as cp
def _rfftn_empty_aligned(shape, axes, dtype, order='C', n=None):
"""Patched version of :func:`sporco.fft.rfftn_empty_aligned`.
"""
ashp = list(shape)
raxis = axes[-1]
ashp[raxis] = ashp[raxis] // 2 + 1
cdtype = _complex_dtype(dtype)
return cp.empty(ashp, cdtype, order) | a85ab3a938694a82d186b968a2d7d4c710f1ecde | 6,251 |
def get_test_config():
"""
Returns a basic FedexConfig to test with.
"""
# Test server (Enter your credentials here)
return FedexConfig(key='xxxxxxxxxxxxxxxxx',
password='xxxxxxxxxxxxxxxxxxxxxxxxx',
account_number='xxxxxxxxx',
meter_number='xxxxxxxxxx',
use_test_server=True) | 81b29fbb135b30f24aa1fe7cb32844970617f0ee | 6,252 |
from datetime import datetime
import itertools
import json
import logging
import csv
def write_data(movies, user, data_format='json'):
"""
"""
assert movies, 'no data to write'
date = datetime.now().strftime('%Y%m%d')
movies_clean = itertools.chain.from_iterable((json.loads(el) for el in movies))
movies_clean = tuple(movies_clean)
if data_format == 'all':
file_formats = ('csv', 'json')
else:
file_formats = (data_format, )
if 'json' in file_formats:
file_name = f'{user}_filmweb_{date}.json'
with open(file_name, 'w', encoding='utf-8') as out_file:
out_file.write(json.dumps(movies_clean))
logging.info(f'{file_name} written!')
if 'csv' in file_formats:
file_name = f'{user}_filmweb_{date}.csv'
with open(file_name, 'w', encoding='utf-8') as out_file:
writer = csv.DictWriter(out_file, fieldnames=CSV_ROWS, dialect='unix')
writer.writeheader()
for movie in movies_clean:
writer.writerow(movie)
logging.info(f'{file_name} written!')
return file_name | 704ebf1aa1b45855b8fade61cdf6a9bb12e44c83 | 6,253 |
import json
import tempfile
from functools import partial
from multiprocessing import Pool
import pyQuASAR
def get_genotypes(
single_end: list,
paired_end: list,
metadata: str,
bam_dir: str,
intermediate_dir: str,
reference_genome_path: str,
mapping_quality: int,
blacklist_path: str,
snps_path: str,
processes: int,
memory: int,
skip_preprocessing: bool = False,
write_bam: bool = False,
algorithm_switch_bp: int = 70,
algorithm=None,
temp_dir=None
):
"""Obtain genotypes from sequencing data using QuASAR
Parameters
----------
single_end : list
List of single-end input files
paired_end : list
List of paired-end input files
metadata : dict
Dict of input file metadata
bam_dir : str
Directory to write BAM files
intermediate_dir : str
Directory to write intermediate pileup / bed files
reference_genome_path : str
Path to reference genome
mapping_quality : int
Minimum quality score for filtering alignment
blacklist_path : str
Path to ENCODE mappability blacklist
snps_path : str
Path to file containing SNPs to genotype
processes : int
Number of processes
memory : int
Memory limit
skip_preprocessing : bool
Indicator to skip preprocessing steps
write_bam : bool
Indicator to write a BAM file to disk
algorithm_switch_bp : int
Read length threshold for switching to `bwa mem`
algorithm : str or None
Force use of either `aln` or `mem` algorithm, if supplied
temp_dir
directory to use for temporary files
"""
n_single_end = len(single_end)
n_paired_end = len(paired_end)
if not metadata:
metadata_dict = {}
else:
with open(metadata, 'r') as f:
metadata_dict = json.load(f)
n_metadata = sum(len(x['libraries']) for x in metadata_dict.values())
def prepare_quasar_input_params(temp_dir_name, n, pe=False):
return {
'bam_dir': bam_dir if bam_dir else temp_dir_name,
'intermediate_dir': (
intermediate_dir if intermediate_dir
else temp_dir_name
),
'reference_genome_path': reference_genome_path,
'mapping_quality': mapping_quality,
'blacklist_path': blacklist_path,
'snps_path': snps_path,
'processes': max(1, int(processes / n)),
'memory': memory / min(processes, n),
'paired_end': pe,
'skip_preprocessing': skip_preprocessing,
'write_bam': write_bam,
'algorithm_switch_bp': algorithm_switch_bp,
'algorithm': algorithm,
'temp_dir': temp_dir
}
with tempfile.TemporaryDirectory(dir=temp_dir) as temp_dir_name:
with Pool(processes=min(processes, max(n_single_end, n_paired_end, n_metadata))) as pool:
if n_single_end > 0:
single_end_quasar_input_paths = pool.map(
partial(
prepare_quasar_input,
**prepare_quasar_input_params(temp_dir_name, n_single_end, pe=False)
),
single_end
)
else:
single_end_quasar_input_paths = []
if n_paired_end > 0:
paired_end_quasar_input_paths = pool.map(
partial(
prepare_quasar_input,
**prepare_quasar_input_params(temp_dir_name, n_paired_end, pe=True)
),
paired_end
)
else:
paired_end_quasar_input_paths = []
if n_metadata > 0:
meta_se, meta_pe = collate_metadata(metadata_dict)
if len(meta_se) > 0:
metadata_quasar_input_paths_se = pool.starmap(
partial(
prepare_quasar_input_from_metadata,
**prepare_quasar_input_params(temp_dir_name, len(meta_se), pe=False)
),
meta_se
)
else:
metadata_quasar_input_paths_se = []
if len(meta_pe) > 0:
metadata_quasar_input_paths_pe = pool.starmap(
partial(
prepare_quasar_input_from_metadata,
**prepare_quasar_input_params(temp_dir_name, len(meta_pe), pe=True)
),
meta_pe
)
else:
metadata_quasar_input_paths_pe = []
else:
metadata_quasar_input_paths_se, metadata_quasar_input_paths_pe = [], []
return pyQuASAR.genotype(
*filter(
None,
single_end_quasar_input_paths
+ paired_end_quasar_input_paths
+ metadata_quasar_input_paths_se
+ metadata_quasar_input_paths_pe
)
) | 7ee61a9b8dfbbedf7d595034a40ae9084e1fa69f | 6,254 |
async def async_handle_google_actions(hass, cloud, payload):
    """Handle an incoming IoT message for Google Actions."""
    result = await ga.async_handle_message(
        hass, cloud.gactions_config, payload)
    return result
import cv2
def get_picture_landmarks(filepath, predictor, logs=True):
    """
    Extract facial landmark points from the image at ``filepath`` using ``predictor``.
    """
    if logs:
        print("Processing file: {}".format(filepath))
    frame = cv2.imread(filepath)
    lm = FLandmarks()
    lm.extract_points(frame, predictor)
    if logs:
        print('\n')
    return lm
def conflict(next_x: int, s: tuple) -> bool:
    """Return True if placing the next queen in column next_x conflicts with the queens already placed in s"""
    next_i = len(s)
    for i in range(next_i):
        if abs(s[i] - next_x) in (0, next_i - i):
            return True
    return False
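# Hedged usage sketch: extend a partial 4-queens state (queens in columns 1
# and 3 of the first two rows) with every non-conflicting column.
state = (1, 3)
safe = [col for col in range(4) if not conflict(col, state)]
print(safe)  # [0]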
async def main_page():
"""Main page. Just for example."""
return APIResponse(message="ok") | f1a2022df08725388c02dabe77bc4ee29eb5f968 | 6,258 |
from typing import List
def split_to_sublists(initial_list:list, n:int, strict:bool=True) -> List[list]:
"""Takes a list and splits it into sublists of size n
Parameters
----------
initial_list : list
The initial list to split into sublists
n : int
The size of each sublist
strict: bool
Whether to force an error if the length of the initial list is not divisible by n (split into even groups), default True
Returns
-------
List[list]
A list of lists of size n (unless strict is False, then the last list may be > n)
Examples
--------
### Split gallery images into sublists of 3
#### JINJA USAGE
```jinja2
{% if gallery|length % 3 == 0 %}
{% for sublist in gallery|split_to_sublists(3) %}
<div class="row">
<div class="col-md-4">
<img src="{{ sublist.0[0]['file_path'] }}" alt="{{ sublist.0[0]['file_path'].split()[-1] }}">
</div>
<div class="col-md-4">
<img src="{{ sublist.1[0]['file_path'] }}" alt="{{ sublist.1[0]['file_path'].split()[-1]}}">
</div>
<div class="col-md-4">
<img src="{{ sublist.2[0]['file_path'] }}" alt="{{ sublist.2[0]['file_path'].split()[-1] }}">
</div>
</div>
{% endfor %}
    {% endif %}
```
The above jinja is roughly equivalent to something like this in pure python:
```python
gallery = ["image 1" , "image 2", "image 3", "image 4" , "image 5", "image 6"]
    if len(gallery) % 3 == 0:
for sublist in split_to_sublists(gallery, 3): # Returns [["image 1" , "image 2", "image 3"], ["image 4" , "image 5", "image 6"]]
... # Do stuff with each sublist
```
"""
if strict:
if not len(initial_list) % n == 0:
raise ValueError(f"Provided list was not of correct size: \n\tList: {initial_list}\n\tSegment size {n}")
result = []
for i in range(0, len(initial_list), n): # Create sublists up to size n
result.append( initial_list[i:i + n])
return result | fcca74f9814020c99aaf8b31f092ca3ca9533216 | 6,259 |
from pathlib import Path
from hashlib import sha1
def get_sha1(req_path: Path) -> str:
""" For larger files sha1 algorithm is significantly faster than sha256 """
return get_hash(req_path, sha1) | 768f101fe4ad57eaea9ccd68d247e6a85b1cebaa | 6,261 |
def _make_note(nl_transcript: str, tl_audio_file: str) -> Note:
"""
Creates an Anki note from a native langauge transcript and a target language audio file.
"""
return Note(model=_MODEL, fields=[f"[sound:{tl_audio_file}]", nl_transcript]) | 4765e39b2c3a7794fb973de2b9424bad361cbe4c | 6,263 |
from datetime import datetime
def bed2beddb_status(connection, **kwargs):
"""Searches for small bed files uploaded by user in certain types
Keyword arguments:
lab_title -- limit search with a lab i.e. Bing+Ren, UCSD
start_date -- limit search to files generated since a date formatted YYYY-MM-DD
run_time -- assume runs beyond run_time are dead (default=24 hours)
"""
start = datetime.utcnow()
check = CheckResult(connection, 'bed2beddb_status')
my_auth = connection.ff_keys
check.action = "bed2beddb_start"
check.brief_output = []
check.full_output = {}
check.status = 'PASS'
check.summary = ''
# These are the accepted file types for this check
accepted_types = ['LADs', 'boundaries', 'domain calls', 'peaks']
# check indexing queue
check, skip = wfr_utils.check_indexing(check, connection)
if skip:
return check
# Build the query (find bg files without bw files)
query = ("/search/?type=FileProcessed&file_format.file_format=bed"
"&extra_files.file_format.display_title!=beddb"
"&status!=uploading&status!=to be uploaded by workflow"
"&status!=archived&status!=archived to project")
query += "".join(["&file_type=" + i for i in accepted_types])
# add date
s_date = kwargs.get('start_date')
if s_date:
query += '&date_created.from=' + s_date
# add lab
lab = kwargs.get('lab_title')
if lab:
query += '&lab.display_title=' + lab
# build a second query for checking failed ones
query_f = ("/search/?type=FileProcessed&file_format.file_format=bed"
"&extra_files.file_format.display_title=beddb"
"&extra_files.status=uploading"
"&extra_files.status=to be uploaded by workflow"
"&status!=uploading&status!=to be uploaded by workflow")
# add date
s_date = kwargs.get('start_date')
if s_date:
query_f += '&date_created.from=' + s_date
# add lab
lab = kwargs.get('lab_title')
if lab:
query_f += '&lab.display_title=' + lab
# The search
res_one = ff_utils.search_metadata(query, key=my_auth)
res_two = ff_utils.search_metadata(query_f, key=my_auth)
res_all = res_one + res_two
missing = []
for a_file in res_all:
if not a_file.get('genome_assembly'):
missing.append(a_file['accession'])
res_all = [i for i in res_all if i.get('genome_assembly')]
if not res_all:
check.summary = 'All Good!'
return check
check = wfr_utils.check_runs_without_output(res_all, check, 'bedtobeddb', my_auth, start)
if missing:
check.full_output['missing_assembly'] = missing
msg = str(len(missing)) + ' files missing genome assembly'
check.brief_output.insert(0, msg)
return check | 2fb1f67cc256bc1ff04c4a5e8c1fa61f43f69d30 | 6,264 |
import rospkg
import urdfpy
from geometry_msgs.msg import PoseStamped
def parse_urdf_file(package_name, work_name):
""" Convert urdf file (xml) to python dict.
Using the urdfpy package for now.
Using the xml package from the standard library could be
easier to understand. We can change this in the future
if it becomes a mess.
"""
rospack = rospkg.RosPack()
filepath = rospack.get_path(package_name)
filepath += REL_WORK_PATH
urdf = urdfpy.URDF.load(filepath + work_name + ".urdf")
d = {"links": {}, "joints": {}}
for link in urdf.links:
if link.name == "world" or link.name == "work":
continue
else:
d["links"][link.name] = parse_link(link, filepath)
for joint in urdf.joints:
p = PoseStamped()
p.header.frame_id = joint.parent
p.pose = numpy_to_pose(joint.origin)
d["joints"][joint.name] = {
"pose": p,
"parent": joint.parent,
"child": joint.child
}
return d | 7b209216d9f65303441e5e9f761119bfa9fc5810 | 6,265 |
def _get_mgmtif_mo_dn(handle):
"""
Internal method to get the mgmt_if dn based on the type of platform
"""
if handle.platform == IMC_PLATFORM.TYPE_CLASSIC:
return("sys/rack-unit-1/mgmt/if-1")
elif handle.platform == IMC_PLATFORM.TYPE_MODULAR:
return("sys/chassis-1/if-1")
else:
raise ImcValidationException("Invalid platform detected:%s" %
handle.platform) | 455c5baf0f659b98c78bfcc386bd03e0850df267 | 6,266 |
def sectionize(parts, first_is_heading=False):
"""Join parts of the text after splitting into sections with headings.
This function assumes that a text was splitted at section headings,
so every two list elements after the first one is a heading-section pair.
This assumption is used to join sections with their corresponding headings.
Parameters
----------
parts : list of str
List of text parts.
first_is_heading : bool
Should first element be treated as heading in lists of length greater than 1.
"""
parts = parts.copy()
if len(parts) <= 1:
return parts
first = []
if not first_is_heading:
first.append(parts[0])
del parts[0]
sections = first + [ "\n".join(parts[i:i+2]) for i in range(0, len(parts), 2) ]
return sections | 402832d55268dc808888f94b95e3a1c991394041 | 6,268 |
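# Hedged usage sketch: parts as produced by splitting a text at its headings.
parts = ["intro text", "## Setup", "pip install foo", "## Usage", "run foo"]
print(sectionize(parts))
# ['intro text', '## Setup\npip install foo', '## Usage\nrun foo']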
def byte_compare(stream_a, stream_b):
"""Byte compare two files (early out on first difference).
Returns:
(bool, int): offset of first mismatch or 0 if equal
"""
bufsize = 16 * 1024
equal = True
ofs = 0
while True:
b1 = stream_a.read(bufsize)
b2 = stream_b.read(bufsize)
if b1 != b2:
equal = False
if b1 and b2:
# we have two different buffers: find first mismatch
for a, b in zip(b1, b2):
if a != b:
break
ofs += 1
break
ofs += len(b1)
if not b1: # both buffers empty
break
return (equal, ofs) | 59adfe50fefdb79edd082a35437018d4b954ec75 | 6,269 |
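# Hedged usage sketch comparing two in-memory byte streams.
import io
equal, ofs = byte_compare(io.BytesIO(b"abcdef"), io.BytesIO(b"abcxef"))
print(equal, ofs)  # False 3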
import cv2
import albumentations as A
def get_resize_augmentation(image_size, keep_ratio=False, box_transforms=False):
"""
Resize an image, support multi-scaling
:param image_size: shape of image to resize
:param keep_ratio: whether to keep image ratio
:param box_transforms: whether to augment boxes
:return: albumentation Compose
"""
bbox_params = A.BboxParams(
format='pascal_voc',
min_area=0,
min_visibility=0,
label_fields=['class_labels']) if box_transforms else None
if not keep_ratio:
return A.Compose([
A.Resize(
height=image_size[1],
width=image_size[0]
)],
bbox_params=bbox_params)
else:
return A.Compose([
A.LongestMaxSize(max_size=max(image_size)),
A.PadIfNeeded(
min_height=image_size[1], min_width=image_size[0], p=1.0, border_mode=cv2.BORDER_CONSTANT),
],
bbox_params=bbox_params) | 62affae338e16cb0e7fc609d0ee995c728d6ec47 | 6,270 |
def extract_question(metric):
"""Extracts the name and question from the given metric"""
with open(metric) as f:
data = f.readlines()
data = [x.strip() for x in data]
# filter out empty strings
data = list(filter(None, data))
# data[0] = '# Technical Fork'
metric_name = data[0].split(maxsplit=1)[1]
# data[1] = 'Question: question part of the metric'
metric_question = spilt_by_colon(data[1])
# Replace '&' to 'and' to prevent breaking of tables in pandoc
metric_name = metric_name.replace('&', 'and')
metric_question = metric_question.replace('&', 'and')
return metric_name, metric_question | 27ddc25c489d19e1ca17ae80774e20c14208b653 | 6,271 |
def get_side(node, vowels, matcher, r):
"""Get side to which char should be added. r means round (or repeat).
Return 0 or plus int to add char to right,
minus int to left,
None if char node should be avoided.
"""
# check if node has both char neighbours
if node.next is None:
if node.prev is None:
raise Exception()
elif node.prev.syllable:
return -1
else:
return None
elif node.prev is None:
if node.next.syllable:
return 1
else:
return None
# node has both left and right char neighbours
# check if node has at least one syllable neighbour
if node.prev.syllable is None and node.next.syllable is None:
return None
# char matching
right_db = get_db_right_side(node, matcher)
if right_db == 2:
return right_db
elif right_db == 1 and r < 3:
return None
# suffix
suff = get_suffix_side(node, matcher)
if suff != 0:
syllable = node.prev.syllable if suff < 0 else node.next.syllable
return suff if syllable is not None else None
# prefix
pre = get_prefix_side(node, matcher)
if pre != 0:
syllable = node.prev.syllable if pre < 0 else node.next.syllable
return pre if syllable is not None else None
# syllable matching
if node.prev.syllable and node.next.syllable:
sdb = get_db_syllable_side(node, matcher) / 2 + right_db
if abs(sdb) >= 1:
return sdb
# no match in db nor suffixes nor prefixes
if r < 3:
return None
if node.prev in vowels and node.prev.neighbours_consonants(2, syllabic=False):
return -1
# this condition is for c in jablcko
if node.prev.syllabic_consonant_in_the_middle() and node.neighbours_consonants(1):
return -1
elif node.next.syllable:
return 1
elif node.prev.syllable:
return -1
return 0 | b7a34982bed475cacef08faf8f4d6155fc4147fb | 6,272 |
from tensorflow.python.framework import ops
from tensorflow.python.training import saver
def gap_perform_pruning(model_path, pruned_save_path=None, mode='gap', slim_ratio=0.5,
mask_len=False, full_save=False, full_save_path=None, var_scope='',
ver=1):
""" Interface for GAP pruning step (step2).
Args:
model_path: path to the saved checkpoint,
including 3 files: `.meta', `.data' and `.index'.
pruned_save_path: path to save the pruned data (file in pickle format)
slim_ratio: ratio for model pruning.
Return:
data_dict: the pruned data dict
"""
graph = saver.import_meta_graph(model_path+'.meta', clear_devices=True)
with open('graph_def.pbtxt', 'w') as f:
f.write(str(ops.get_default_graph().as_graph_def(add_shapes=True)))
key_graph = KeyGraph(ops.get_default_graph())
data_dict = key_graph.gap(model_path, pruned_save_path, mode, slim_ratio, mask_len,
full_save, full_save_path, var_scope, ver)
return data_dict | b49b7f5d61113990746ef37d03267805424f10be | 6,273 |
def html_wrap(ptext, owrapper, attribute=''):
"""
Wrap text with html tags.
Input:
ptext -- text to be wrapped
owrapper -- tag to wrap ptext with
attribute -- if set, attribute to add to ptext
If owrapper ends with a newline, then the newline will appear after the
bracket character in the last tag.
Returns the wrapped string value.
"""
wrapper = owrapper.strip()
hdr = '<%s>' % wrapper
if attribute:
hdr = add_attrib(attribute, hdr)
trlr = '</%s>' % wrapper
if owrapper.endswith('\n'):
trlr += '\n'
return hdr + ptext + trlr | 3a9d6fcf165ce6ad46ecc2ab7437b794d03449d9 | 6,274 |
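# Hedged usage sketch (the attribute argument is omitted because it relies on
# the module's add_attrib helper).
print(html_wrap("hello", "p\n"))  # <p>hello</p> followed by a newline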
def names(namespace):
"""Return extension names without loading the extensions."""
if _PLUGINS:
return _PLUGINS[namespace].keys()
else:
return _pkg_resources_names(namespace) | de772f9c671b92f9707e333006354d89ba166ae2 | 6,276 |
def cfq_lstm_attention_multi():
"""LSTM+attention hyperparameters tuned for CFQ."""
hparams = common_hparams.basic_params1()
hparams.daisy_chain_variables = False
hparams.batch_size = 1024
hparams.hidden_size = 128
hparams.num_hidden_layers = 2
hparams.initializer = 'uniform_unit_scaling'
hparams.initializer_gain = 1.0
hparams.weight_decay = 0.0
hparams.add_hparam('attention_layer_size', hparams.hidden_size)
hparams.add_hparam('output_attention', True)
hparams.add_hparam('num_heads', 1)
hparams.add_hparam('attention_mechanism', 'bahdanau')
hparams.num_heads = 4
# The remaining hyperparameters were determined as described in the paper:
hparams.batch_size = 2048
hparams.dropout = 0.4
hparams.hidden_size = 512
hparams.learning_rate = 0.03
hparams.num_hidden_layers = 2
return hparams | 7f982aff67a58200c7a297a5cfbfee6cc3c33173 | 6,277 |
import numpy as np
import pandas as pd
from scipy.stats import hmean
def create_modeling_tables(spi_historical, spi_fixtures, fd_historical, fd_fixtures, names_mapping):
"""Create tables for machine learning modeling."""
# Rename teams
for col in ['team1', 'team2']:
spi_historical = pd.merge(spi_historical, names_mapping, left_on=col, right_on='left_team', how='left').drop(columns=[col, 'left_team']).rename(columns={'right_team': col})
spi_fixtures = pd.merge(spi_fixtures, names_mapping, left_on=col, right_on='left_team', how='left').drop(columns=[col, 'left_team']).rename(columns={'right_team': col})
# Combine data
historical = pd.merge(spi_historical, fd_historical, left_on=SPI_KEYS, right_on=FD_KEYS).dropna(subset=ODDS_COLS_MAPPING.keys(), how='any').reset_index(drop=True)
fixtures = pd.merge(spi_fixtures, fd_fixtures, left_on=SPI_KEYS, right_on=FD_KEYS)
# Extract training, odds and fixtures
X = historical.loc[:, ['season'] + SPI_KEYS + INPUT_COLS]
y = historical.loc[:, OUTPUT_COLS]
odds = historical.loc[:, SPI_KEYS + list(ODDS_COLS_MAPPING.keys())].rename(columns=ODDS_COLS_MAPPING)
X_test = fixtures.loc[:, SPI_KEYS + INPUT_COLS]
odds_test = fixtures.loc[:, SPI_KEYS + list(ODDS_COLS_MAPPING.keys())].rename(columns=ODDS_COLS_MAPPING)
# Add average scores columns
for ind in (1, 2):
avg_score = y[['adj_score%s' % ind, 'xg%s' % ind, 'nsxg%s' % ind]].mean(axis=1)
avg_score[avg_score.isna()] = y['score%s' % ind]
y['avg_score%s' % ind] = avg_score
# Add combined odds columns
for target in TARGETS:
if '+' in target:
targets = target.split('+')
odds[target] = combine_odds(odds[targets])
odds_test[target] = combine_odds(odds_test[targets])
# Feature extraction
with np.errstate(divide='ignore', invalid='ignore'):
for df in (X, X_test):
df['quality'] = hmean(df[['spi1', 'spi2']], axis=1)
df['importance'] = df[['importance1', 'importance2']].mean(axis=1)
df['rating'] = df[['quality', 'importance']].mean(axis=1)
df['sum_proj_score'] = df['proj_score1'] + df['proj_score2']
return X, y, odds, X_test, odds_test | bfaab71b64979859b7ec474dbf1805e117d9730d | 6,278 |
import math
import random
def daily_selection():
"""
Select a random piece of material from what is available. A piece is defined
by a newline; every line is a new piece of content.
"""
logger.log("Selecting today's material")
with open(settings.CONTENT, "r") as file:
content = file.readlines()
lines = len(content)
prev = get_previous(int(math.log10(lines)))
selection_index = random.choice(list(range(prev)) + list(range(prev + 1, lines)))
selection = content[selection_index]
selection += ("\n" if selection[-1] != "\n" else "")
logger.log("Selected: " + selection, newline=False)
set_previous(selection_index)
return selection | 2b16be5e02273e539e7f0417ef72d28de91624cb | 6,279 |