content (string, lengths 35–762k) | sha1 (string, length 40) | id (int64, 0–3.66M) |
---|---|---|
def matching_poss(poss_1, poss_2):
"""Count how many rows the possibilities have in common.
Arguments:
poss_1 {np.array} -- possibilities 1
poss_2 {np.array} -- possibilities 2
Returns:
int -- the number of matching rows
"""
matches = 0
for row_2 in poss_2:
for row_1 in poss_1:
if np.array_equal(row_1, row_2):
matches += 1
return matches | 096214d8e2115afd21cbd76b28cafa54574cdfb1 | 3,656,043 |
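A minimal usage sketch (hypothetical data; assumes NumPy is available as np where matching_poss is defined):
import numpy as np

poss_a = np.array([[1, 0], [0, 1], [1, 1]])
poss_b = np.array([[0, 1], [1, 1], [0, 0]])
print(matching_poss(poss_a, poss_b))  # 2 -- the rows [0, 1] and [1, 1] appear in both arrays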
from collections import Counter
def unk_emb_stats(sentences, emb):
"""Compute some statistics about unknown tokens in sentences
such as "how many sentences contain an unknown token?".
emb can be gensim KeyedVectors or any other object implementing
__contains__
"""
stats = {
"sents": 0,
"tokens": 0,
"unk_tokens": 0,
"unk_types": 0,
"unk_tokens_lower": 0,
"unk_types_lower": 0,
"sents_with_unk_token": 0,
"sents_with_unk_token_lower": 0}
all_types = set()
for sent in sentences:
stats["sents"] += 1
any_unk_token = False
any_unk_token_lower = False
types = Counter(sent)
for ty, freq in types.items():
all_types.add(ty)
stats["tokens"] += freq
unk = ty not in emb
if unk:
any_unk_token = True
stats["unk_types"] += 1
stats["unk_tokens"] += freq
if unk and ty.lower() not in emb:
any_unk_token_lower = True
stats["unk_types_lower"] += 1
stats["unk_tokens_lower"] += freq
if any_unk_token:
stats["sents_with_unk_token"] += 1
if any_unk_token_lower:
stats["sents_with_unk_token_lower"] += 1
stats["types"] = len(all_types)
return stats | 221b88e2124f3b8da2976a337476a11a7276a470 | 3,656,044 |
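A small usage sketch; a plain set can stand in for emb because only __contains__ is required:
emb = {"the", "cat", "sat"}
sentences = [["the", "cat", "sat"], ["the", "Dog", "ran"]]
stats = unk_emb_stats(sentences, emb)
print(stats["sents"], stats["unk_tokens"], stats["sents_with_unk_token"])  # 2 2 1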
from typing import List
async def search_dcu(
ldap_conn: LDAPConnection, dcu_id: str = None, uid: str = None, fullname: str = None
) -> List[DCUUser]:
"""
Search DCU AD for a user
Args:
ldap_conn: LDAP connection to use for searching
uid: Username to search for
dcu_id: DCU student ID number
fullname: User's full name
Returns:
A list of users found in AD matching the search criteria
"""
query = "".join(
filter(
None,
[
f"(displayName={fullname})" if fullname else None,
f"(cn={uid})" if uid else None,
f"(id={dcu_id})" if dcu_id else None,
],
)
)
if not query:
return []
res = await ldap_conn.search("o=ad,o=dcu,o=ie", f"(&{query})", attributes=DCU_ATTR)
return [DCUUser.from_ldap(user) for user in res] | 32444b30f1463332f51720eef3167c3495deeaec | 3,656,046 |
def jump(inst_ptr, program, direction):
"""Jump the instruction pointer in the program until matching bracket"""
count = direction
while count != 0:
inst_ptr += direction
char = program[inst_ptr]
if char == '[':
count += 1
elif char == ']':
count -= 1
else:
pass
return inst_ptr | 76c6c4dcf4dbc452e9f2b252522871fcca95c75d | 3,656,047 |
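A quick usage sketch with a Brainfuck-style program string (hypothetical input):
program = "+[->+<]."
print(jump(1, program, 1))   # 6 -- forward from the '[' at index 1 to its matching ']'
print(jump(6, program, -1))  # 1 -- backward from the ']' at index 6 to its matching '['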
def center_image(IM, method='com', odd_size=True, square=False, axes=(0, 1),
crop='maintain_size', verbose=False, center=_deprecated,
**kwargs):
"""
Center the image at a custom origin or by one of several methods provided by the
:func:`find_origin` function.
Parameters
----------
IM : 2D np.array
The image data.
method : tuple or str
either a tuple (float, float), the coordinate of the origin of the
image in the (row, column) format, or a string to specify an automatic
centering method:
``image_center``
the center of the image is used as the origin. The trivial result.
``com``
the origin is found as the center of mass.
``convolution``
the origin is found as the maximum of autoconvolution of the image
projections along each axis.
``gaussian``
the origin is extracted from a fit to a Gaussian function.
This is probably only appropriate if the data resembles a
gaussian.
``slice``
the image is broken into slices, and these slices compared for
symmetry.
odd_size : boolean
if ``True``, the returned image will contain an odd number of columns.
Most of the transform methods require this, so it's best to set this
to ``True`` if the image will subsequently be Abel-transformed.
square : bool
if ``True``, the returned image will have a square shape.
crop : str
determines how the image should be cropped. The options are:
``maintain_size``
return image of the same size. Some regions of the original image
may be lost, and some regions may be filled with zeros.
``valid_region``
return the largest image that can be created without padding.
All of the returned image will correspond to the original image.
However, portions of the original image will be lost.
If you can tolerate clipping the edges of the image, this is
probably the method to choose.
``maintain_data``
the image will be padded with zeros such that none of the original
image will be cropped.
axes : int or tuple
center image with respect to axis ``0`` (vertical), ``1`` (horizontal),
or both axes ``(0, 1)`` (default).
Returns
-------
out : 2D np.array
centered image
"""
if center is not _deprecated:
_deprecate('abel.tools.center.center_image() '
'argument "center" is deprecated, use "method" instead.')
method = center
rows, cols = IM.shape
if odd_size and cols % 2 == 0:
# drop rightside column
IM = IM[:, :-1]
rows, cols = IM.shape
if square and rows != cols:
# make rows == cols, but maintain approx. center
if rows > cols:
diff = rows - cols
trim = diff//2
if trim > 0:
IM = IM[trim: -trim] # remove even number of rows off each end
if diff % 2:
IM = IM[: -1] # remove one additional row
else:
# make rows == cols, check row oddness
if odd_size and rows % 2 == 0:
IM = IM[:-1, :]
rows -= 1
xs = (cols - rows)//2
IM = IM[:, xs:-xs]
rows, cols = IM.shape
# origin is in (row, column) format!
if isinstance(method, string_types):
origin = find_origin(IM, method=method, verbose=verbose, **kwargs)
else:
origin = method
centered_data = set_center(IM, origin=origin, crop=crop, axes=axes,
verbose=verbose)
return centered_data | 7b9793d720228a246df07c08a2aeda861108f92e | 3,656,049 |
def get_mapping(mapping_name):
"""
Reads in the given mapping and returns a dictionary of letters to keys. If the given mapping is a dictionary,
does nothing and returns the mapping.
mapping_name can be a path to files in different formats.
"""
# read in mapping
if type(mapping_name) == str:
if mapping_name.split(".")[-1] == "mst":
mapping = create_map_from_reformulation(mapping_name)
elif mapping_name.split(".")[-1] == "txt":
mapping = create_map_from_txt(mapping_name)
else:
raise ValueError("Unsupported mapping file format: " + mapping_name)
return mapping
else:
return mapping_name | 1e4f99d14b242ba4e8760fafecd83cd32932a92c | 3,656,051 |
def evaluate(model, reward_gen, n_steps=1000000, delta=1):
"""Evaulate the regrets and rewards of a given model based on a given reward
generator
Args:
model: the bandit model to evaluate
n_steps (int, optional): number of simulation rounds
delta (int, optional): Number of steps for feedback delay
reward_gen: reward generator providing reward vectors and item-change flags
Returns:
regrets (list): List of regrets for each round. Regret is the maximum
reward minus the selected action's reward for the round
rewards (list): List of rewards for actions taken
"""
regrets = []
rewards = []
last_rewards = []
last_changes = []
last_selected_actions = []
# initialize successes and failures to 0 for all items
successes = np.zeros(model.n_items)
failures = np.zeros(model.n_items)
for step in range(1, n_steps + 1):
reward_vector, item_changed = reward_gen.get_rewards()
# reinitialize the successes and failures if the item has changed
if item_changed:
successes = np.zeros(model.n_items)
failures = np.zeros(model.n_items)
selected_action = model.get_action(item_changed, successes, failures)
regret = (
np.max(reward_gen.reward_probs) - reward_gen.reward_probs[selected_action]
)
regrets.append(regret)
rewards.append(reward_vector[selected_action])
last_rewards.append(reward_vector[selected_action])
last_changes.append(item_changed)
last_selected_actions.append(selected_action)
# record success or failure of action at appropriate index in
#successes or failures
if reward_vector[selected_action] == 1:
successes[selected_action] += 1
else:
failures[selected_action] += 1
# Feedback if delta steps have passed
if step % delta == 0:
model.update(last_selected_actions, last_rewards, last_changes)
last_rewards = []
last_changes = []
last_selected_actions = []
return regrets, rewards | a326e905156f6ac195eeb993878ae651a13a306e | 3,656,052 |
def get_photo_from_response(response: dict):
"""
Parse the JSON response and return a Photo
Keyword arguments:
response -- meetup api response in a dict
return -> get or create Photo
"""
photo, create = Photo.objects.get_or_create(meetup_id=response["id"])
# add optional fields
if "highres_link" in response:
photo.highres_link = response["highres_link"]
if "base_url" in response:
photo.base_url = response["base_url"]
if "photo_link" in response:
photo.photo_link = response["photo_link"]
if "thumb_link" in response:
photo.thumb_link = response["thumb_link"]
if "type" in response:
photo.photo_type = response["type"]
photo.save()
return photo | 9f0aeee796c1131424a7f4292a2b712d2bf0158e | 3,656,053 |
from typing import Union
from typing import List
from typing import Tuple
def composition_plot(adata: AnnData, by: str, condition: str, stacked: bool = True, normalize: bool = True,
condition_sort_by: str = None, cmap: Union[str, List[str], Tuple[str]] = None,
**kwds) -> hv.core.element.Element:
"""
Generate a composition plot, which shows the percentage of observations from every condition within each cluster (by).
Args:
adata: Annotated data matrix.
by: Key for accessing variables of adata.var_names or a field of adata.obs used to group the data.
condition: Key for accessing variables of adata.var_names or a field of adata.obs used to compute counts within a group.
stacked: Whether bars are stacked.
normalize: Normalize counts within each group to sum to one.
condition_sort_by: Sort condition within each group by max, mean, natsorted, or None.
cmap: Color map name (hv.plotting.list_cmaps()) or a list of hex colors. See http://holoviews.org/user_guide/Styling_Plots.html for more information.
"""
adata_raw = __get_raw(adata, False)
keys = [by, condition]
adata_df = __get_df(adata, adata_raw, keys)
for column in adata_df:
if not pd.api.types.is_categorical_dtype(adata_df[column]):
adata_df[column] = adata_df[column].astype(str).astype('category')
cmap = __get_category_cmap(adata_raw, adata_df, condition) if cmap is None else __fix_cmap(adata_df, condition,
cmap)
keywords = dict(stacked=stacked, group_label=condition)
keywords.update(kwds)
invert = keywords.get('invert', False)
if not invert and 'rot' not in keywords:
keywords['rot'] = 90
dummy_df = pd.get_dummies(adata_df[condition])
df = pd.concat([adata_df, dummy_df], axis=1)
df = df.groupby(by).agg(np.sum)
if normalize:
df = df.T.div(df.sum(axis=1)).T
if not (pd.api.types.is_categorical_dtype(df.index) and df.index.dtype.ordered):
df = df.loc[natsorted(df.index)]
secondary = dummy_df.columns.values
if condition_sort_by == 'max' or condition_sort_by == 'mean':
secondary_sort = df.values.max(axis=0) if condition_sort_by == 'max' else df.values.mean(axis=0)
index = np.flip(np.argsort(secondary_sort))
secondary = secondary[index]
elif condition_sort_by == 'natsorted':
secondary = natsorted(secondary)
secondary = list(secondary)
p = df.hvplot.bar(by, secondary, cmap=cmap, **keywords)
p.df = df
return p | f2e588c0ce6d195201754885bbd90aae83b49ba7 | 3,656,054 |
def region_of_province(province_in: str) -> str:
"""
Return the corresponding key in ITALY_MAP whose value contains province_in
:param province_in: str
:return: str
"""
region = None
for r in ITALY_MAP:
for p in ITALY_MAP[r]:
if province_in == p:
region = r
return region | 1aa29235d569929a0cfbbc4258d45ba4f0171f3c | 3,656,055 |
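A sketch of the ITALY_MAP structure this lookup assumes (hypothetical values):
ITALY_MAP = {"Lazio": ["Roma", "Latina"], "Lombardia": ["Milano", "Bergamo"]}
print(region_of_province("Milano"))   # Lombardia
print(region_of_province("Nowhere"))  # None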
def filter_stopwords(words:list)->iter:
"""
Filter the stop words
"""
words = filter(is_not_stopword, words)
return words | a5516886be0ce5c8671ef259baf38b04d61c511f | 3,656,056 |
def numpy_jaccard(box_a, box_b):
"""计算两组矩形两两之间的iou
Args:
box_a: (tensor) bounding boxes, Shape: [A, 4].
box_b: (tensor) bounding boxes, Shape: [B, 4].
Return:
ious: (tensor) Shape: [A, B]
"""
A = box_a.shape[0]
B = box_b.shape[0]
box_a_x1y1 = np.reshape(box_a[:, 2:], (A, 1, 2))
box_a_x1y1 = np.tile(box_a_x1y1, (1, B, 1))
box_b_x1y1 = np.reshape(box_b[:, 2:], (1, B, 2))
box_b_x1y1 = np.tile(box_b_x1y1, (A, 1, 1))
box_a_x0y0 = np.reshape(box_a[:, :2], (A, 1, 2))
box_a_x0y0 = np.tile(box_a_x0y0, (1, B, 1))
box_b_x0y0 = np.reshape(box_b[:, :2], (1, B, 2))
box_b_x0y0 = np.tile(box_b_x0y0, (A, 1, 1))
max_xy = np.minimum(box_a_x1y1, box_b_x1y1)
min_xy = np.maximum(box_a_x0y0, box_b_x0y0)
inter = np.clip((max_xy - min_xy), 0.0, np.inf)
inter = inter[:, :, 0] * inter[:, :, 1]
area_a = ((box_a[:, 2]-box_a[:, 0]) * (box_a[:, 3]-box_a[:, 1]))
area_a = np.reshape(area_a, (A, 1))
area_a = np.tile(area_a, (1, B))
area_b = ((box_b[:, 2]-box_b[:, 0]) * (box_b[:, 3]-box_b[:, 1]))
area_b = np.reshape(area_b, (1, B))
area_b = np.tile(area_b, (A, 1))
union = area_a + area_b - inter
return inter / union | 1c0aed3c354a9253c5f9278109cd13365941846c | 3,656,057 |
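A usage sketch with two small box sets in [x0, y0, x1, y1] format (hypothetical coordinates):
import numpy as np

box_a = np.array([[0., 0., 2., 2.]])
box_b = np.array([[1., 1., 3., 3.], [0., 0., 2., 2.]])
print(numpy_jaccard(box_a, box_b))  # [[0.14285714 1.        ]] -- 1/7 overlap and an exact match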
import uuid
def test_get_rule(client_rule_factory, client_response_factory, registered_rule):
"""Check request data that client uses to get a rule.
1. Create a subclass of the abstract client.
2. Implement send request so that it checks the request parameters.
3. Invoke the get_rule method.
4. Check the rule, returned by the method call.
"""
rule_id = str(uuid.uuid4())
class _Client(AbstractClient):
def _send_request(self, url, method="GET", json=None):
assert url == "rule/{0}".format(rule_id), "Wrong url"
assert method == "GET", "Wrong method"
assert json is None, "Data has been specified"
response_json = {"rule_id": rule_id}
response_json.update(self._rule_factory.serialize_rule(rule=registered_rule))
return response_json
client = _Client(
configuration_url="/",
rule_factory=client_rule_factory,
response_factory=client_response_factory,
)
obtained_rule = client.get_rule(rule_id=rule_id)
assert obtained_rule.rule_id == rule_id, "Rule ID has not been set" | 62b8368072cebf0591137357167980a6d710a1f0 | 3,656,058 |
import colorsys
def summaryhsl(all_summaries, summary):
"""
Choose a color for the given system summary to distinguish it from other types of systems.
Returns hue, saturation, and luminance for the start of the range, and how much the hue can be randomly varied while staying distinguishable.
"""
lowest_att = min(att for att, ms in all_summaries)
highest_att = max(att for att, ms in all_summaries)
att_range = highest_att - lowest_att + 1
attractors, monotonic_species = summary
lowest_ms = min(ms for att, ms in all_summaries if att == attractors)
highest_ms = max(ms for att, ms in all_summaries if att == attractors)
ms_range = highest_ms - lowest_ms + 1
bin_width = 1 / (ms_range + 1) / att_range
hue = ((highest_att - attractors) / att_range) + (highest_ms - monotonic_species) * bin_width
variability_squeeze = (2 if att_range > 1 else 1) * (2 if ms_range > 1 else 1)
return hue, 1, colorsys.ONE_THIRD, bin_width / variability_squeeze | 1e874aaa359a5d8bb566809fc2be212df2890885 | 3,656,059 |
def _get_cached_values(instance, translated_model, language_code, use_fallback=False):
"""
Fetch cached translation values for the given language.
"""
if not appsettings.PARLER_ENABLE_CACHING or not instance.pk or instance._state.adding:
return None
key = get_translation_cache_key(translated_model, instance.pk, language_code)
values = cache.get(key)
if not values:
return None
# Check for a stored fallback marker
if values.get('__FALLBACK__', False):
# Internal trick, already set the fallback marker, so no query will be performed.
instance._translations_cache[translated_model][language_code] = MISSING
# Allow to return the fallback language instead.
if use_fallback:
lang_dict = get_language_settings(language_code)
# iterate over list of fallback languages, which should be already
# in proper order
for fallback_lang in lang_dict['fallbacks']:
if fallback_lang != language_code:
return _get_cached_values(
instance, translated_model, fallback_lang,
use_fallback=False
)
return None
values['master'] = instance
values['language_code'] = language_code
return values | e650eabbfde8b877519b9456dba9021dfa0f78e6 | 3,656,060 |
def tensor_index_by_list(data, list_index):
"""Tensor getitem by list of int and bool"""
data_shape = F.shape(data)
indexes_types = hyper_map(F.typeof, list_index)
if const_utils.judge_indexes_types(indexes_types, mstype.int_type + (mstype.bool_,)):
sub_tuple_index = const_utils.transform_sequence_index(list_index, data_shape[0], const_utils.TENSOR_GETITEM)
if not sub_tuple_index:
data_rank = len(data_shape)
if data_rank == 1:
return const_utils.make_tensor([], data.dtype, ())
return const_utils.make_tensor([], data.dtype, data_shape[1:])
tensor_index = const_utils.make_tensor(sub_tuple_index, mstype.int64)
return F.gather(data, tensor_index, 0)
tuple_index_new = ()
for index in list_index:
tuple_index_new += (index,)
return tensor_index_by_tuple(data, tuple_index_new) | 99702ca58ebd7f316d83687804f09ac0639e3f17 | 3,656,061 |
def sample_ingridient(user, name='Salt'):
"""Create and return a sample ingridient"""
return Ingridient.objects.create(user=user, name=name) | 8904f11164a78959eb8073b80fa349155c1ae185 | 3,656,062 |
def remove_duplicates_from_list(params_list):
"""
Common function to remove duplicates from a list
Author: [email protected]
:param params_list:
:return:
"""
if params_list:
return list(dict.fromkeys(params_list))
return list() | 885b2e048ec672bd2d24fabe25066bc2df3ea8a8 | 3,656,063 |
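Usage sketch; dict.fromkeys keeps the order of first occurrence (Python 3.7+):
print(remove_duplicates_from_list([3, 1, 3, 2, 1]))  # [3, 1, 2]
print(remove_duplicates_from_list(None))             # []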
from typing import Sequence
def _scale_and_shift(
x: chex.Array,
params: Sequence[chex.Array],
has_scale: bool,
has_shift: bool,
) -> chex.Array:
"""Example of a scale and shift function."""
if has_scale and has_shift:
scale, shift = params
return x * scale + shift
elif has_scale:
assert len(params) == 1
return x * params[0]
elif has_shift:
assert len(params) == 1
return x + params[0]
else:
raise ValueError("You must have either `has_scale` or `has_shift` set "
"to True.") | 68c7128ff7c1788cd77e3737adff293f488e190e | 3,656,066 |
import math
def get_distance_metres(aLocation1, aLocation2):
"""
Returns the ground distance in metres between two LocationGlobal objects
:param aLocation1: starting location
:param aLocation2: ending location
:return:
"""
dlat = aLocation2.lat - aLocation1.lat
dlong = aLocation2.lon - aLocation1.lon
dlong_c = dlong*math.cos(math.radians(aLocation1.lat))
return math.sqrt((dlat * dlat) + (dlong_c * dlong_c)) * 1.113195e5 | 5f1428c099f79ba8b41177f87e6a3bffed13e00b | 3,656,067 |
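A sketch using SimpleNamespace objects in place of LocationGlobal values (only .lat and .lon are read; coordinates are hypothetical):
from types import SimpleNamespace

home = SimpleNamespace(lat=52.0, lon=4.000)
target = SimpleNamespace(lat=52.0, lon=4.001)
print(get_distance_metres(home, target))  # roughly 68.5 metres at this latitude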
import scipy
def merge_components(a,c,corr_img_all_r,U,V,normalize_factor,num_list,patch_size,merge_corr_thr=0.6,merge_overlap_thr=0.6,plot_en=False):
""" want to merge components whose correlation images are highly overlapped,
and update a and c after merge with region constrain
Parameters:
-----------
a: np.ndarray
matrix of spatial components (d x K)
c: np.ndarray
matrix of temporal components (T x K)
corr_img_all_r: np.ndarray
corr image
U, V: low rank decomposition of Y
normalize_factor: std of Y
num_list: indices of components
patch_size: dimensions for data
merge_corr_thr: scalar between 0 and 1
temporal correlation threshold for truncating corr image (corr(Y,c)) (default 0.6)
merge_overlap_thr: scalar between 0 and 1
overlap ratio threshold for two corr images (default 0.6)
Returns:
--------
a_pri: np.ndarray
matrix of merged spatial components (d x K')
c_pri: np.ndarray
matrix of merged temporal components (T x K')
corr_pri: np.ndarray
matrix of correlation images for the merged components (d x K')
flag: merge or not
"""
f = np.ones([c.shape[0],1]);
############ calculate overlap area ###########
a = csc_matrix(a);
a_corr = scipy.sparse.triu(a.T.dot(a),k=1);
cor = csc_matrix((corr_img_all_r>merge_corr_thr)*1);
temp = cor.sum(axis=0);
cor_corr = scipy.sparse.triu(cor.T.dot(cor),k=1);
cri = np.asarray((cor_corr/(temp.T)) > merge_overlap_thr)*np.asarray((cor_corr/temp) > merge_overlap_thr)*((a_corr>0).toarray());
a = a.toarray();
connect_comps = np.where(cri > 0);
if len(connect_comps[0]) > 0:
flag = 1;
a_pri = a.copy();
c_pri = c.copy();
G = nx.Graph();
G.add_edges_from(list(zip(connect_comps[0], connect_comps[1])))
comps=list(nx.connected_components(G))
merge_idx = np.unique(np.concatenate([connect_comps[0], connect_comps[1]],axis=0));
a_pri = np.delete(a_pri, merge_idx, axis=1);
c_pri = np.delete(c_pri, merge_idx, axis=1);
corr_pri = np.delete(corr_img_all_r, merge_idx, axis=1);
num_pri = np.delete(num_list,merge_idx);
for comp in comps:
comp=list(comp);
print("merge" + str(num_list[comp]+1));
a_zero = np.zeros([a.shape[0],1]);
a_temp = a[:,comp];
if plot_en:
spatial_comp_plot(a_temp, corr_img_all_r[:,comp].reshape(patch_size[0],patch_size[1],-1,order="F"),num_list[comp],ini=False);
mask_temp = np.where(a_temp.sum(axis=1,keepdims=True) > 0)[0];
a_temp = a_temp[mask_temp,:];
y_temp = np.matmul(a_temp, c[:,comp].T);
a_temp = a_temp.mean(axis=1,keepdims=True);
c_temp = c[:,comp].mean(axis=1,keepdims=True);
model = NMF(n_components=1, init='custom')
a_temp = model.fit_transform(y_temp, W=a_temp, H = (c_temp.T));
a_zero[mask_temp] = a_temp;
c_temp = model.components_.T;
corr_temp = vcorrcoef(U/normalize_factor, V.T, c_temp);
a_pri = np.hstack((a_pri,a_zero));
c_pri = np.hstack((c_pri,c_temp));
corr_pri = np.hstack((corr_pri,corr_temp));
num_pri = np.hstack((num_pri,num_list[comp[0]]));
return flag, a_pri, c_pri, corr_pri, num_pri
else:
flag = 0;
return flag | e2e15c208ae71ba20cc84d8c0501485c04e41a90 | 3,656,068 |
def RenderSubpassStartInputAttachmentsVector(builder, numElems):
"""This method is deprecated. Please switch to Start."""
return StartInputAttachmentsVector(builder, numElems) | 484ba9746f278cfcc7d50a282605ebc9a3a4fb2b | 3,656,069 |
def GetCLInfo(cl_info_str):
"""Gets CL's repo_name and revision."""
return cl_info_str.split('/') | d077216b2804c249a7d0ffdbff7f992dde106501 | 3,656,070 |
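Usage sketch (hypothetical CL info string):
print(GetCLInfo("chromium/abc123def"))  # ['chromium', 'abc123def']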
def acyclic_run(pipeline):
"""
@summary: reverse the back edges so that the pipeline graph becomes acyclic
@return:
"""
deformed_flows = {'{}.{}'.format(flow[PWE.source], flow[PWE.target]): flow_id
for flow_id, flow in pipeline[PWE.flows].items()}
reversed_flows = {}
while True:
no_circle = validate_graph_without_circle(pipeline)
if no_circle['result']:
break
source = no_circle['error_data'][-2]
target = no_circle['error_data'][-1]
circle_flow_key = '{}.{}'.format(source, target)
flow_id = deformed_flows[circle_flow_key]
reversed_flows[flow_id] = deepcopy(pipeline[PWE.flows][flow_id])
pipeline[PWE.flows][flow_id].update({
PWE.source: target,
PWE.target: source
})
source_node = pipeline['all_nodes'][source]
delete_flow_id_from_node_io(source_node, flow_id, PWE.outgoing)
add_flow_id_to_node_io(source_node, flow_id, PWE.incoming)
target_node = pipeline['all_nodes'][target]
delete_flow_id_from_node_io(target_node, flow_id, PWE.incoming)
add_flow_id_to_node_io(target_node, flow_id, PWE.outgoing)
return reversed_flows | 535edb2a7ccd1c0995fe46bff8a931175c353e51 | 3,656,071 |
def TextAreaFieldWidget(field, request): # pylint: disable=invalid-name
"""IFieldWidget factory for TextWidget."""
return FieldWidget(field, TextAreaWidget(request)) | 0d2431b1274e34978a6869efed0014982aaaa2e2 | 3,656,072 |
def s_wexler(T_K):
"""
Calculates slope of saturation vapor pressure curve over water at each temperature
based on Wexler 1976, with coefficients from Hardy 1998 (ITS-90).
Args:
T_K (np.ndarray (dimension<=2), float, list of floats) : Air or Dewpoint Temperatures [K]
Returns:
s : np.ndarray of slopes [Pa / deg C]
"""
powers = np.arange(-3, 4).reshape((1, 1, 7))
pow_coeffs = powers.copy() + 1
T_K = np.atleast_3d(T_K).astype(dtype=np.float64)
temps = np.repeat(T_K, 8, axis=-1)
temps[..., :-1] = pow_coeffs * c.gs[..., :-1] * np.power(temps[..., :-1], powers)
temps[..., -1] = -1. * c.gs[..., -1] * temps[..., -1] ** -1
s = np.squeeze(temps.sum(axis=-1)) * es_wexler(T_K)
return s | b27b713b6c609115fa36f458c7f72358c001fbd5 | 3,656,074 |
from importlib import import_module
def get_additional_bases():
"""
Looks for additional view bases in settings.REST_EASY_VIEW_BASES.
:return:
"""
resolved_bases = []
for base in getattr(settings, 'REST_EASY_VIEW_BASES', []):
mod, cls = base.rsplit('.', 1)
resolved_bases.append(getattr(import_module(mod), cls))
return resolved_bases | 485c2f0d4778399ff534f40e681706419c3c923a | 3,656,075 |
from pathlib import Path
import warnings
def load_mask_from_shp(shp_file: Path, metad: dict) -> np.ndarray:
"""
Load a mask containing geometries from a shapefile,
using a reference dataset
Parameters
----------
shp_file : str
shapefile containing a polygon
metad : dict
rasterio-style metadata dictionary
Returns
-------
mask_im : numpy.ndarray
mask image
Notes
-----
1) Pixels outside of the polygon are assigned
as nodata in the mask
2) Exception is raised if no Polygon geometry exists
in the shapefile
"""
sf = gpd.read_file(shp_file).to_crs(metad["crs"])
# extract non-empty polygons from the shapefile
geoms = [
g for g in sf.geometry if g.type.lower() == "polygon" and g.is_empty is False
]
nshapes = len(geoms)
if nshapes == 0:
raise Exception("input shapefile does not have any 'Polygon' geometry")
if nshapes > 1:
warnings.warn(
f"{nshapes} Polygons found in shapefile. It is recommended only to have one",
UserWarning,
stacklevel=1,
)
mask_im = rasterio.features.geometry_mask(
geoms,
out_shape=(metad["height"], metad["width"]),
transform=metad["transform"],
all_touched=False,
invert=True,
)
return mask_im | fd8178919b2afec71f69a8a7a00e1b2f224d2509 | 3,656,076 |
def est_corner_plot(estimation, settings=None, show=True, save=None):
"""Wrapper to corner plot of `corner <https://corner.readthedocs.io/en/latest/>`_ module;
visualisation of the parameter posterior distribution by all 2-dimensional and
1-dimensional marginals.
Parameters
----------
estimation : memocell.estimation.Estimation
A memocell estimation object.
settings : dict of dict, optional
Optional labels for parameters.
show : bool, optional
Plot is shown if `show=True`.
save : None or str, optional
Provide a path to save the plot.
Returns
-------
fig : matplotlib.figure.Figure
axes : list or array of matplotlib.axes
"""
# if not given, create some default settings
if settings==None:
settings = dict()
for theta_id in estimation.net.net_theta_symbolic:
param = estimation.net.net_rates_identifier[theta_id]
settings[param] = {'label': param}
# get plotting information from estimation instance
samples, labels = estimation._samples_corner_parameters(settings)
# use corner package for this plot
fig = corner.corner(samples, labels=labels)
# save/show figure
if save!=None:
plt.savefig(save, bbox_inches='tight')
if show:
plt.show(fig, block=False)
return fig, fig.axes | 87f9eda0dc3bf61f66d4ee28f693dad4ef383f24 | 3,656,077 |
def voigt_peak_err(peak, A, dA, alphaD, dalphaD):
"""
Gives the error on the peak of the Voigt profile. \
It assumes no correlation between the parameters and that they are \
normally distributed.
:param peak: Peak of the Voigt profile.
:type peak: array
:param A: Area under the Voigt profile.
:param dA: Error on the area `A`.
:type dA: array
:param alphaD: HWHM of the Gaussian core.
:type alphaD: array
:param dalphaD: Error on `alphaD`.
:type dalphaD: array
"""
dpeak = abs(peak)*np.sqrt(np.power(dalphaD/alphaD, 2.) + np.power(dA/A, 2.))
return dpeak | 52d3fbb7fabe5dfe2e5ab67bcd498d5434f7afc7 | 3,656,078 |
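A numeric sketch of the error propagation (hypothetical values; assumes NumPy as np):
import numpy as np

peak, A, dA = np.array([10.0]), np.array([5.0]), np.array([0.5])
alphaD, dalphaD = np.array([2.0]), np.array([0.1])
print(voigt_peak_err(peak, A, dA, alphaD, dalphaD))  # [1.11803399] -- 10 * sqrt(0.05**2 + 0.1**2)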
import typing
def discord_api_call(method: str, params: typing.Dict, func, data, token: str) -> typing.Any:
""" Calls Discord API. """
# This code is from my other repo -> https://gtihub.com/kirillzhosul/python-discord-token-grabber
# Calling.
return func(
f"https://discord.com/api/{method}",
params=params,
headers={
"Authorization": f"{token}",
"Content-Type": "application/json"
},
data=data
) | 84ea201c88dd4260bbc80dbd45654c01cb5a36ee | 3,656,080 |
import logging
def get_startup(config: Config) -> Startup:
"""Extracts and validates startup parameters from the application config
file for the active profile
"""
db_init_schema = config.extract_config_value(
('postgres', 'startup', 'init_schema'),
lambda x: x is not None and isinstance(x, bool),
lambda x: x,
'bool'
)
db_wipe_schema = config.extract_config_value(
('postgres', 'startup', 'wipe_schema'),
lambda x: x is not None and isinstance(x, bool),
lambda x: x,
'bool'
)
if db_wipe_schema and not db_init_schema:
logging.getLogger(__name__).warning(
"Configuration is set to wipe database schema, but not"
" re-initialize it afterward: despite configuration, schema will be"
" re-initialized"
)
db_init_schema = True
return Startup(
init_schema=db_init_schema,
wipe_schema=db_wipe_schema
) | daa3809ed4f8be6c991796c8bbc11ee7b1434ee5 | 3,656,081 |
def new_request(request):
"""Implements view that allows users to create new requests"""
user = request.user
if user.user_type == 'ADM':
return redirect('/admin')
if request.method == "POST":
request_type = request.POST.get('requestType')
if request_type == 'SC' and user.user_object.type == 'PR':
schedule = request.POST.getlist('schedule')
start_time = request.POST.get('start_time')
# Create schedule model
monday_start = tuesday_start = wednesday_start = None
thursday_start = friday_start = saturday_start = sunday_start = None
for day in schedule:
if day == 'MO':
monday_start = start_time
elif day == 'TU':
tuesday_start = start_time
elif day == 'WE':
wednesday_start = start_time
elif day == 'TH':
thursday_start = start_time
elif day == 'FR':
friday_start = start_time
elif day == 'SA':
saturday_start = start_time
elif day == 'SU':
sunday_start = start_time
schedule_model = Schedule.objects.get_or_create(monday_start=monday_start,
tuesday_start=tuesday_start,
wednesday_start=wednesday_start,
thursday_start=thursday_start,
friday_start=friday_start,
saturday_start=saturday_start,
sunday_start=sunday_start)[0]
request_change = None
else:
schedule_model = None
request_change = request.POST.get('request_change')
request = Request.objects.get_or_create(user_id=user, schedule_id=schedule_model,
request_change=request_change,
current_request_review_id=None,
request_type=request_type)[0]
request_review = RequestReview.objects.get_or_create(request_id=request,
status='P')[0]
request.current_request_review_id = request_review
request_review.save()
request.save()
# create new notification
notification = Notification.objects.get_or_create(notification_type='R', is_dismissed=False,
request=request)[0]
notification.save()
# sending emails for this request:
email_vendor.email_admin_new_request(request)
email_vendor.email_user_new_request(request)
return redirect('/requests')
else:
# GET Request
return render(request, 'applications/request_new.html') | d90f72d5f299282709ed8a925569512a81d60591 | 3,656,082 |
def get_slice_test(eval_kwargs, test_kwargs, test_dataloader, robustness_testing_datasets):
"""
Args:
eval_kwargs (dict): extra keyword arguments merged into the dataloader settings
test_kwargs (dict): test settings; an optional 'sampler' entry is reused for the slice dataloader
test_dataloader (Dataloader): callable that builds a dataloader from keyword arguments
robustness_testing_datasets (dict): robustness testing datasets, optionally containing a 'slice' entry
Returns:
The dataloader built for the 'slice' dataset, or None if no slice dataset is present
"""
slice_test = None
if 'slice' in robustness_testing_datasets:
slice_kwargs = {'dataset': robustness_testing_datasets['slice']}
if 'sampler' in test_kwargs:
slice_kwargs['sampler'] = test_kwargs['sampler']
slice_kwargs.update(eval_kwargs)
slice_test = test_dataloader(**slice_kwargs)
return slice_test | b995ff26fd743f106115c5d5958dd0654e0d4645 | 3,656,083 |
def transform_config(cfg, split_1='search:', split_2='known_papers:'):
"""Ugly function to make cfg.yml less ugly."""
before_search, after_search = cfg.split(split_1, 1)
search_default, papers_default = after_search.split(split_2, 1)
search, paper_comment = '', ''
for line in search_default.splitlines():
line = line.strip()
if line:
if line.startswith('-'):
search += ' '
elif line.startswith('# List of paper ids'):
paper_comment = line
continue
search += ' ' + line + '\n'
ok = papers_default
if '-' in papers_default:
ok = ' ['
for line in papers_default.splitlines():
line = line.strip()
if '-' in line:
ok += line.split('- ')[1] + ', '
ok = ok[:-2] + ']'
return f"{before_search}{split_1}\n{search}{paper_comment}\n{split_2}{ok}" | 78d079b6b06c8426be2b65307782129c414a42c4 | 3,656,084 |
def filter_coords(raw_lasso, filter_mtx):
"""Filter the raw data corresponding to the new coordinates."""
filter_mtx_use = filter_mtx.copy()
filter_mtx_use["y"] = filter_mtx_use.index
lasso_data = pd.melt(filter_mtx_use, id_vars=["y"], value_name="MIDCounts")
lasso_data = lasso_data[lasso_data["MIDCounts"] != 0][["x", "y"]]
new_lasso = pd.merge(raw_lasso, lasso_data, on=["x", "y"], how="inner")
return new_lasso | fce1159db2a2bdb75acbe9b7ccb236af8bade627 | 3,656,085 |
def compute_threshold(predictions_list, dev_labels, f1=True):
"""
Determine the best threshold to use for classification.
Inputs:
predictions_list: prediction found by running the model
dev_labels: ground truth label to be compared with predictions_list
f1: True if using F1 score, False if using accuracy score
Returns:
best_threshold: threshold that yields the best score (accuracy or F1)
"""
predictions_list = predictions_list.reshape(-1, 1)
dev_labels = dev_labels.reshape(-1, 1)
both = np.column_stack((predictions_list, dev_labels))
both = both[both[:, 0].argsort()]
predictions_list = both[:, 0].ravel()
dev_labels = both[:, 1].ravel()
accuracies = np.zeros(np.shape(predictions_list))
for i in range(np.shape(predictions_list)[0]):
score = predictions_list[i]
predictions = (predictions_list >= score) * 2 - 1
accuracy = accuracy_score(predictions, dev_labels)
if f1:
accuracy = f1_score(dev_labels, predictions)
accuracies[i] = accuracy
indices = np.argmax(accuracies)
best_threshold = np.mean(predictions_list[indices])
return best_threshold | 230824c1454978cbe7c5f50ee43fba7b16754922 | 3,656,086 |
import torch
def color2position(C, min=None, max=None):
"""
Converts the input colors into a points set
Parameters
----------
C : Tensor
the input color tensor
min : float (optional)
the minimum value for the points set. If None it will be set to -1 (default is None)
max : float (optional)
the maximum value for the points set. If None it will be set to +1 (default is None)
Returns
-------
Tensor
the points set tensor
"""
if min is None:
min = -1
if max is None:
max = 1
return torch.add(torch.mul(C, max-min), min) | 809d8cfd6f24e6abb6d65d5b576cc0b0ccbc3fdf | 3,656,087 |
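Usage sketch with a small tensor:
import torch

C = torch.tensor([0.0, 0.5, 1.0])
print(color2position(C))                  # tensor([-1., 0., 1.])
print(color2position(C, min=0, max=255))  # tensor([0.0000, 127.5000, 255.0000])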
def is_empty_parsed_graph(graph):
"""
Checks if graph parsed from web page only contains an "empty" statement, that was not embedded in page
namely (<subjectURI>, <http://www.w3.org/ns/md#item>, <http://www.w3.org/1999/02/22-rdf-syntax-ns#nil>)
:param graph: an rdflib.Graph
:return: True if graph contains no "real" RDF, False otherwise
"""
if len(graph) > 1:
return False
for po in graph.predicate_objects(None):
if po == (URIRef(u'http://www.w3.org/ns/md#item'),
URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#nil')):
return True
return False | bf66271bc23f078669bc478a133b67c715fd8fdf | 3,656,088 |
def fillinNaN(var,neighbors):
"""
Replace masked areas by filling in values from neighboring cells
"""
for ii in range(var.shape[0]):
a = var[ii,:,:]
count = 0
while np.any(a.mask):
a_copy = a.copy()
for hor_shift,vert_shift in neighbors:
if not np.any(a.mask): break
a_shifted=np.roll(a_copy,shift=hor_shift,axis=1)
a_shifted=np.roll(a_shifted,shift=vert_shift,axis=0)
idx=~a_shifted.mask*a.mask
#print count, idx[idx==True].shape
a[idx]=a_shifted[idx]
count+=1
var[ii,:,:] = a
return var | a8ffc34dac72cbd4ecdbbc9ad02270a457b0b8d9 | 3,656,089 |
def parse_plist_from_bytes(data):
    """
    Convert a binary encoded plist to a dictionary.
    :param data: plist data
    :return: dictionary
    """
    try:
        from plistlib import loads, FMT_BINARY
        return loads(data, fmt=FMT_BINARY)
    except ImportError:
        from bplistlib import loads
        return loads(data, binary=True) | b9f96ef749af88bdb950d8f3f36b584f6766661d | 3,656,090 |
def projection_standardizer(emb):
"""Returns an affine transformation to translate an embedding to the centroid
of the given set of points."""
return Affine.translation(*(-emb.mean(axis=0)[:2])) | 65686636caeac72a16198ac6c7f603836eaedc53 | 3,656,091 |
def forward_imputation(X_features, X_time):
"""
Fill missing values in X_features by carrying forward the last observed measurement.
:param X_features: time series features for all samples
:param X_time: times, when observations were measured
:return: X_features, filled with last measurements instead of zeros (missing observations)
"""
time_length = [np.where(times == 0)[0][1] if np.where(times == 0)[0][0] == 0 else np.where(times == 0)[0][0] for times in X_time]
# impute time series features
for i, sample in enumerate(X_features):
for j, ts in enumerate(sample.T): # note the transposed matrix
first_observation = True
current_value = -1
for k, observation in enumerate(ts[:time_length[i]]):
if X_features[i, k, j] == 0 and first_observation:
continue
elif X_features[i, k, j] != 0:
current_value = X_features[i, k, j]
first_observation = False
elif X_features[i, k, j] == 0 and not first_observation:
X_features[i, k, j] = current_value
return X_features | ea0a41bfef02752338dc5384ce7fcd447c95f8c7 | 3,656,092 |
def calc_mlevel(ctxstr, cgmap, gtftree, pmtsize=1000):
"""
Compute the mean methylation level of promoter/gene/exon/intron/IGN in each gene
"""
inv_ctxs = {'X': 'CG', 'Y': 'CHG', 'Z': 'CHH'}
ign = defaultdict(list)
mtable = defaultdict(lambda: defaultdict(lambda: defaultdict(float)))
counter = defaultdict(lambda: defaultdict(int))
for chr in set(ctxstr) & set(cgmap) & set(gtftree):
mask = [1]*len(cgmap[chr])
for (gene_id, strand) in gtftree[chr]:
feature_mlevels = defaultdict(lambda: defaultdict(list))
gstart = min(gtftree[chr][(gene_id, strand)])[0]
gend = max(gtftree[chr][(gene_id, strand)])[1]
mask[gstart:gend] = [0]*(gend - gstart)
if strand == '+':
for (pos, (tag, mlevel)) in enumerate(izip(ctxstr[chr][gstart-pmtsize:gstart], cgmap[chr][gstart-pmtsize:gstart])):
tag = tag.upper()
if tag in inv_ctxs and mlevel != '-':
feature_mlevels[inv_ctxs[tag]]['pmt'].append(mlevel)
elif strand == '-':
for (pos, (tag, mlevel)) in enumerate(izip(ctxstr[chr][gend:gend+pmtsize], cgmap[chr][gend:gend+pmtsize])):
tag = tag.upper()
if tag in inv_ctxs and mlevel != '-':
feature_mlevels[inv_ctxs[tag]]['pmt'].append(mlevel)
for (pos, (tag, mlevel)) in enumerate(izip(ctxstr[chr][gstart:gend], cgmap[chr][gstart:gend])):
tag = tag.upper()
inexon = False
if tag in inv_ctxs and mlevel != '-':
feature_mlevels[inv_ctxs[tag]]['gene'].append(mlevel)
for exon in gtftree[chr][(gene_id, strand)]:
if exon[0] <= pos+gstart < exon[1]:
feature_mlevels[inv_ctxs[tag]]['exon'].append(mlevel)
inexon = True
break
if not inexon:
feature_mlevels[inv_ctxs[tag]]['intron'].append(mlevel)
for ctx in ['CG', 'CHG', 'CHH']:
for feature in feature_mlevels[ctx]:
counter[ctx][feature] += len(feature_mlevels[ctx][feature])
mtable[ctx][gene_id][feature] = np.mean(feature_mlevels[ctx][feature])
for (pos, (tag, mlevel)) in enumerate(izip(ctxstr[chr], cgmap[chr])):
tag = tag.upper()
if (tag in inv_ctxs) and (mask[pos] == 1) and (mlevel != '-'):
ign[inv_ctxs[tag]].append(mlevel)
for ctx in ign:
ign[ctx] = np.mean(ign[ctx])
cg_table = pd.DataFrame(mtable['CG']).T
cg_table = cg_table[['pmt', 'gene', 'exon', 'intron']]
chg_table = pd.DataFrame(mtable['CHG']).T
chg_table = chg_table[['pmt', 'gene', 'exon', 'intron']]
chh_table = pd.DataFrame(mtable['CHH']).T
chh_table = chh_table[['pmt', 'gene', 'exon', 'intron']]
return ign, cg_table, chg_table, chh_table | 59b3a36e09e6ea0dd3608da0cf04f14f4d487182 | 3,656,093 |
def _get_service(plugin):
"""
Return a service (ie an instance of a plugin class).
:param plugin: any of: the name of a plugin entry point; a plugin class; an
instantiated plugin object.
:return: the service object
"""
if isinstance(plugin, basestring):
try:
(plugin,) = iter_entry_points(
group=PLUGINS_ENTRY_POINT_GROUP,
name=plugin
)
except ValueError:
raise PluginNotFoundException(plugin)
return plugin.load()()
elif isinstance(plugin, _pca_Plugin):
return plugin
elif isclass(plugin) and issubclass(plugin, _pca_Plugin):
return plugin()
else:
raise TypeError("Expected a plugin name, class or instance", plugin) | a3433521b40861926d9ac3efa6a693d926a7fc94 | 3,656,094 |
def taiut1(tai1, tai2, dta):
"""
Wrapper for ERFA function ``eraTaiut1``.
Parameters
----------
tai1 : double array
tai2 : double array
dta : double array
Returns
-------
ut11 : double array
ut12 : double array
Notes
-----
The ERFA documentation is below.
- - - - - - - - - -
e r a T a i u t 1
- - - - - - - - - -
Time scale transformation: International Atomic Time, TAI, to
Universal Time, UT1.
Given:
tai1,tai2 double TAI as a 2-part Julian Date
dta double UT1-TAI in seconds
Returned:
ut11,ut12 double UT1 as a 2-part Julian Date
Returned (function value):
int status: 0 = OK
Notes:
1) tai1+tai2 is Julian Date, apportioned in any convenient way
between the two arguments, for example where tai1 is the Julian
Day Number and tai2 is the fraction of a day. The returned
UT11,UT12 follow suit.
2) The argument dta, i.e. UT1-TAI, is an observed quantity, and is
available from IERS tabulations.
Reference:
Explanatory Supplement to the Astronomical Almanac,
P. Kenneth Seidelmann (ed), University Science Books (1992)
Copyright (C) 2013-2017, NumFOCUS Foundation.
Derived, with permission, from the SOFA library. See notes at end of file.
"""
ut11, ut12, c_retval = ufunc.taiut1(tai1, tai2, dta)
check_errwarn(c_retval, 'taiut1')
return ut11, ut12 | c7f9490a5af86de98c89af37cb6ca1bcb92d107a | 3,656,095 |
from SPARQLWrapper import SPARQLWrapper, JSON
def fetch_ppn(ppn):
"""
"""
ENDPOINT_URL = 'http://openvirtuoso.kbresearch.nl/sparql'
sparql = SPARQLWrapper(ENDPOINT_URL)
sqlquery = """
SELECT ?collatie WHERE {{
kbc:{ppn} dcterms:extent ?formaat, ?collatie .
FILTER (?formaat != ?collatie ) .
FILTER regex(?formaat, "^[0-9]{{1,2}}°", "i") .
}}
""".format(ppn=ppn)
sparql.setQuery(sqlquery)
sparql.setReturnFormat(JSON)
results = sparql.query().convert()
# {'head': {'link': [], 'vars': ['collatie']}, 'results': {'bindings': [{'collatie': {'value': '*`SUP`8`LO` A-S`SUP`8`LO` (S8 blank)', 'type': 'literal'}}], 'distinct': False, 'ordered': True}}
result = results['results']['bindings'][0]['collatie']['value']
return result | fd974eccc7f4c099320c50a20b678d36cea7b899 | 3,656,096 |
def prepare_link_title(
item: feedparser.FeedParserDict) -> feedparser.FeedParserDict:
"""
For an RSS item, returns the link, title and description
:param item:
:return:
"""
result = None
if item:
assert item.title, 'Not found title in item'
assert item.link, 'Not found link in item'
link = item.link.replace('https://www.google.com/url?rct=j&sa=t&url=',
'')
ge_ind = link.find('&ct=ga')
if ge_ind > -1:
link = link[0:ge_ind]
title = item.title.replace('<b>', '').replace('</b>', '')
item.link = link
item.title = title
result = item
return result | 445eccd9855484b65b726a4ee12a3dfa9a9de375 | 3,656,097 |
def api_docs_redirect():
""" Redirect to API docs """
return redirect('/api/v1', code=302) | d7ed10aa264d1403325f0b044b4c7b8b20b5989f | 3,656,098 |
from typing import List
def print_topics(model, vectorizer, top_n: int=10)-> List:
"""Print the top n words found by each topic model.
Args:
model: Sklearn LatentDirichletAllocation model
vectorizer: sklearn CountVectorizer
top_n (int): Number of words you wish to return
Source: https://towardsdatascience.com/end-to-end-topic-modeling-in-python-latent-dirichlet-allocation-lda-35ce4ed6b3e0
"""
for idx, topic in enumerate(model.components_):
print(f"Topic {idx}:")
print([(vectorizer.get_feature_names()[i], topic[i])
for i in topic.argsort()[:-top_n - 1:-1]])
return [vectorizer.get_feature_names()[i] for i in topic.argsort()[:-top_n-1:-1]] | c0477c19c6806c2eaacb4165d332291dc0ba341b | 3,656,099 |
def create_logistic_vector(input_vector, cutoff):
"""
Creates a vector of 1s and -1s from an input vector of numbers, based on a cut-off point.
"""
output_vector = np.zeros(len(input_vector))
n = 0
for i in range(len(input_vector)):
if input_vector[i] > cutoff:
output_vector[i] = 1
else:
output_vector[i] = -1 # Set to -1 rather than 0 to help make later calculations easier.
n += 1
return output_vector | bb4756f745f56fae7d8d4f0b1a1758ef6d70fb5a | 3,656,102 |
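Usage sketch (hypothetical scores; assumes NumPy as np):
import numpy as np

scores = np.array([0.2, 0.8, 0.5, 0.9])
print(create_logistic_vector(scores, 0.6))  # [-1.  1. -1.  1.]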
def profile(username):
""" user profile """
user = User.query.filter_by(username=username).first_or_404()
return render_template("user/profile.jinja.html", user=user) | 31e2a1b108c0652356cea92f32e9077105733726 | 3,656,103 |
def action_rescale(action):
"""Rescale Distribution actions to exp one"""
return np.array([0 if abs(a) < 0.5 else 10 ** (a-3) if a > 0 else -(10 ** (-a - 3)) for a in action * 3]) | 534509b76410eaeadee599e1ac510def8243e7ba | 3,656,104 |
def multi_knee(points: np.ndarray, t1: float = 0.99, t2: int = 3) -> np.ndarray:
"""
Recursive knee point detection based on the curvature equations.
It returns the knee points on the curve.
Args:
points (np.ndarray): numpy array with the points (x, y)
t1 (float): coefficient of determination threshold (default 0.99)
t2 (int): number of points threshold (default 3)
Returns:
np.ndarray: knee points on the curve
"""
return mk.multi_knee(knee, points, t1, t2) | ee44d9f51c843a0fb8e18f10057b5c6510dd8f3a | 3,656,105 |
def parse_dotted_path(path):
"""
Extracts attribute name from dotted path.
"""
try:
objects, attr = path.rsplit('.', 1)
except ValueError:
objects = None
attr = path
return objects, attr | 4685fad6461286b957a8d0056df2146fdd0f2e55 | 3,656,106 |
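Usage sketch:
print(parse_dotted_path("user.profile.avatar"))  # ('user.profile', 'avatar')
print(parse_dotted_path("avatar"))               # (None, 'avatar')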
def resource_media_fields(document, resource):
""" Returns a list of media fields defined in the resource schema.
:param document: the document eventually containing the media files.
:param resource: the resource being consumed by the request.
.. versionadded:: 0.3
"""
media_fields = app.config['DOMAIN'][resource]['_media']
return [field for field in media_fields if field in document] | b54bc5f7fd35626866d70ce6d46bf0b84b9cf1b8 | 3,656,107 |
from typing import List
from typing import Tuple
def _parse_moving(message: List[str]) -> Tuple[Actions, str]:
"""Parses the incoming message list to determine if movement is found.
Args:
message: list of words in the player message
Returns: a tuple of the action and direction
"""
short_dir = ['n', 'ne', 'e', 'se', 's', 'sw', 'w', 'nw']
long_dir = [
'north', 'northeast', 'east', 'southeast', 'south', 'southwest',
'west', 'northwest'
]
for d in long_dir:
if d in message:
return (Actions.MOVE, d)
for d in short_dir:
if d in message:
direction = long_dir[short_dir.index(d)]
return (Actions.MOVE, direction)
return (Actions.UNKNOWN, '') | 9785cbeb39dbc9ba980f605b648fb77855fe863d | 3,656,109 |
def _Net_forward_all(self, blobs=None, **kwargs):
"""
Run net forward in batches.
Take
blobs: list of blobs to extract as in forward()
kwargs: Keys are input blob names and values are blob ndarrays.
Refer to forward().
Give
all_outs: {blob name: list of blobs} dict.
"""
# Collect outputs from batches
all_outs = {out: [] for out in set(self.outputs + (blobs or []))}
for batch in self._batch(kwargs):
outs = self.forward(blobs=blobs, **batch)
for out, out_blob in outs.items():
all_outs[out].extend(out_blob.copy())
# Package in ndarray.
for out in all_outs:
all_outs[out] = np.asarray(all_outs[out])
# Discard padding.
pad = len(next(iter(all_outs.values()))) - len(next(iter(kwargs.values())))
if pad:
for out in all_outs:
all_outs[out] = all_outs[out][:-pad]
return all_outs | f5b6acf347e4fb85e0148d2d176042559f93b6a1 | 3,656,110 |
def email_members_old(request, course_prefix, course_suffix):
"""
Displays the email form and handles email actions
Right now this is blocking and does not do any batching.
Will have to make it better
"""
error_msg=""
success_msg=""
form = EmailForm()
if request.method == "POST":
form = EmailForm(data=request.POST)
if form.is_valid():
sender = request.common_page_data['course'].title + ' Staff <[email protected]>'
recipient_qset = User.objects.none() #get recipients in a QuerySet
if form.cleaned_data['to'] == "all" :
recipient_qset = request.common_page_data['course'].get_all_members()
elif form.cleaned_data['to'] == "students" :
recipient_qset = request.common_page_data['course'].get_all_students()
elif form.cleaned_data['to'] == "staff" :
recipient_qset = request.common_page_data['course'].get_all_course_admins()
elif form.cleaned_data['to'] == "myself":
recipient_qset = User.objects.filter(id=request.user.id)
#pdb.set_trace()
courses.email_members.tasks.email_with_celery.delay(
form.cleaned_data['subject'],
form.cleaned_data['message'],
sender,
recipient_qset.values_list('email',flat=True),
course_title=request.common_page_data['course'].title,
course_url=request.build_absolute_uri(reverse('courses.views.main', args=[course_prefix, course_suffix])))
success_msg = "Your email was successfully queued for sending"
#form = EmailForm()
else:
error_msg = "Please fix the errors below:"
context = RequestContext(request)
return render_to_response('email/email.html',
{'form': form,
'error_msg': error_msg,
'success_msg': success_msg,
'course': request.common_page_data['course'],
'common_page_data': request.common_page_data},
context_instance=context) | eeafa4d9c4ca0b0ad1b7ffaecceb03c188e02813 | 3,656,111 |
def Padding_op(Image, strides, offset_x, offset_y):
"""
Takes an image and the offsets needed to fit the output dimensions with the given strides,
and computes the padding required for a perfect fit.
:param Image:
:param strides:
:param offset_x:
:param offset_y:
:return: Padded image
"""
if config['volumetric']:
raise Exception("3D Padding not yet implemented!")
padding_x = strides[0] - offset_x
padding_y = strides[1] - offset_y
Padded_Image = np.zeros(shape=(Image.shape[0] + padding_x, Image.shape[1] + padding_y, Image.shape[2]),
dtype=Image.dtype)
Padded_Image[padding_x // 2:(padding_x // 2) + (Image.shape[0]), padding_y // 2:(padding_y // 2) + Image.shape[1],
:] = Image
return Padded_Image | d3f046069a597f2d7e3204b543f1f60c8e1e5b23 | 3,656,112 |
def area_triangle(point_a: array_like, point_b: array_like, point_c: array_like) -> np.float64:
"""
Return the area of a triangle defined by three points.
The points are the vertices of the triangle. They must be 3D or less.
Parameters
----------
point_a, point_b, point_c : array_like
The three vertices of the triangle.
Returns
-------
np.float64
The area of the triangle.
References
----------
http://mathworld.wolfram.com/TriangleArea.html
Examples
--------
>>> from skspatial.measurement import area_triangle
>>> area_triangle([0, 0], [0, 1], [1, 0])
0.5
>>> area_triangle([0, 0], [0, 2], [1, 1])
1.0
>>> area_triangle([3, -5, 1], [5, 2, 1], [9, 4, 2]).round(2)
12.54
"""
vector_ab = Vector.from_points(point_a, point_b)
vector_ac = Vector.from_points(point_a, point_c)
# Normal vector of plane defined by the three points.
vector_normal = vector_ab.cross(vector_ac)
return 0.5 * vector_normal.norm() | 0c21ca96f8a6fd4d088cf0fa47a260b3bc582966 | 3,656,113 |
def test_valid(line):
"""Test for 40 character hex strings
Print error on failure"""
base_error = '*** WARNING *** line in torrent list'
if len(line) != 40:
print(base_error, 'incorrect length:', line)
elif any(char not in HEX for char in line):
print(base_error, 'has non-hex digits:', line)
else:
return True | ca6517a8dd622b07703b30af7842685b9b6d5865 | 3,656,115 |
def custom_formatter(code, msg):
""" 自定义结果格式化函数
:param code: 响应码
:param msg: 响应消息
"""
return {
"code": code,
"msg": "hello",
"sss": "tt",
} | 59a7e3f9f03f9afc42b8faec6ebe23f5373d0bf0 | 3,656,117 |
def get_sampler_callback(rank, num_replicas, noniid=0, longtail=0):
"""
noniid: noniid controls the noniidness.
- noniid = 1 refers to completely noniid
- noniid = 0 refers to iid.
longtail: longtail controls the long-tailness.
- Class i takes (1-longtail) ** i percent of data.
"""
assert noniid >= 0 and noniid <= 1, f"`noniid` in [0, 1], get {noniid}"
assert longtail >= 0 and longtail <= 1, f"`longtail` in [0, 1], get {longtail}"
if longtail > 0:
return lambda x: NONIIDLTSampler(
alpha=1 - noniid,
beta=1 - longtail,
num_replicas=num_replicas,
rank=rank,
shuffle=True,
dataset=x,
)
if noniid == 0:
# Byzantine workers
return lambda x: DistributedSampler(
num_replicas=num_replicas,
rank=rank,
shuffle=True,
dataset=x,
)
if noniid > 0:
return lambda x: DecentralizedMixedSampler(
noniid_percent=noniid,
num_replicas=num_replicas,
rank=rank,
shuffle=True,
dataset=x,
)
raise NotImplementedError("") | 05e526ba903ebd834f248d965253344136e8a8a8 | 3,656,118 |
def alloc_bitrate(frame_nos, chunk_frames, pref_bitrate, nrow_tiles, ncol_tiles):
"""
Allocates equal bitrate to all the tiles
"""
vid_bitrate = []
for i in range(len(chunk_frames)):
chunk = chunk_frames[i]
chunk_bitrate = [[-1 for x in range(ncol_tiles)] for y in range(nrow_tiles)]
chunk_weight = [[1. for x in range(ncol_tiles)] for y in range(nrow_tiles)]
total_weight = sum(sum(x) for x in chunk_weight)
for x in range(nrow_tiles):
for y in range(ncol_tiles):
chunk_bitrate[x][y] = chunk_weight[x][y]*pref_bitrate/total_weight;
vid_bitrate.append(chunk_bitrate)
return vid_bitrate | 1883f480852d49e63c0408c9ef0daeba9e50db6b | 3,656,119 |
from typing import Collection
from typing import Any
def file_filter(extensions: Collection[str]) -> Any:
"""Register a page content filter for file extensions."""
def wrapper(f):
for ext in extensions:
_file_filters[ext] = f
return f
return wrapper | bef1a304497ffaac3f294607d8a393e505c1eb19 | 3,656,120 |
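A registration sketch (render_markdown is a hypothetical filter; _file_filters is assumed to be the module-level registry dict the decorator writes to):
@file_filter([".md", ".markdown"])
def render_markdown(text):
    # hypothetical transform; a real filter would render Markdown to HTML
    return text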
def epc_calc_img_size(reg_dict):
"""
Calculate the output image size from the EPC660 sensor
Parameters
----------
reg_dict : dict
Returns
----------
int
The number of rows
int
The number of columns in the image
"""
col_start, col_end, row_start, row_end = epc_calc_roi(reg_dict)
row_bin, col_bin = epc_calc_bin_mode(reg_dict)
row_binning, col_binning = epc_calc_binning(reg_dict)
row_div = 1
col_div = 1
if row_bin:
row_div = (1 << row_binning)
if col_bin:
col_div = (1 << col_binning)
nrows = (2*(row_end-row_start+1))/row_div
ncols = (col_end-col_start+1)/col_div
return nrows, ncols | 698b6ae6a99f8f9621c40ffaee2ab5ea5e584ce1 | 3,656,121 |
def simple_url_formatter(endpoint, url):
"""
A simple URL formatter to use when no application context
is available.
:param str endpoint: the endpoint to use.
:param str url: the URL to format
"""
return u"/{}".format(url) | 74f3e68fe10f7cc6bf8bfe81a7349a995bb79fa3 | 3,656,122 |
from typing import List
def generate_service(
name: str,
image: str,
ports: List[str] = [],
volumes: List[str] = [],
dependsOn: List[str] = [],
) -> str:
"""
Creates a string with docker compose service specification.
Arguments are a list of values that need to be added to each section
named after the parameter. i.e. the volume arguments are for the
volumes section of the service config.
"""
indent = ' '
service = "{s}{name}:\n{s}{s}image: {image}\n".format(
s=indent,
name=name,
image=image,
)
if ports:
service += "{s}ports:\n".format(s=indent*2)
for port in ports:
service += '{s}- "{port}"\n'.format(s=indent*3, port=port)
if volumes:
service += "{s}volumes:\n".format(s=indent*2)
for vol in volumes:
service += '{s}- {vol}\n'.format(s=indent*3, vol=vol)
if dependsOn:
service += "{s}depends_on:\n".format(s=indent*2)
for item in dependsOn:
service += '{s}- "{dep}"\n'.format(s=indent*3, dep=item)
return service | 581e37e69d73ab5b6c0ac533bd91e7b5cb5187d9 | 3,656,123 |
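A usage sketch (image name and mounts are hypothetical); the result is an indented YAML fragment ready to paste under a services: section:
print(generate_service(
    "db", "postgres:13",
    ports=["5432:5432"],
    volumes=["./data:/var/lib/postgresql/data"],
    dependsOn=["cache"],
))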
def read_integer(msg=None, error_msg=None):
"""
Asks the user for an integer value (int or long)
:param msg: The message, displayed to the user.
:param error_msg: The message, displayed to the user, in case he did not entered a valid int or long.
:return: An int or a long from the user.
"""
res = raw_input(msg)
try:
return int(res)
except (TypeError, ValueError):
pass
try:
return long(res)
except (TypeError, ValueError):
pass
if error_msg is not None:
print(error_msg)
return read_integer(msg=msg, error_msg=error_msg) | c3067b436f57583b89ca02bff5e01802845ebf69 | 3,656,124 |
def percError(predicted, observed):
"""Percentage Error
Parameters
==========
predicted : array-like
Array-like (list, numpy array, etc.) of predictions
observed : array-like
Array-like (list, numpy array, etc.) of observed values of scalar
quantity
Returns
=======
perc : float
Array of forecast errors expressed as a percentage
"""
err, pred, obse = forecastError(predicted, observed, full=True)
res = err/obse
return 100*res | 168affcb5af47563c15d27c6e662b0cf6411eca2 | 3,656,126 |
def _dict_eq(a, b):
"""
Compare dictionaries using their items iterators and loading as much
as half of each into a local temporary store. For comparisons of ordered
dicts, memory usage is nil. For comparisons of dicts whose iterators
differ in sequence maximally, memory consumption is O(N). Execution time
is O(N).
:param a: one dict
:param b: another dict
:return: True if they're the same, false otherwise
"""
# The memory consumption here is to make a linear improvement in execution
# time. In the case of a dict backed by Redis, it is faster to iterate
# over N items than to retrieve each one, by a factor of 10 or more
# because of the reduced round-trips to the server.
size = len(a)
if size != len(b):
return False
# Iterate over both dicts. Compare items. If the same ones come up
# at the same time, great, they match. If different ones come up,
# store them in the am and bm collections of misses. Check for prior
# misses that may be matched by the new elements.
bi = iteritems(b)
am = {}
bm = {}
for ak, av in iteritems(a):
bk, bv = next(bi)
if ak == bk:
if av != bv:
return False
else: # keys differ
if ak in bm:
if bm[ak] == av:
del bm[ak]
else:
return False
else:
am[ak] = av
if bk in am:
if am[bk] == bv:
del am[bk]
else:
return False
else:
bm[bk] = bv
if len(am) + len(bm) > size:
return False
return len(am) + len(bm) == 0 | 68292489e4f6f8f213f4d17cf799052cb99ece37 | 3,656,127 |
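A small usage sketch, assuming iteritems comes from six (or an equivalent Python 2/3 compat helper).
# Dicts with the same items in different insertion order compare equal.
a = {"x": 1, "y": 2, "z": 3}
b = {"z": 3, "x": 1, "y": 2}
print(_dict_eq(a, b))          # True
print(_dict_eq(a, {"x": 1}))   # False (sizes differ)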
from typing import Dict
from typing import List
def avoid_snakes(my_head: Dict[str, int], snakes: List[dict], possible_moves: List[str]) -> List[str]:
"""
my_head: Dictionary of x/y coordinates of the Battlesnake head.
e.g. {"x": 0, "y": 0}
snakes: List of dictionaries of x/y coordinates for every segment of a Battlesnake.
e.g. [ {"x": 0, "y": 0}, {"x": 1, "y": 0}, {"x": 2, "y": 0} ]
possible_moves: List of strings. Moves to pick from.
e.g. ["up", "down", "left", "right"]
return: The list of remaining possible_moves not blocked by other snakes
"""
for snake in snakes:
for segment in snake["body"]:
if my_head["x"] - 1 == segment["x"] and my_head["y"] == segment["y"]:
print("Segment to the left")
remove_move("left", possible_moves)
if my_head["x"] + 1 == segment["x"] and my_head["y"] == segment["y"]:
print("Segment to the right")
remove_move("right", possible_moves)
if my_head["x"] == segment["x"] and my_head["y"] - 1 == segment["y"]:
print("Segment below")
remove_move("down", possible_moves)
if my_head["x"] == segment["x"] and my_head["y"] + 1 == segment["y"]:
print("Segment above")
remove_move("up", possible_moves)
# We're going to be super conservative if we're near another head
# to avoid head on collisions
if my_head["x"] - 2 == snake["head"]["x"] and my_head["y"] == snake["head"]["y"]:
print("Dodge the head!")
remove_move("left", possible_moves)
if my_head["x"] + 2 == snake["head"]["x"] and my_head["y"] == snake["head"]["y"]:
print("Dodge the head!")
remove_move("right", possible_moves)
if my_head["x"] == snake["head"]["x"] and my_head["y"] - 2 == snake["head"]["y"]:
print("Dodge the head!")
remove_move("down", possible_moves)
if my_head["x"] == snake["head"]["x"] and my_head["y"] + 2 == snake["head"]["y"]:
print("Dodge the head!")
remove_move("up", possible_moves)
if my_head["x"] - 1 == snake["head"]["x"] and my_head["y"] + 1 == snake["head"]["y"]:
print("Dodge the head!")
remove_move("left", possible_moves)
remove_move("up", possible_moves)
if my_head["x"] - 1 == snake["head"]["x"] and my_head["y"] - 1 == snake["head"]["y"]:
print("Dodge the head!")
remove_move("left", possible_moves)
remove_move("down", possible_moves)
if my_head["x"] + 1 == snake["head"]["x"] and my_head["y"] + 1 == snake["head"]["y"]:
print("Dodge the head!")
remove_move("right", possible_moves)
remove_move("up", possible_moves)
if my_head["x"] + 1 == snake["head"]["x"] and my_head["y"] - 1 == snake["head"]["y"]:
print("Dodge the head!")
remove_move("right", possible_moves)
remove_move("down", possible_moves)
return possible_moves | dcdd80522486ec1c6001aa8990f2bfaf88235ec1 | 3,656,128 |
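A hedged usage sketch; remove_move is defined elsewhere in the original project, so a minimal stand-in is assumed here.
# Assumed helper: the real remove_move may differ.
def remove_move(move, possible_moves):
    if move in possible_moves:
        possible_moves.remove(move)

head = {"x": 5, "y": 5}
snakes = [{"head": {"x": 6, "y": 5}, "body": [{"x": 6, "y": 5}, {"x": 7, "y": 5}]}]
print(avoid_snakes(head, snakes, ["up", "down", "left", "right"]))  # ['up', 'down', 'left']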
def m_unicom_online_time2_0(seq):
"""
    Get the code (bucket label) corresponding to a China Unicom time-on-network interval.
    :param seq: China Unicom time-on-network interval
    :return: code
    example:
    :seq: ["[0-1]"]
    :return: ["(0_6)"]
"""
if not seq:
return []
if seq[0] in ["[0-1]", "(1-2]", "[3-6]"]:
seq = ["(0_6)"]
elif seq[0] in ["[7-12]"]:
seq = ["[6_12)"]
elif seq[0] in ["[13-24]"]:
seq = ["[12_24)"]
elif seq[0] in ["[25-36]", "[37,+)"]:
seq = ["[24_+)"]
return seq | 4a242d76f3d2708b5ad590830156a44fd22e7267 | 3,656,130 |
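A quick usage sketch of the bucketing behaviour.
print(m_unicom_online_time2_0(["[0-1]"]))    # ['(0_6)']
print(m_unicom_online_time2_0(["[13-24]"]))  # ['[12_24)']
print(m_unicom_online_time2_0([]))           # []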
def convert_config_gui_structure(config_gui_structure, port, instance_id,
is_port_in_database, conf):
"""
Converts the internal data structure to a dictionary which follows the
"Configuration file structure", see setup.rst
:param config_gui_structure: Data structure used to hold and show
configuration information in the Gui
:return A dictionary which follows the "Configuration file structure",
see setup.rst
"""
config_dict = identify_existing_config_file(port, conf.OPRP_DIR_PATH)
if not is_port_in_database and config_dict:
file_path = get_config_file_path(port, conf.OPRP_DIR_PATH)
LOGGER.error("The identified configuration file does not exist in the database. "
"File path: %s" % file_path)
if not (is_port_in_database and config_dict):
config_dict = get_default_client()
config_dict = clear_config_keys(config_dict)
if instance_id:
config_dict[CONFIG_DICT_INSTANCE_ID_KEY] = instance_id
if contains_dynamic_discovery_info(config_gui_structure):
gui_config = GuiConfig(config_gui_structure)
config_dict['srv_discovery_url'] = gui_config.get_dynamic_discovery_issuer()
elif config_gui_structure['fetchStaticProviderInfo']['showInputFields']:
config_dict = static_provider_info_to_config_file_dict(config_gui_structure,
config_dict)
config_dict = client_registration_to_config_file_dict(config_gui_structure, config_dict)
config_dict = subject_type_to_config_file_dict(config_dict, config_gui_structure)
config_dict = profile_to_config_file_dict(config_dict, config_gui_structure)
if config_gui_structure['webfingerSubject'] != "":
config_dict['webfinger_subject'] = config_gui_structure['webfingerSubject']
if config_gui_structure['loginHint'] != "":
config_dict['login_hint'] = config_gui_structure['loginHint']
if config_gui_structure['uiLocales'] != "":
config_dict['ui_locales'] = config_gui_structure['uiLocales']
if config_gui_structure['claimsLocales'] != "":
config_dict['claims_locales'] = config_gui_structure['claimsLocales']
if config_gui_structure['acrValues'] != "":
config_dict['acr_values'] = config_gui_structure['acrValues']
if config_gui_structure['webfinger_url'] != "":
config_dict['webfinger_url'] = config_gui_structure['webfinger_url']
if config_gui_structure['webfinger_email'] != "":
config_dict['webfinger_email'] = config_gui_structure['webfinger_email']
return config_dict | 3f46a621261ba097918fb5b5d27bd7611910a623 | 3,656,131 |
def message_similarity_hard(m1, m2):
"""
    Inputs: one-dimensional numpy arrays of the same length.
"""
return int(np.all(m1==m2)) | 8f649a295853c34d692fb96a0a7facbc82d67ddb | 3,656,132 |
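A quick usage sketch.
import numpy as np

print(message_similarity_hard(np.array([1, 0, 1]), np.array([1, 0, 1])))  # 1
print(message_similarity_hard(np.array([1, 0, 1]), np.array([1, 1, 1])))  # 0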
def identity_block(input_tensor, kernel_size, filters, stage, block):
"""
The identity_block is the block that has no conv layer at shortcut
Arguments
input_tensor: input tensor
    kernel_size: default 3, the kernel size of middle conv layer at main path
filters: list of integers, the nb_filters of 3 conv layer at main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
"""
nb_filter1, nb_filter2, nb_filter3 = filters
bn_axis = 3
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = Convolution2D(nb_filter1, 1, 1, name=conv_name_base + '2a')(input_tensor)
x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)
x = Activation('relu')(x)
x = Convolution2D(nb_filter2, kernel_size, kernel_size,
border_mode='same', name=conv_name_base + '2b')(x)
x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)
x = Activation('relu')(x)
x = Convolution2D(nb_filter3, 1, 1, name=conv_name_base + '2c')(x)
x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)
x = merge([x, input_tensor], mode='sum')
x = Activation('relu')(x)
return x | 38a898a3b52f12490584206dfa6ea6b9819a1240 | 3,656,133 |
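The block above targets the old Keras 1.x API (Convolution2D, merge). Below is a hedged sketch of the same residual identity block in the Keras 2 functional API, given for comparison; it is an assumed translation, not part of the original code.
from tensorflow.keras import layers

def identity_block_v2(input_tensor, kernel_size, filters, stage, block):
    nb_filter1, nb_filter2, nb_filter3 = filters
    conv_name = 'res' + str(stage) + block + '_branch'
    bn_name = 'bn' + str(stage) + block + '_branch'
    x = layers.Conv2D(nb_filter1, 1, name=conv_name + '2a')(input_tensor)
    x = layers.BatchNormalization(axis=3, name=bn_name + '2a')(x)
    x = layers.Activation('relu')(x)
    x = layers.Conv2D(nb_filter2, kernel_size, padding='same', name=conv_name + '2b')(x)
    x = layers.BatchNormalization(axis=3, name=bn_name + '2b')(x)
    x = layers.Activation('relu')(x)
    x = layers.Conv2D(nb_filter3, 1, name=conv_name + '2c')(x)
    x = layers.BatchNormalization(axis=3, name=bn_name + '2c')(x)
    x = layers.Add()([x, input_tensor])  # shortcut connection
    return layers.Activation('relu')(x)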
def convert_to_squad(story_summary_content, question_content, set_type):
"""
:param story_summary_content:
:param question_content:
    :param set_type:
    :return: formatted SQuAD data
    In this initial version we focus only on the context and question,
    so the answer field is filled with a dummy value for now.
"""
squad_formatted_content = dict()
squad_formatted_content['version'] = 'narrativeqa_squad_format'
data = []
content = story_summary_content
if set_type != 'all':
content = story_summary_content[story_summary_content['set'] == set_type]
for datum in content.itertuples(index=False):
#print(datum.summary)
data_ELEMENT = dict()
data_ELEMENT['title'] = 'dummyTitle'
paragraphs = []
paragraphs_ELEMENT = dict()
superdocument = datum.summary
paragraphs_ELEMENT['context'] = superdocument
qas = []
sub_datum = question_content[question_content['document_id'] == datum.document_id]
for q_datum in sub_datum.itertuples():
# print(indx)
#print(q_datum)
qas_ELEMENT = dict()
ANSWERS_ELEMENT = dict()
qas_ELEMENT_ANSWERS = []
qas_ELEMENT['id'] = q_datum.document_id + '-' + str(q_datum.Index)
qas_ELEMENT['question'] = q_datum.question
ANSWERS_ELEMENT['answer_start'] = -1
ANSWERS_ELEMENT['text'] = 'dummyAnswer'
qas_ELEMENT_ANSWERS.append(ANSWERS_ELEMENT)
qas_ELEMENT['answers'] = qas_ELEMENT_ANSWERS
qas.append(qas_ELEMENT)
paragraphs_ELEMENT['qas'] = qas
paragraphs.append(paragraphs_ELEMENT)
data_ELEMENT['paragraphs'] = paragraphs
data.append(data_ELEMENT)
squad_formatted_content['data'] = data
return squad_formatted_content | 5b884ef521af4d5835fef25f01cb1f11d68cfafb | 3,656,134 |
def check_conditions(conditions, variable_dict, domain_dict, domain_list):
"""A function that checks if the generated variables pass the conditions and generates new ones until they do.
:param conditions: The conditions of the template.
:param variable_dict: List of variables.
:param domain_dict: the domain of the variables.
:param domain_list: a dict with the domain list.
:return: List of variables that pass the conditions of the given template.
"""
conditions = remove_unnecessary(conditions)
# Check conditions --> if false: change a variable -> check conditions
inserted_conditions = string_replace(conditions, variable_dict)
while not parse_expr(latex_to_sympy(inserted_conditions), transformations=standard_transformations +
(convert_xor, implicit_multiplication_application,), global_dict=None, evaluate=True):
        variable_to_change = choice(list(variable_dict.keys()))  # Choose a random key from variable_dict
if domain_list[variable_to_change]:
variable_dict[variable_to_change] = make_number_from_list(domain_dict[variable_to_change])
else:
variable_dict[variable_to_change] = new_random_value(variable_to_change, domain_dict)
inserted_conditions = string_replace(conditions, variable_dict)
return variable_dict | fffd9889d3c149f56041753522aee245135cf0ee | 3,656,136 |
def set_pin_on_teaching_page(request,
section_label,
pin=True):
"""
if pin=True, pin the section on teaching page
if pin=False, unpin the section from teaching page
@except InvalidSectionID
@except NotSectionInstructorException
@except UserCourseDisplay.DoesNotExist
"""
section = get_section_by_label(section_label)
check_section_instructor(section, get_person_of_current_user(request))
# not to pin a primary section
if section.is_primary_section:
return False
return UserCourseDisplay.set_pin(
get_user_model(request), section_label, pin) | 385940e3adc286a923a94a3205b56c3817ee6284 | 3,656,137 |
from typing import Any
def inject_python_resources() -> dict[str, Any]:
"""
Inject common resources to be used in Jinja templates.
"""
return dict(
isinstance=isinstance,
zip=zip,
enumerate=enumerate,
len=len,
str=str,
bool=bool,
int=int,
float=float,
) | 98fb7fbf39f20b9972ef5c0d35ae12b2864580b2 | 3,656,138 |
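A hedged usage sketch with Jinja2; the template string is illustrative.
from jinja2 import Template

context = inject_python_resources()
template = Template("{{ len(items) }} items, first is {{ str(items[0]) }}")
print(template.render(items=[1, 2, 3], **context))  # "3 items, first is 1"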
def get_feature_subsets_options(study, data_types):
"""Given a study and list of data types, get the relevant feature
subsets
"""
feature_subsets = ['custom']
if 'expression' in data_types:
try:
feature_subsets.extend(study.expression.feature_subsets.keys())
except AttributeError:
pass
if 'splicing' in data_types:
try:
feature_subsets.extend(study.splicing.feature_subsets.keys())
except AttributeError:
pass
    # Cast to a set to drop duplicates, then back to a list so it can be
    # sorted deterministically.
feature_subsets = list(natural_sort(list(set(feature_subsets))))
# Make sure "variant" is first because all datasets have that
# first remove 'variant' if it is there, then add it at the front
try:
feature_subsets.pop(feature_subsets.index('variant'))
except ValueError:
pass
feature_subsets.insert(0, 'variant')
return feature_subsets | d9310f00ff001f5ddc643998c7544df6ba5382b5 | 3,656,139 |
import json
def possibilities(q=0, *num):
"""
    :param q: Number of squares to consider
    :param num: In how many squares the sum of the number of bombs is 1
    :return:
    pos -> Possible distributions of the bombs
    tot -> Number of squares in which there is only one bomb
    i -> Start of the count of squares where the sum of the bombs is 1
"""
lbn = []
lp = []
num = str(num).replace('(', '[').replace(')', ']')
num = json.loads(num)
for c4 in range(0, len(num)):
num[c4] += ['']
for c1 in range(0, 2 ** q):
pos = []
        bn = str(bin(c1)).replace('0b', '')  # bn = int(bn, base=2) -> reverses the conversion
bn = bn.rjust(q, '0')
pos += bn
ts = 0
for c2 in range(0, len(num)):
i = num[c2][0]
tot = num[c2][1] # print(bn, tot, pos)
s = 0
for c3 in range(i, tot + i):
if pos[c3] == '1':
s += 1
if num[c2][3] != '':
# print(num[c2], pos[num[c2][3]])
if pos[num[c2][3]] == '1':
s += 1
if s == num[c2][2]:
ts += 1
# print(bn, s)
if ts == len(num):
lbn += [bn]
for c5 in range(0, q):
lp += [0]
for item in lbn:
for c6 in range(0, q):
if item[c6] == '1':
lp[c6] += 1
return lp | 94c126a1bacf5bb242ad2935f949ab146f847001 | 3,656,140 |
import re
def parse_page_options(text):
"""
Parses special fields in page header. The header is separated by a line
with 3 dashes. It contains lines of the "key: value" form, which define
page options.
Returns a dictionary with such options. Page text is available as option
named "text".
"""
if type(text) != unicode:
raise TypeError('parse_page_options() expects Unicode text, not "%s".' % text.__class__.__name__)
options = dict()
text = text.replace('\r\n', '\n') # fix different EOL types
parts = text.split(u'\n---\n', 1)
if len(parts) > 1:
for line in parts[0].split('\n'):
if not line.startswith('#'):
kv = line.split(':', 1)
if len(kv) == 2:
k = kv[0].strip()
v = kv[1].strip()
if k.endswith('s'):
v = re.split('[\s,]+', v)
options[k] = v
options['text'] = parts[-1]
return options | b90b1adb7d5d6f8716b9d4e00b0e4b533393f725 | 3,656,141 |
def _read_16_bit_message(prefix, payload_base, prefix_type, is_time,
data, offset, eieio_header):
""" Return a packet containing 16 bit elements
"""
if payload_base is None:
if prefix is None:
return EIEIO16BitDataMessage(eieio_header.count, data, offset)
elif prefix_type == EIEIOPrefix.LOWER_HALF_WORD:
return EIEIO16BitLowerKeyPrefixDataMessage(
prefix, eieio_header.count, data, offset)
elif prefix_type == EIEIOPrefix.UPPER_HALF_WORD:
return EIEIO16BitUpperKeyPrefixDataMessage(
prefix, eieio_header.count, data, offset)
elif payload_base is not None and not is_time:
if prefix is None:
return EIEIO16BitPayloadPrefixDataMessage(
payload_base, eieio_header.count, data, offset)
elif prefix_type == EIEIOPrefix.LOWER_HALF_WORD:
return EIEIO16BitPayloadPrefixLowerKeyPrefixDataMessage(
prefix, payload_base, eieio_header.count, data, offset)
elif prefix_type == EIEIOPrefix.UPPER_HALF_WORD:
return EIEIO16BitPayloadPrefixUpperKeyPrefixDataMessage(
prefix, payload_base, eieio_header.count, data, offset)
elif payload_base is not None and is_time:
if prefix is None:
return EIEIO16BitTimedPayloadPrefixDataMessage(
payload_base, eieio_header.count, data, offset)
elif prefix_type == EIEIOPrefix.LOWER_HALF_WORD:
return EIEIO16BitTimedPayloadPrefixLowerKeyPrefixDataMessage(
prefix, payload_base, eieio_header.count, data, offset)
elif prefix_type == EIEIOPrefix.UPPER_HALF_WORD:
return EIEIO16BitTimedPayloadPrefixUpperKeyPrefixDataMessage(
prefix, payload_base, eieio_header.count, data, offset)
return EIEIOWithoutPayloadDataMessage(eieio_header, data, offset) | b552d06b314d47ee3ae928ebcee678c65bd24f84 | 3,656,142 |
def test_linear():
""" Tests that KernelExplainer returns the correct result when the model is linear.
(as per corollary 1 of https://arxiv.org/abs/1705.07874)
"""
np.random.seed(2)
x = np.random.normal(size=(200, 3), scale=1)
# a linear model
def f(x):
return x[:, 0] + 2.0*x[:, 1]
phi = shap.KernelExplainer(f, x).shap_values(x, l1_reg="num_features(2)", silent=True)
assert phi.shape == x.shape
# corollary 1
expected = (x - x.mean(0)) * np.array([1.0, 2.0, 0.0])
np.testing.assert_allclose(expected, phi, rtol=1e-3) | 6e716d6505162aa49507b026672455d357ab7c2b | 3,656,143 |
def csm_shape(csm):
"""
Return the shape field of the sparse variable.
"""
return csm_properties(csm)[3] | a74357086a9d7233cabed1c6ddc14fdbdbe0b41f | 3,656,144 |
def hyperlist_to_labellist(hyperlist):
"""
:param hyperlist:
:return: labellist, labels to use for plotting
"""
return [hyper_to_label(hyper) for hyper in hyperlist] | 9587694d783ccbd122b58894a2d80ee5e58dc900 | 3,656,145 |
import json
def _pretty_print_dict(dictionary):
"""Generates a pretty-print formatted version of the input JSON.
Args:
dictionary (dict): the JSON string to format.
Returns:
str: pretty-print formatted string.
"""
return json.dumps(_ascii_encode_dict(dictionary), indent=2, sort_keys=True) | 17e94d18f824253540fd968c726721542f25a95e | 3,656,146 |
def _bivariate_kdeplot(x, y, filled, fill_lowest,
kernel, bw, gridsize, cut, clip,
axlabel, cbar, cbar_ax, cbar_kws, ax, **kwargs):
"""Plot a joint KDE estimate as a bivariate contour plot."""
# Determine the clipping
if clip is None:
clip = [(-np.inf, np.inf), (-np.inf, np.inf)]
elif np.ndim(clip) == 1:
clip = [clip, clip]
# Calculate the KDE
if _has_statsmodels:
xx, yy, z = _statsmodels_bivariate_kde(x, y, bw, gridsize, cut, clip)
else:
xx, yy, z = _scipy_bivariate_kde(x, y, bw, gridsize, cut, clip)
# Plot the contours
n_levels = kwargs.pop("n_levels", 10)
scout, = ax.plot([], [])
default_color = scout.get_color()
scout.remove()
cmap = kwargs.pop("cmap", None)
color = kwargs.pop("color", None)
if cmap is None and "colors" not in kwargs:
if color is None:
color = default_color
if filled:
cmap = light_palette(color, as_cmap=True)
else:
cmap = dark_palette(color, as_cmap=True)
if isinstance(cmap, str):
if cmap.endswith("_d"):
pal = ["#333333"]
pal.extend(color_palette(cmap.replace("_d", "_r"), 2))
cmap = blend_palette(pal, as_cmap=True)
else:
cmap = mpl.cm.get_cmap(cmap)
label = kwargs.pop("label", None)
kwargs["cmap"] = cmap
contour_func = ax.contourf if filled else ax.contour
cset = contour_func(xx, yy, z, n_levels, **kwargs)
if filled and not fill_lowest:
cset.collections[0].set_alpha(0)
kwargs["n_levels"] = n_levels
if cbar:
cbar_kws = {} if cbar_kws is None else cbar_kws
ax.figure.colorbar(cset, cbar_ax, ax, **cbar_kws)
# Label the axes
if hasattr(x, "name") and axlabel:
ax.set_xlabel(x.name)
if hasattr(y, "name") and axlabel:
ax.set_ylabel(y.name)
if label is not None:
legend_color = cmap(.95) if color is None else color
if filled:
ax.fill_between([], [], color=legend_color, label=label)
else:
ax.plot([], [], color=legend_color, label=label)
return ax | ecb60ec3ffdc746f40b89158ef3c5b3a03e85bfc | 3,656,147 |
def unified_load(namespace, subclasses=None, recurse=False):
"""Provides a unified interface to both the module and class loaders,
finding modules by default or classes if given a ``subclasses`` parameter.
"""
if subclasses is not None:
return ClassLoader(recurse=recurse).load(namespace, subclasses=subclasses)
else:
return ModuleLoader(recurse=recurse).load(namespace) | 62f5f4e17d3d232bfa72090a836f89f782068b53 | 3,656,149 |
def generate_free_rooms(room_times: dict) -> dict:
"""
Generates data structure for getting free rooms for each time.
"""
# create data format
free_rooms = {'M': {},
'Tu': {},
'W': {},
'Th': {},
'F': {}
}
# add empty lists for each time
for dotw in free_rooms:
for i in range(0, 144):
free_rooms[dotw][i] = []
# iterate through all the rooms. days, and times
for room in room_times:
for day in room_times[room]:
for time in room_times[room][day]:
# add the room to the corresponding time
free_rooms[day][time].append(room)
return free_rooms | e60df355acd84e60c08ba34a45a2131d8d4519b4 | 3,656,150 |
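A small usage sketch; the data is invented, and room_times is assumed to map room -> day -> list of slot indices (0-143) during which that room is free.
room_times = {"101": {"M": [10, 11], "W": [10]},
              "202": {"M": [10]}}
free = generate_free_rooms(room_times)
print(free["M"][10])  # ['101', '202']
print(free["W"][10])  # ['101']
print(free["F"][0])   # []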
def code_parse_line(li, pattern_type="import/import_externa"):
"""
    Parse an import line and return the imported package names
    (optionally restricted to external packages).
"""
### Import pattern
if pattern_type == "import":
if li.find("from") > -1:
l = li[li.find("from") + 4 : li.find("import")].strip().split(",")
else:
l = li.strip().split("import ")[1].strip().split(",")
l = [x for x in l if x != ""]
l = np_list_dropduplicate(l)
return l
# Only external
if pattern_type == "import_extern":
if li.find("from") > -1:
l = li[li.find("from") + 4 : li.find("import")].strip().split(",")
else:
l = li.strip().split("import ")[1].strip().split(",")
l = [x for x in l if x != ""]
l = [x for x in l if x[0] != "."]
l = [x.split(".")[0].split("as")[0].split("#")[0].strip() for x in l]
l = np_list_dropduplicate(l)
return l | 347b3d0c3192978beb4c26a1950d86482812310b | 3,656,151 |
def get_high(pair, path="https://api.kraken.com/0/public"):
""" Get the last 24h high price of `pair`.
Parameters
----------
pair : str
Code of the requested pair(s). Comma delimited if several pair.
path : str
Path of the exchange to request.
Returns
-------
float or dict
        Last 24h high price(s).
"""
return _get_ticker(pair, 'h', path) | 8443ad24450e8f7bd2b6fac339e5e2b9149685c1 | 3,656,152 |
def SIx():
"""
Reads in future LENS SI-x data
Returns
----------
leafmean : array leaf indices (ens x year x lat x lon)
latmean : array last freeze indices (ens x year x lat x lon)
lat : array of latitudes
lon : array of longitudes
lstfrz : list last freeze indices
"""
directory = '/volumes/eas-shared/ault/ecrl/spring-indices/data/'
versions=['002','003','004','005','006','007','008','009','010','011','012','013','014','015','016','017','018','019','020','021','022','023','024','025','026','027','028','029','030']
leaf=[]
lstfrz = []
for version in versions:
years = 'b.e11.BRCP85C5CNBDRD.f09_g16.%s.cam.h.SI-x.2006-2080.nc' % version
filename = directory + years
values = Dataset(filename)
lon = values.variables['lon'][189:240]
lat = values.variables['lat'][:32]
lstfrz_index = values.variables['lstfrz_index'][:,:32,189:240]
leaf_index = values.variables['leaf_index'][:,:32,189:240]
values.close()
leaf.append(leaf_index)
lstfrz.append(lstfrz_index)
latmean = np.asarray(lstfrz)
leafmean = np.asarray(leaf)
    print('Done! 1')
return leafmean, latmean, lstfrz, lat, lon | 0ac033577d73c6567ebef10437a7e44e51bf5c79 | 3,656,153 |
import scipy.stats
def make_truncnorm_gen_with_bounds(mean, std, low_bound, hi_bound):
"""
low_bound and hi_bound are in the same units as mean and std
"""
assert hi_bound > low_bound
clipped_mean = min(max(mean, low_bound), hi_bound)
if clipped_mean == low_bound:
low_sigma = -0.01 * std
hi_sigma = (hi_bound - clipped_mean) / std
elif clipped_mean == hi_bound:
low_sigma = (low_bound - clipped_mean) / std
hi_sigma = 0.01 * std
else:
low_sigma = (low_bound - clipped_mean) / std
hi_sigma = (hi_bound - clipped_mean) / std
return scipy.stats.truncnorm(low_sigma, hi_sigma,
loc=clipped_mean, scale=std) | 8e957d99141a56f804bebf931098fa147d066bb8 | 3,656,154 |
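A usage sketch; the numbers are illustrative.
# Truncated normal with mean 10, std 2, clipped to [8, 15].
gen = make_truncnorm_gen_with_bounds(mean=10, std=2, low_bound=8, hi_bound=15)
samples = gen.rvs(size=5, random_state=0)
print(samples.min() >= 8, samples.max() <= 15)  # True True
print(gen.mean())  # mean of the truncated distribution, close to 10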
from invenio_app_ils.ill.api import BORROWING_REQUEST_PID_TYPE
from invenio_app_ils.ill.proxies import current_ils_ill
from invenio_app_ils.items.api import ITEM_PID_TYPE
from invenio_app_ils.proxies import current_app_ils
from invenio_app_ils.errors import UnknownItemPidTypeError
def resolve_item_from_loan(item_pid):
"""Resolve the item referenced in loan based on its PID type."""
if item_pid["type"] == ITEM_PID_TYPE:
rec_cls = current_app_ils.item_record_cls
elif item_pid["type"] == BORROWING_REQUEST_PID_TYPE:
rec_cls = current_ils_ill.borrowing_request_record_cls
else:
raise UnknownItemPidTypeError(pid_type=item_pid["type"])
return rec_cls.get_record_by_pid(item_pid["value"]) | f58ea857a445f2e6e01f426656f87a2032ea8306 | 3,656,155 |
def delta(s1, s2):
""" Find the difference in characters between s1 and s2.
Complexity: O(n), n - length of s1 or s2 (they have the same length).
Returns:
dict, format {extra:[], missing:[]}
extra: list, letters in s2 but not in s1
missing: list, letters in s1 but not in s2
"""
letters = {}
for c in s1:
if c not in letters:
letters[c] = 1
else:
letters[c] += 1
extra = [] # letters which are in s2 but not in s1
for c in s2:
if c not in letters:
extra.append(c)
else:
letters[c] -=1
missing = [] # letters which are in s1 but not in s2
    for (letter, count) in letters.items():
if count > 0:
missing.append(letter)
return {'extra': extra, 'missing': missing} | e439b5a4cf634f5e53fbf845b5774342cedeb404 | 3,656,157 |
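A quick usage sketch.
print(delta("abcd", "abce"))
# {'extra': ['e'], 'missing': ['d']}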
def mean_predictive_value(targets, preds, cm=None, w=None, adjusted=False):
"""
:purpose:
Calculates the mean predictive value between a discrete target and pred array
:params:
targets, preds : discrete input arrays, both of shape (n,)
cm : if you have previously calculated a confusion matrix, pass it here to save the computation.
set as None, which makes the function calculate the confusion matrix
w : weights at each index of true and pred. array of shape (n,)
if no w is set, it is initialized as an array of ones
such that it will have no impact on the output
adjusted : bool. if true, adjust the output for chance (making 0 the worst
and 1 the best score). defaults to false
:returns:
mean_predictive_value : float, the mean predictive value of the targets and preds array
:example:
>>> from fastdist import fastdist
>>> import numpy as np
>>> true = np.random.RandomState(seed=0).randint(2, size=10000)
>>> pred = np.random.RandomState(seed=1).randint(2, size=10000)
>>> fastdist.mean_predictive_value(true, pred)
0.49030739883826424
by saskra
"""
w = init_w(w, len(targets))
if cm is None:
cm = confusion_matrix(targets, preds, w=w)
n = cm.shape[0]
diag, columns_sums = np.zeros(n), np.zeros(n)
for i in range(n):
diag[i] = cm[i][i] # sum of the diagonal = true results
for j in range(n):
columns_sums[j] += cm[i][j] # sums of the columns = predictions per class
class_div = diag / columns_sums # fraction of true results among the predicted ones per class
div_mean = 0
for i in range(n):
div_mean += class_div[i]
div_mean /= n # mean fraction of true results among the predicted ones
if adjusted:
div_mean -= 1 / n
div_mean /= 1 - 1 / n
return div_mean | 9e7b7047d0dcf79509e544ca8bb0d621d1ce283d | 3,656,158 |
def delta(phase,inc, ecc = 0, omega=0):
"""
Compute the distance center-to-center between planet and host star.
___
INPUT:
phase: orbital phase in radian
inc: inclination of the system in radian
OPTIONAL INPUT:
    ecc: orbital eccentricity (default 0)
    omega: argument of periastron in radian (default 0)
//
OUTPUT:
distance center-to-center, double-float number.
___
"""
phase = 2*np.pi*phase
if ecc == 0 and omega == 0:
delta = np.sqrt(1-(np.cos(phase)**2)*(np.sin(inc)**2))
else:
delta = (1.-ecc**2.)/(1.-ecc*np.sin(phase-omega))* np.sqrt((1.-(np.cos(phase))**2.*(np.sin(inc))**2))
return delta | 797d84618ade3e84b63a1a40e7728de77d5465ca | 3,656,159 |
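A usage sketch, assuming the returned separation is normalized to the orbital radius; for a circular, edge-on orbit (inc = pi/2) the centers align at phase 0.
import numpy as np

print(delta(0.0, np.pi / 2))   # 0.0 (mid-transit alignment)
print(delta(0.25, 0.0))        # 1.0 (face-on orbit, constant separation)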