content | sha1 | id
---|---|---|
def asses_completeness(language_code: str, sw: ServiceWorker = Depends(get_sw)):
"""
    Run a completeness check for the given language: check frontend (fe), backend (be), domains and entries.
    @param language_code: language to check
    @param sw: service worker dependency
    @return: result of the completeness check
"""
if language_code not in sw.messages.get_added_languages():
raise ApplicationException(HTTP_404_NOT_FOUND, "Language not yet added")
return sw.translation.asses_completion(language_code) | 9de6a9130ec34e47782679ac63d80707de5b98ce | 3,650,574 |
def create_intrinsic_node_class(cls):
"""
Create dynamic sub class
"""
class intrinsic_class(cls):
"""Node class created based on the input class"""
def is_valid(self):
            raise TemplateAttributeError('intrinsic class shouldn\'t be directly used')
intrinsic_class.__name__ = '%s_intrinsic' % cls.__name__
return intrinsic_class | ddcb0ba5f36981288fd9748f1f533f02f1eb1604 | 3,650,575 |
import cv2
def segment_fish(image):
"""Attempts to segment the clown fish out of the provided image."""
hsv_image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
light_orange = (1, 190, 200)
dark_orange = (18, 255, 255)
mask = cv2.inRange(hsv_image, light_orange, dark_orange)
light_white = (0, 0, 200)
dark_white = (145, 60, 255)
mask_white = cv2.inRange(hsv_image, light_white, dark_white)
final_mask = mask + mask_white
result = cv2.bitwise_and(image, image, mask=final_mask)
result = cv2.GaussianBlur(result, (7, 7), 0)
return result | c9ee166f12e9c344143f677939a82dd1a00a5fb5 | 3,650,577 |
def enable_faster_encoder(self, need_build=True, use_fp16=False):
"""
    Compiles the fused encoder operator integrated with FasterTransformer using
    JIT (Just-In-Time) compilation and replaces the `forward` function of the
    `paddle.nn.TransformerEncoder` and `paddle.nn.TransformerEncoderLayer`
    sub-layers of `self` to support inference using FasterTransformer.
Examples:
.. code-block:: python
from paddlenlp.ops import enable_faster_encoder, disable_faster_encoder
model.eval()
model = enable_faster_encoder(model)
enc_out = model(src, src_mask)
model = disable_faster_encoder(model)
"""
def init_func(layer):
if isinstance(layer, TransformerEncoderLayer):
is_usable = True
if layer._config['bias_attr'] == False:
logger.warning("`False` for paddle.nn.TransformerEncoder's" \
" parameter `bias_attr` is not supported in " \
"FasterTransformer by now. The original forward" \
" will be involved.")
is_usable = False
if layer._config['activation'] not in ('relu', 'gelu'):
logger.warning("Only 'relu' or 'gelu' is supported by now. " \
"The original forward will be involved.")
is_usable = False
if is_usable:
layer.forward = layer._ft_forward
elif isinstance(layer, TransformerEncoder):
layer.forward = layer._ft_forward
if use_fp16:
convert_to_fp16(layer)
if not self.training:
if need_build:
try:
load("FasterTransformer", verbose=True)
except Exception:
logger.warning(
"Exception occurs when using FasterTransformer. " \
"The original forward will be involved. ")
return self
for layer in self.children():
layer.apply(init_func)
return self | 4da1f669cefd291df4bc790dfc68fcbe5ce93f86 | 3,650,579 |
def func(*x):
""" Compute the function to minimise.
Vector reshaped for more readability.
"""
res = 0
x = np.array(x)
x = x.reshape((n, 2))
for i in range(n):
for j in range(i+1, n):
(x1, y1), (x2, y2) = x[i, :], x[j, :]
delta = (x2 - x1)**2 + (y2 - y1)**2 - distances[i, j]**2
res += delta**2
return res | 775d4330ca77e04662f1920dd2160631deb30430 | 3,650,580 |
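A sketch of how this objective can be driven with SciPy, assuming `n` and `distances` live in the enclosing (here: module) scope; it recovers 2-D point positions from their pairwise distances.

import numpy as np
from scipy.optimize import minimize

n = 4
rng = np.random.default_rng(0)
true_points = rng.random((n, 2))
# Pairwise distance matrix that the objective tries to reproduce
distances = np.linalg.norm(true_points[:, None, :] - true_points[None, :, :], axis=-1)

# minimize passes a flat (2*n,) vector; func reshapes it to (n, 2) internally
result = minimize(func, x0=rng.random(2 * n), method="L-BFGS-B")
print(result.fun)  # residual; near 0 means the recovered layout reproduces the distances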
import torch
def transform_target(target, classes=None):
"""
Accepts target value either single dimensional torch.Tensor or (int, float)
:param target:
:param classes:
:return:
"""
if isinstance(target, torch.Tensor):
if target.ndim == 1:
target = target.item() if target.shape[0] == 1 else target
if target.ndim == 0 and classes is None:
return round(target.item(), 2)
if target.shape[0] == 1 and type(classes) in (list, tuple) and classes:
return classes[target]
# Multi-label
if target.shape[0] > 1 and type(classes) in (list, tuple) and classes:
return ",".join([classes[index] for index, value in enumerate(target) if value])
elif isinstance(target, int) and classes:
target = classes[target]
return target | 5e1423b4beac4385fa4f328bfdfeed2859c28f7b | 3,650,581 |
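A few illustrative calls covering the scalar, single-element, multi-label and plain-int branches, with a hypothetical class list:

import torch

classes = ["cat", "dog", "fish"]                           # hypothetical label names
print(transform_target(torch.tensor(0.3456)))              # 0.35 (scalar, no classes)
print(transform_target(torch.tensor([2]), classes))        # "fish" (single-element tensor)
print(transform_target(torch.tensor([1, 0, 1]), classes))  # "cat,fish" (multi-hot vector)
print(transform_target(1, classes))                        # "dog" (plain int)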
from typing import List
from typing import Tuple
def merge_all_regions(out_path: str, id_regions: List[Tuple[int, File]]) -> Tuple[int, int, File]:
"""
Recursively merge a list of region files.
"""
if len(id_regions) == 1:
# Base case 1.
[(sample_id, region_file)] = id_regions
return (sample_id, sample_id, region_file)
elif len(id_regions) == 2:
# Base case 2.
[(sample1_id, region1_file), (sample2_id, region2_file)] = id_regions
else:
# Recursive case.
k = find_midpoint(len(id_regions))
sample1_id, _, region1_file = merge_all_regions(out_path, id_regions[:k])
_, sample2_id, region2_file = merge_all_regions(out_path, id_regions[k:])
return (
sample1_id,
sample2_id,
merge_regions(out_path, sample1_id, region1_file, sample2_id, region2_file),
) | d9ebbdfec49b6e5702e4c16476a20440185e39ef | 3,650,582 |
def check_for_collision(sprite1: arcade.Sprite,
sprite2: arcade.Sprite) -> bool:
"""Check for collision between two sprites.
    Used instead of Arcade's default implementation because we need to
    return False when there is only a one-pixel overlap and the game is
    not multiplayer.
"""
allowed_overlap = 0
if isinstance(sprite1, player.Player):
if isinstance(sprite1.game, game.Game):
allowed_overlap = 1
x_collision = (
sprite1.right - allowed_overlap > sprite2.left + allowed_overlap
and sprite1.left + allowed_overlap < sprite2.right - allowed_overlap
)
if not x_collision:
return False
return (
sprite1.top - allowed_overlap > sprite2.bottom + allowed_overlap
and sprite1.bottom + allowed_overlap < sprite2.top - allowed_overlap
) | 679de76d880c2e2e9ac34e0d87cc5cdd0211daa9 | 3,650,584 |
def modify_color(hsbk, **kwargs):
"""
Helper function to make new colors from an existing color by modifying it.
:param hsbk: The base color
:param hue: The new Hue value (optional)
:param saturation: The new Saturation value (optional)
:param brightness: The new Brightness value (optional)
:param kelvin: The new Kelvin value (optional)
"""
return hsbk._replace(**kwargs) | ecc5118873aaf0e4f63bad512ea61d2eae0f7ead | 3,650,585 |
import numpy as np
from sklearn.model_selection import train_test_split
def train_val_test_split(df, train_p=0.8, val_p=0.1, state=1, shuffle=True):
"""Wrapper to split data into train, validation, and test sets.
Parameters
-----------
df: pd.DataFrame, np.ndarray
Dataframe containing features (X) and labels (y).
train_p: float
Percent of data to assign to train set.
val_p: float
Percent of data to assign to validation set.
state: int or None
Int will make the split repeatable. None will give a different random
split each time.
shuffle: bool
If True, randomly shuffle the data before splitting.
"""
test_p = 1 - val_p / (1 - train_p)
train, val = train_test_split(df, train_size=train_p, shuffle=shuffle,
random_state=state)
test = None
if not np.isclose(test_p, 0):
val, test = train_test_split(val, test_size=test_p, random_state=state)
return train, val, test | 67b50b172f94ee65981ab124f03e192c7631c49c | 3,650,586 |
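A quick sanity check of the proportions with a toy DataFrame, using scikit-learn's train_test_split imported above: with the defaults, the second split sends half of the 20% hold-out to the test set, giving an 80/10/10 split.

import pandas as pd

df = pd.DataFrame({"x": range(100), "y": [i % 2 for i in range(100)]})
train, val, test = train_val_test_split(df, train_p=0.8, val_p=0.1)
print(len(train), len(val), len(test))  # 80 10 10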
def add_logs_to_table_heads(max_logs):
"""Adds log headers to table data depending on the maximum number of logs from trees within the stand"""
master = []
for i in range(2, max_logs + 1):
for name in ['Length', 'Grade', 'Defect']:
master.append(f'Log {i} {name}')
if i < max_logs:
master.append('Between Logs Feet')
return master | 5db494650901bfbb114135da9596b9b453d47568 | 3,650,587 |
def stations_at_risk(stations, level):
"""Returns a list of tuples, (station, risk_level) for all stations with risk above level"""
level = risk_level(level)
stations = [(i, station_flood_risk(i)) for i in stations]
return [i for i in stations if risk_level(i[1]) >= level] | c18ef9af1ac02633f2daed9b88dfe6d72e83481a | 3,650,588 |
def unproxy(proxy):
"""Return a new copy of the original function of method behind a proxy.
The result behaves like the original function in that calling it
does not trigger compilation nor execution of any compiled code."""
if isinstance(proxy, types.FunctionType):
return _psyco.unproxycode(proxy.func_code)
if isinstance(proxy, types.MethodType):
f = unproxy(proxy.im_func)
return new.instancemethod(f, proxy.im_self, proxy.im_class)
raise TypeError, "%s objects cannot be proxies" % type(proxy).__name__ | 7fad2339a8e012fd95117b73b79a371d4488e439 | 3,650,590 |
from typing import Optional
def get_measured_attribute(data_model, metric_type: str, source_type: str) -> Optional[str]:
"""Return the attribute of the entities of a source that are measured in the context of a metric.
For example, when using Jira as source for user story points, the points of user stories (the source entities) are
summed to arrive at the total number of user story points.
"""
attribute = (
data_model["sources"].get(source_type, {}).get("entities", {}).get(metric_type, {}).get("measured_attribute")
)
return str(attribute) if attribute else attribute | f15379e528b135ca5d9d36f50f06cb95a145b477 | 3,650,591 |
def getIntArg(arg, optional=False):
"""
Similar to "getArg" but return the integer value of the arg.
Args:
arg (str): arg to get
optional (bool): argument to get
Returns:
int: arg value
"""
return(int(getArg(arg, optional))) | a30e39b5a90bd6df996bdd8a43faf787aed7128f | 3,650,593 |
from typing import Iterable
def get_in_with_default(keys: Iterable, default):
"""`get_in` function, returning `default` if a key is not there.
>>> get_in_with_default(["a", "b", 1], 0)({"a": {"b": [0, 1, 2]}})
1
>>> get_in_with_default(["a", "c", 1], 0)({"a": {"b": [0, 1, 2]}})
0
"""
getter = get_in(keys)
def get_in_with_default(x):
try:
return getter(x)
except (KeyError, IndexError, TypeError):
return default
return get_in_with_default | dbb5a9753bad224245ffea884e33802930bb8ded | 3,650,594 |
import numpy as np
def conv_HSV2BGR(hsv_img):
    """Convert an HSV image to a BGR image.
    Arguments:
        hsv_img {numpy.ndarray} -- HSV image (3 channels)
    Returns:
        numpy.ndarray -- BGR image (3 channels)
    """
V = hsv_img[:, :, 2]
C = hsv_img[:, :, 1]
H_p = hsv_img[:, :, 0] / 60
X = C * (1 - np.abs(H_p % 2 - 1))
Z = np.zeros_like(C)
vals = [[Z, X, C], [Z, C, X], [X, C, Z], [C, X, Z], [C, Z, X], [X, Z, C]]
bgr_img = np.zeros_like(hsv_img)
for i in range(6):
idx = (i <= H_p) * (H_p < (i + 1))
bgr_img[:, :, 0][idx] = (V - C)[idx] + vals[i][0][idx]
bgr_img[:, :, 1][idx] = (V - C)[idx] + vals[i][1][idx]
bgr_img[:, :, 2][idx] = (V - C)[idx] + vals[i][2][idx]
return (bgr_img * 255).astype(np.uint8) | f748c88e9f4b2a3da2ee7d7703b0d3c9615e564b | 3,650,595 |
import torch
import torch.nn.functional as F
def remap(tensor, map_x, map_y, align_corners=False):
"""
Applies a generic geometrical transformation to a tensor.
"""
if not tensor.shape[-2:] == map_x.shape[-2:] == map_y.shape[-2:]:
raise ValueError("Inputs last two dimensions must match.")
batch_size, _, height, width = tensor.shape
# grid_sample need the grid between -1/1
map_xy = torch.stack([map_x, map_y], dim=-1)
map_xy_norm = normalize_pixel_coordinates(map_xy, height, width)
# simulate broadcasting since grid_sample does not support it
map_xy_norm = map_xy_norm.expand(batch_size, -1, -1, -1)
# warp ans return
tensor_warped = F.grid_sample(tensor, map_xy_norm, align_corners=align_corners)
return tensor_warped | ff88d66b6692548979e45d2a00f6905e2d973c2a | 3,650,596 |
def AutoRegression(df_input,
target_column,
time_column,
epochs_to_forecast=1,
epochs_to_test=1,
hyper_params_ar={}):
"""
    This function fits an autoregressive (AR) model, selecting the lag order automatically,
    and produces a forecast with upper and lower confidence bounds.
    Parameters:
    - df_input (pandas.DataFrame): Input Time Series.
    - target_column (str): name of the column containing the target feature
    - time_column (str): name of the column containing the pandas Timestamps
    - epochs_to_forecast (int): number of steps for predicting future data
    - epochs_to_test (int): number of steps corresponding to most recent records to test on
    - hyper_params_ar (dict): parameters of the AR model
Returns:
- df_output (pandas.DataFrame): Output DataFrame with forecast
"""
# create and evaluate an updated autoregressive model
# load dataset
input_series = df_input[:-(epochs_to_forecast+epochs_to_test)].set_index(time_column, 1)[target_column]
# split dataset
model = ar_select_order(input_series, **hyper_params_ar)
for hyp_param in ["maxlag","glob","ic"]:
if hyp_param in hyper_params_ar.keys():
del hyper_params_ar[hyp_param]
model = AutoReg(input_series, lags=model.ar_lags, **hyper_params_ar)
res = model.fit()
print(res.summary())
#start_idx = df_input[:-(epochs_to_forecast+epochs_to_test)][time_column].max()
start_idx = df_input[-(epochs_to_forecast+epochs_to_test):][time_column].min()
end_idx = df_input[-(epochs_to_forecast+epochs_to_test):][time_column].max()
# =============================================================================
# ### for statsmodels< 0.12.0
# #forecast_steps = model.predict(res.params, start=start_idx, end=end_idx, dynamic=True)
# forecast = df_input[target_column] * np.nan
# forecast[-(epochs_to_forecast+epochs_to_test):] = forecast_steps
# df_output = df_input.copy()
# df_output["forecast"] = forecast
# df_output["forecast_up"] = forecast * 1.1
# df_output["forecast_low"] = forecast * 0.9
# =============================================================================
### for statsmodels>= 0.12.0
forecast_steps = res.get_prediction(start=start_idx, end=end_idx)
forecast_steps_mean = forecast_steps.predicted_mean
forecast_steps_low = forecast_steps.conf_int()["lower"]
forecast_steps_up = forecast_steps.conf_int()["upper"]
forecast = df_input[target_column] * np.nan
forecast_low = df_input[target_column] * np.nan
forecast_up = df_input[target_column] * np.nan
forecast[-(epochs_to_forecast+epochs_to_test):] = forecast_steps_mean
forecast_low[-(epochs_to_forecast+epochs_to_test):] = forecast_steps_low
forecast_up[-(epochs_to_forecast+epochs_to_test):] = forecast_steps_up
df_output = df_input.copy()
df_output["forecast"] = forecast
df_output["forecast_low"] = forecast_low
df_output["forecast_up"] = forecast_up
return df_output | 704daf914897b7a43971b22d721ec0f1bb919d3e | 3,650,597 |
def VMACD(prices, timeperiod1=12, timeperiod2=26, timeperiod3=9):
"""
    39. VMACD: Volume Moving Average Convergence and Divergence
    (Vol Moving Average Convergence and Divergence, VMACD)
    Description:
        VMACD measures the trend of trading volume and belongs to the family of
        volume trend indicators. It uses the same formulas as MACD, but is
        computed on the volume (VOL) instead of the closing price (CLOSE).
    Calculation:
        SHORT = EMA(VOL, N1)
        LONG = EMA(VOL, N2)
        DIFF = SHORT - LONG
        DEA = EMA(DIFF, M)
        VMACD = DIFF - DEA
        Typically N1=12, N2=26, M=9.
    :param prices: DataFrame with a 'volume' column
    :param timeperiod1: N1
    :param timeperiod2: N2
    :param timeperiod3: M
    :return:
"""
assert prices is not None
timeperiod = max(timeperiod1, timeperiod2, timeperiod3)
_assert_greater_or_equal(len(prices), timeperiod)
assert isinstance(timeperiod1, int)
assert isinstance(timeperiod2, int)
assert isinstance(timeperiod3, int)
df_price = prices.copy()
df_price = df_price.sort_index(ascending=True)
EMA = ta.EMA
short = EMA(df_price['volume'].values.astype(float), timeperiod1)
long = EMA(df_price['volume'].values.astype(float), timeperiod2)
diff = short - long
dea = EMA(diff, timeperiod3)
vmacd = diff - dea
df_price['VMACD'] = vmacd
return df_price['VMACD'] | 5de5f372cb7ef6762b82f30d16465469b2cb6afc | 3,650,598 |
from . import graphics
def merge_all_mods(list_of_mods, gfx=None):
"""Merges the specified list of mods, starting with graphics if set to
pre-merge (or if a pack is specified explicitly).
Params:
list_of_mods
a list of the names of mods to merge
gfx
a graphics pack to be merged in
Returns:
A list of status ints for each mod given:
-1: Unmerged
0: Merge was successful, all well
1: Potential compatibility issues, no merge problems
2: Non-fatal error, overlapping lines or non-existent mod etc
3: Fatal error, not returned (rebuilds to previous, rest unmerged)
"""
clear_temp()
if gfx:
add_graphics(gfx)
elif will_premerge_gfx():
add_graphics(graphics.current_pack())
ret_list = []
for i, mod in enumerate(list_of_mods):
status = merge_a_mod(mod)
ret_list.append(status)
if status == 3:
log.i('Mod {}, in {}, could not be merged.'.format(
mod, str(list_of_mods)))
            merged = merge_all_mods(list_of_mods[:i], gfx)
return merged + [-1]*len(list_of_mods[i:])
return ret_list | c0b6ed6df7116a0abcb0c2674c8bddabd4a52f82 | 3,650,600 |
def pearson_r_p_value(a, b, dim):
"""
2-tailed p-value associated with pearson's correlation coefficient.
Parameters
----------
a : Dataset, DataArray, GroupBy, Variable, numpy/dask arrays or scalars
Mix of labeled and/or unlabeled arrays to which to apply the function.
b : Dataset, DataArray, GroupBy, Variable, numpy/dask arrays or scalars
Mix of labeled and/or unlabeled arrays to which to apply the function.
dim : str
The dimension to apply the correlation along.
Returns
-------
Single value or tuple of Dataset, DataArray, Variable, dask.array.Array or
numpy.ndarray, the first type on that list to appear on an input.
2-tailed p-value.
See Also
--------
scipy.stats.pearsonr
    xarray.apply_ufunc
"""
return xr.apply_ufunc(_pearson_r_p_value, a, b,
input_core_dims=[[dim], [dim]],
kwargs={'axis': -1}) | d9236eaf1d7315fd61eba35bdd4cdc4f27cb9890 | 3,650,601 |
import datetime
import time
from sqlalchemy import create_engine
def get_ceilometer_usages(date, connection_string):
"""
Function which talks with openstack
"""
today = datetime.datetime.combine(date, datetime.datetime.min.time())
yesterday = today - datetime.timedelta(days=1)
engine = create_engine(connection_string)
connection = engine.connect()
query = CEILOMETER_QUERY.format(
from_ts=time.mktime(yesterday.timetuple()),
to_ts=time.mktime(today.timetuple())
)
return connection.execute(query) | b05e7f2024ebf2e2eb23a914da71b834debb66cc | 3,650,602 |
from scipy.optimize import minimize_scalar
def fit_kij(kij_bounds, eos, mix, datavle=None, datalle=None, datavlle=None,
weights_vle=[1., 1.], weights_lle=[1., 1.],
weights_vlle=[1., 1., 1., 1.], minimize_options={}):
"""
    fit_kij: attempts to fit kij to VLE, LLE, VLLE
Parameters
----------
kij_bounds : tuple
bounds for kij correction
eos : function
cubic eos to fit kij for qmr mixrule
mix: object
binary mixture
datavle: tuple, optional
(Xexp, Yexp, Texp, Pexp)
datalle: tuple, optional
(Xexp, Wexp, Texp, Pexp)
datavlle: tuple, optional
(Xexp, Wexp, Yexp, Texp, Pexp)
weights_vle: list or array_like, optional
weights_vle[0] = weight for Y composition error, default to 1.
weights_vle[1] = weight for bubble pressure error, default to 1.
weights_lle: list or array_like, optional
weights_lle[0] = weight for X (liquid 1) composition error, default to 1.
weights_lle[1] = weight for W (liquid 2) composition error, default to 1.
weights_vlle: list or array_like, optional
weights_vlle[0] = weight for X (liquid 1) composition error, default to 1.
weights_vlle[1] = weight for W (liquid 2) composition error, default to 1.
weights_vlle[2] = weight for Y (vapor) composition error, default to 1.
weights_vlle[3] = weight for equilibrium pressure error, default to 1.
minimize_options: dict
        Dictionary of any additional specification for scipy minimize_scalar
Returns
-------
fit : OptimizeResult
Result of SciPy minimize
"""
fit = minimize_scalar(fobj_kij, kij_bounds, args=(eos, mix, datavle,
datalle, datavlle, weights_vle, weights_lle,
weights_vlle), **minimize_options)
return fit | 0f2e05a64599b49f70b327e8a69a66647b4c344f | 3,650,603 |
import numpy as np
from munkres import Munkres
def calc_ac_score(labels_true, labels_pred):
"""calculate unsupervised accuracy score
Parameters
----------
labels_true: labels from ground truth
labels_pred: labels form clustering
Return
-------
ac: accuracy score
"""
nclass = len(np.unique(labels_true))
labels_size = len(labels_true)
mat = labels_size * np.ones((nclass, nclass))
idx = 0
for i in range(labels_size):
mat[labels_pred[i], labels_true[i]] -= 1.0
munkres = Munkres()
mapping = munkres.compute(mat)
ac = 0.0
for i in range(labels_size):
val = mapping[labels_pred[i]][1]
if val == labels_true[i]:
ac += 1.0
ac = ac / labels_size
return ac | 39ca30d3cdcf683dda04d429146775cffd7c0134 | 3,650,606 |
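A worked example, assuming the munkres package is installed: the predicted labels are a consistent permutation of the true cluster ids, so after the optimal mapping the accuracy is 1.0.

labels_true = [0, 0, 1, 1, 2, 2]
labels_pred = [1, 1, 2, 2, 0, 0]   # same clustering, permuted label ids
print(calc_ac_score(labels_true, labels_pred))  # 1.0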
import numpy as np
def wave_ode_gamma_neq0(t, X, *f_args):
"""
Right hand side of the wave equation ODE when gamma > 0
"""
C = f_args[0]
D = f_args[1]
CD = C*D
x, y, z = X
return np.array([-(1./(1.+y) + CD)*x + C*(1+D*CD)*(z-y), x, CD*(z-y)]) | 4b2f5f7b5b4e1c932e0758e9be10fcbc5d9fbbb7 | 3,650,607 |
from typing import Dict
def run_workflow(
config: Dict,
form_data: ImmutableMultiDict,
*args,
**kwargs
) -> Dict:
"""Executes workflow and save info to database; returns unique run id."""
# Validate data and prepare run environment
form_data_dict = __immutable_multi_dict_to_nested_dict(
multi_dict=form_data
)
__validate_run_workflow_request(data=form_data_dict)
__check_service_info_compatibility(data=form_data_dict)
document = __init_run_document(data=form_data_dict)
document = __create_run_environment(
config=config,
document=document,
**kwargs
)
# Start workflow run in background
__run_workflow(
config=config,
document=document,
**kwargs
)
response = {'run_id': document['run_id']}
return response | bfa732ceaef6fbd6865e015b9c28da68932fa2db | 3,650,608 |
from typing import List
def insertion_stack(nums: List[int]) -> List[int]:
""" A helper function that sort the data in an ascending order
Args:
nums: The original data
Returns:
a sorted list in ascending order
"""
left = []
right = []
for num in nums:
while left and left[-1] > num:
right.append(left.pop())
left.append(num)
while right:
left.append(right.pop())
return left | 045e28d763ece3dac9e1f60d50a0d51c43b75664 | 3,650,609 |
def svn_wc_get_pristine_contents(*args):
"""svn_wc_get_pristine_contents(char const * path, apr_pool_t result_pool, apr_pool_t scratch_pool) -> svn_error_t"""
return _wc.svn_wc_get_pristine_contents(*args) | 5a26e358bbd2a4341bdb1c572f98d419f676a725 | 3,650,610 |
def create_cache_key(func, key_dict=None, self=None):
"""Get a cache namespace and key used by the beaker_cache decorator.
Example::
from tg import cache
from tg.caching import create_cache_key
namespace, key = create_cache_key(MyController.some_method)
cache.get_cache(namespace).remove(key)
"""
kls = None
imfunc = im_func(func)
if imfunc:
kls = im_class(func)
func = imfunc
cache_key = func.__name__
else:
cache_key = func.__name__
if key_dict:
cache_key += " " + " ".join("%s=%s" % (k, v)
for k, v in key_dict.items())
if not kls and self:
kls = getattr(self, '__class__', None)
if kls:
return '%s.%s' % (kls.__module__, kls.__name__), cache_key
else:
return func.__module__, cache_key | 461fc998a7345d646fdaa61fd36f91c3c250d331 | 3,650,614 |
def longest_common_substring(s, t):
"""
Find the longest common substring between the given two strings
:param s: source string
:type s: str
:param t: target string
:type t: str
:return: the length of the longest common substring
:rtype: int
"""
if s == '' or t == '':
return 0
f = [[0 for _ in range(len(t) + 1)]
for _ in range(len(s) + 1)]
for i in range(len(s)):
for j in range(len(t)):
if s[i] == t[j]:
f[i + 1][j + 1] = f[i][j] + 1
return max(map(max, f)) | 66aef17a117c6cc96205664f4c603594ca496092 | 3,650,615 |
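A worked example: the longest substring shared by the two inputs below is "cde", so the function returns 3.

print(longest_common_substring("abcdef", "zzcdez"))  # 3 ("cde")
print(longest_common_substring("abc", ""))           # 0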
def correct_predictions(output_probabilities, targets):
"""
Compute the number of predictions that match some target classes in the
output of a model.
Args:
output_probabilities: A tensor of probabilities for different output
classes.
targets: The indices of the actual target classes.
Returns:
The number of correct predictions in 'output_probabilities'.
"""
_, out_classes = output_probabilities.max(dim=1)
correct = (out_classes == targets).sum()
return out_classes, correct.item() | 1bff085d95da7b37bb2232b6ac03b034e2bdb6b9 | 3,650,616 |
def resolve_all(anno, task):
"""Resolve all pending annotations."""
return (x for x in (_first_match(anno, task), _first_match_any(anno)) if x) | ca127999972644ad25741bc48c78d67aaa4adeec | 3,650,617 |
import socket
def get_free_port():
""" Find and returns free port number. """
soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
soc.bind(("", 0))
free_port = soc.getsockname()[1]
soc.close()
return free_port | d1a514a47a906c946fa3a8cb4312e71bc4f7570e | 3,650,618 |
def get_diff_list(small_list, big_list):
"""
Get the difference set of the two list.
:param small_list: The small data list.
:param big_list: The bigger data list.
:return: diff_list: The difference set list of the two list.
"""
    # Elements present in big_list but missing from small_list
diff_list = list(set(big_list).difference(set(small_list)))
return diff_list | f92d20e6edd1f11ca6436a3ada4a6ba71da37457 | 3,650,619 |
def blend_weight_arrays(n_weightsA, n_weightsB, value=1.0, weights_pp=None):
"""
Blend two 2d weight arrays with a global mult factor, and per point weight values.
The incoming weights_pp should be a 1d array, as it's reshaped for the number of influences.
Args:
n_weightsA (np.array): Weight array to blend towards n_weightsB.
n_weightsB (np.array): Target weight array to move n_weightsA towards.
value (float): Global mult factor.
weights_pp (list/float): Per point weight values. This should be a 1d array.
Returns (numpy.ndarray): Blended weights array.
"""
if n_weightsA.shape != n_weightsB.shape:
raise ValueError('Shape of both arrays must match: {}, {}'.format(n_weightsA.shape, n_weightsB.shape))
weights_pp = weights_pp or np.ones(n_weightsA.shape[0])
weights_pp = np.repeat(weights_pp, n_weightsA.shape[1]).reshape(-1, n_weightsA.shape[1]) * value
n_weights = np_interp_by_weight(n_weightsA, n_weightsB, weights_pp)
return n_weights | f5167730773718952f48a67970d62a197bd92944 | 3,650,620 |
import numpy as np
def weight_kabsch_dist(x1, x2, weights):
    """
    Compute the Mahalanobis distance between positions x1 and x2 given Kabsch weights (inverse variance)
x1 (required) : float64 array with dimensions (n_atoms,3) of one molecular configuration
x2 (required) : float64 array with dimensions (n_atoms,3) of another molecular configuration
weights (required) : float64 matrix with dimensions (n_atoms, n_atoms) of inverse (n_atoms, n_atoms) covariance
"""
# zero distance
dist = 0.0
    # compute distance as sum over independent (because covar is n_atoms x n_atoms) dimensions
for i in range(3):
disp = x1[:,i] - x2[:,i]
dist += np.dot(disp,np.dot(weights,disp))
# return value
return dist | e03c86875873af3b890fc3cfa799f037c808196e | 3,650,621 |
def calc_color_rarity(color_frequencies: dict) -> float:
"""
Return rarity value normalized to 64.
Value ascending from 0 (most rare) to 64 (most common).
"""
percentages = calc_pixel_percentages(color_frequencies)
weighted_rarity = [PERCENTAGES_NORMALIZED.get(k) * v * 64 for k,v in percentages.items()]
return sum(weighted_rarity) | 54dd3dde36dc02101b5536630e79d3d39fe18aa8 | 3,650,622 |
def exp_map(x, r, tangent_point=None):
"""
Let \(\mathcal{M}\) be a CCM of radius `r`, and \(T_{p}\mathcal{M}\) the
tangent plane of the CCM at point \(p\) (`tangent_point`).
This function maps a point `x` on the tangent plane to the CCM, using the
Riemannian exponential map.
:param x: np.array, point on the tangent plane (intrinsic coordinates);
:param r: float, radius of the CCM;
:param tangent_point: np.array, origin of the tangent plane on the CCM
(extrinsic coordinates); if `None`, defaults to `[0., ..., 0., r]`.
:return: the exp-map of x to the CCM (extrinsic coordinates).
"""
extrinsic_dim = x.shape[-1] + 1
if tangent_point is None:
tangent_point = np.zeros((extrinsic_dim,))
tangent_point[-1] = np.abs(r)
if isinstance(tangent_point, np.ndarray):
if tangent_point.shape != (extrinsic_dim,) and tangent_point.shape != (1, extrinsic_dim):
raise ValueError('Expected tangent_point of shape ({0},) or (1, {0}), got {1}'.format(extrinsic_dim, tangent_point.shape))
if tangent_point.ndim == 1:
tangent_point = tangent_point[np.newaxis, ...]
if not belongs(tangent_point, r)[0]:
raise ValueError('Tangent point must belong to manifold {}'.format(tangent_point))
else:
raise TypeError('tangent_point must be np.array or None')
if r > 0.:
return SphericalManifold.exp_map(tangent_point, x)
elif r < 0.:
return HyperbolicManifold.exp_map(tangent_point, x)
else:
return x | 2544e6f6054c602d5eae438b405b55dc995d190a | 3,650,623 |
def _get_data_column_label_in_name(item_name):
"""
:param item_name: Name of a group or dataset
:return: Data column label or ``None``
:rtype: str on None
"""
# /1.1/measurement/mca_0 should not be interpreted as the label of a
# data column (let's hope no-one ever uses mca_0 as a label)
if measurement_mca_group_pattern.match(item_name):
return None
data_column_match = measurement_data_pattern.match(item_name)
if not data_column_match:
return None
return data_column_match.group(1) | 58a50f9b28a8dd3c30eb609bbf61eeaf1b821238 | 3,650,625 |
def _auto_backward(loss,
startup_program=None,
parameter_list=None,
no_grad_set=None,
callbacks=None,
distop_context=None):
"""
    The modification is performed in place.
"""
act_no_grad_set = _get_no_grad_set(loss, no_grad_set)
assert isinstance(loss, Variable), "The target loss should be an Variable."
if callbacks is None:
callbacks = [error_clip_callback]
else:
assert (isinstance(callbacks, list))
assert len(loss.shape) == 1 and loss.shape[0] == 1, \
"The loss.shape should be (1L,), but the current loss.shape is {}. " \
"Maybe that you should call fluid.layers.mean to process the current loss.".format(
loss.shape)
program = loss.block.program
with program_guard(program, startup_program):
params_grads = append_backward(
loss,
parameter_list,
act_no_grad_set,
callbacks,
distop_context=distop_context)
return params_grads | f7c08e9677768faf125ccc2a273016312004c225 | 3,650,626 |
import re
def strip_from_ansi_esc_sequences(text):
"""
find ANSI escape sequences in text and remove them
:param text: str
:return: list, should be passed to ListBox
"""
# esc[ + values + control character
# h, l, p commands are complicated, let's ignore them
seq_regex = r"\x1b\[[0-9;]*[mKJusDCBAfH]"
regex = re.compile(seq_regex)
start = 0
response = ""
for match in regex.finditer(text):
end = match.start()
response += text[start:end]
start = match.end()
response += text[start:len(text)]
return response | 8597654defffbdde33b844a34e95bf7893a36855 | 3,650,627 |
def _concat_columns(args: list):
"""Dispatch function to concatenate DataFrames with axis=1"""
if len(args) == 1:
return args[0]
else:
_lib = cudf if HAS_GPU and isinstance(args[0], cudf.DataFrame) else pd
return _lib.concat(
[a.reset_index(drop=True) for a in args],
axis=1,
)
return None | e60a3d5120e50dbd2d1be5632042e702e5780bc6 | 3,650,628 |
import re
def applyRegexToList(list, regex, separator=' '):
"""Apply a list of regex to list and return result"""
if type(regex) != type(list):
regex = [regex]
regexList = [re.compile(r) for r in regex]
for r in regexList:
list = [l for l in list if r.match(l)]
list = [l.split(separator) for l in list]
return [i[0] for i in list] | eee1edebf361f9516e7b40ba793b0d13ea3070f3 | 3,650,629 |
def GetFileName(path: str) -> str:
"""Get the name of the file from the path
:type path: str
:rtype: str
"""
return splitext(basename(path))[0] | 4aa3a8b75a1ed926c173f9d978504ca2ed653e20 | 3,650,631 |
import re
from functools import reduce
def collapse(individual_refs):
"""Collapse references like [C1,C2,C3,C7,C10,C11,C12,C13] into 'C1-C3, C7, C10-C13'.
Args:
individual_refs (string): Uncollapsed references.
Returns:
string: Collapsed references.
"""
parts = []
for ref in individual_refs:
mtch = re.match(r"(?P<part_prefix>\D+)(?P<number>.+)", ref)
if mtch is not None:
part_prefix = mtch.group("part_prefix")
number = mtch.group("number")
try:
number = int(mtch.group("number"))
except ValueError:
pass
parts.append((part_prefix, number))
parts.sort()
def toRef(part):
return "{}{}".format(part[0], part[1])
def make_groups(accumulator, part):
prev = None
if len(accumulator) > 0:
group = accumulator[-1]
if len(group) > 0:
prev = group[-1]
if (prev != None) and (prev[0] == part[0]) and isinstance(prev[1], int) and ((prev[1] + 1) == part[1]):
group.append(part)
accumulator[-1] = group
else:
accumulator.append([part])
return accumulator
groups = reduce(make_groups, parts, [])
groups = map(lambda g: tuple(map(toRef, g)), groups)
collapsed = ""
for group in groups:
if (len(collapsed) > 1) and (collapsed[-2] != ","):
collapsed += ", "
if len(group) > 2:
collapsed += group[0] + "-" + group[-1]
else:
collapsed += ", ".join(group)
return collapsed | f4225586d30960cae74123806b8d44ff6f007584 | 3,650,632 |
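Running the docstring's own example end to end:

refs = ["C1", "C2", "C3", "C7", "C10", "C11", "C12", "C13"]
print(collapse(refs))  # C1-C3, C7, C10-C13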
def generate_fig_univariate_categorical(
df_all: pd.DataFrame,
col: str,
hue: str,
nb_cat_max: int = 7,
) -> plt.Figure:
"""
Returns a matplotlib figure containing the distribution of a categorical feature.
If the feature is categorical and contains too many categories, the smallest
categories are grouped into a new 'Other' category so that the graph remains
readable.
Parameters
----------
df_all : pd.DataFrame
The input dataframe that contains the column of interest
col : str
The column of interest
hue : str
The column used to distinguish the values (ex. 'train' and 'test')
nb_cat_max : int
The number max of categories to be displayed. If the number of categories
is greater than nb_cat_max then groups smallest categories into a new
'Other' category
Returns
-------
matplotlib.pyplot.Figure
"""
df_cat = df_all.groupby([col, hue]).agg({col: 'count'})\
.rename(columns={col: "count"}).reset_index()
df_cat['Percent'] = df_cat['count'] * 100 / df_cat.groupby(hue)['count'].transform('sum')
if pd.api.types.is_numeric_dtype(df_cat[col].dtype):
df_cat = df_cat.sort_values(col, ascending=True)
df_cat[col] = df_cat[col].astype(str)
nb_cat = df_cat.groupby([col]).agg({'count': 'sum'}).reset_index()[col].nunique()
if nb_cat > nb_cat_max:
df_cat = _merge_small_categories(df_cat=df_cat, col=col, hue=hue, nb_cat_max=nb_cat_max)
fig, ax = plt.subplots(figsize=(7, 4))
sns.barplot(data=df_cat, x='Percent', y=col, hue=hue,
palette=dict_color_palette, ax=ax)
for p in ax.patches:
ax.annotate("{:.1f}%".format(np.nan_to_num(p.get_width(), nan=0)),
xy=(p.get_width(), p.get_y() + p.get_height() / 2),
xytext=(5, 0), textcoords='offset points', ha="left", va="center")
# Shrink current axis by 20%
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
# Put a legend to the right of the current axis
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
# Removes plot borders
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
new_labels = [truncate_str(i.get_text(), maxlen=45) for i in ax.yaxis.get_ticklabels()]
ax.yaxis.set_ticklabels(new_labels)
return fig | 9e6f9b8739b1907f67c864ceaf177f9f1007d35b | 3,650,634 |
import numpy as np
def pt_sharp(x, Ps, Ts, window_half, method='diff'):
"""
Calculate the sharpness of extrema
Parameters
----------
x : array-like 1d
voltage time series
Ps : array-like 1d
time points of oscillatory peaks
Ts : array-like 1d
time points of oscillatory troughs
window_half : int
        Number of samples in each direction around extrema to use for sharpness estimation
    method : {'diff', 'deriv'}
        How sharpness is estimated: 'diff' uses amplitude differences, 'deriv' uses the mean absolute derivative
Returns
-------
Psharps : array-like 1d
sharpness of peaks
Tsharps : array-like 1d
sharpness of troughs
"""
# Assure input has the same number of peaks and troughs
if len(Ts) != len(Ps):
raise ValueError('Length of peaks and troughs arrays must be equal')
# Calculate the sharpness of each peak
P = len(Ps)
Psharps = np.zeros(P)
for e in range(P):
if method == 'deriv':
Edata = x[Ps[e]-window_half: Ps[e]+window_half+1]
Psharps[e] = np.mean(np.abs(np.diff(Edata)))
elif method == 'diff':
Psharps[e] = np.mean((x[Ps[e]]-x[Ps[e]-window_half],x[Ps[e]]-x[Ps[e]+window_half]))
T = len(Ts)
Tsharps = np.zeros(T)
for e in range(T):
if method == 'deriv':
Edata = x[Ts[e]-window_half: Ts[e]+window_half+1]
Tsharps[e] = np.mean(np.abs(np.diff(Edata)))
elif method == 'diff':
Tsharps[e] = np.mean((x[Ts[e]-window_half]-x[Ts[e]],x[Ts[e]+window_half]-x[Ts[e]]))
return Psharps, Tsharps | 6d06b9343c71115fc660a298569794933267bd51 | 3,650,635 |
import datetime
def convert_date(string, report_date, bad_dates_rep, bad_dates_for):
    """
    Converts a date string in format dd/mm/yyyy
    to format yyyy-mm-dd
"""
x = string.split('/')
try:
date = datetime.datetime(int(x[2]),int(x[1]),int(x[0]))
date_str = date.strftime("%Y-%m-%d")
return(date_str)
# Print out cases that do not match input date convention
except (IndexError, ValueError) as errors:
bad_dates_rep.append(report_date)
bad_dates_for.append(string)
        return(string) | f84db7bc2edc070a4c6b9c475458081701bca1eb | 3,650,637 |
def render_raw(request, paste, data):
"""Renders RAW content."""
return HttpResponse(paste.content, content_type="text/plain") | 2ec6fdb719e831988a4384e3690d2bec0faad405 | 3,650,638 |
def node_avg():
"""get the avg of the node stats"""
node_raw = ["average", 0, 0, 0]
for node in node_stats():
node_raw[1] += float(node[1])
node_raw[2] += float(node[2])
node_raw[3] += float(node[3])
num = len(node_stats())
node_avg = ["average",
"{:.2f}".format(node_raw[1]/num),
"{:.2f}".format(node_raw[2]/num),
"{:.2f}".format(node_raw[3]/num)]
return node_avg | 985e1f848945d8952ec224a0dd56a02e84b2ea57 | 3,650,639 |
from typing import Union
from cryptography.fernet import Fernet, InvalidToken
def decrypt_vault_password(key: bytes, password: Union[str, bytes]) -> Union[str, bool]:
"""Decrypt and return the given vault password.
:param key: The key to be used during the decryption
:param password: The password to decrypt
"""
if isinstance(password, str):
password = password.encode("utf-8")
f = Fernet(key)
try:
return f.decrypt(password).decode()
except InvalidToken:
return False | 3311b6dc7a9fba4152545ff3ca89881e9ceebb94 | 3,650,640 |
from typing import Optional
def get_gv_rng_if_none(rng: Optional[rnd.Generator]) -> rnd.Generator:
"""get gym-gridverse module rng if input is None"""
return get_gv_rng() if rng is None else rng | 008bf9d22fb6c9f07816e62c2174c60839a5353f | 3,650,642 |
def fill_name(f):
"""
    Attempts to generate a unique id and a parent from a BioPython SeqRecord.
Mutates the feature dictionary passed in as parameter.
"""
global UNIQUE
# Get the type
ftype = f['type']
# Get gene name
gene_name = first(f, "gene")
# Will attempt to fill in the uid from attributes.
uid = ''
# Deal with known types.
if ftype == 'gene':
name = gene_name or first(f, "locus_tag")
uid = name
elif ftype == 'CDS':
count = get_next_count(ftype=ftype, label=gene_name)
prot = first(f, "protein_id") or f"{gene_name}-CDS-{count}"
uid = f"{prot}"
name = prot
elif ftype == 'mRNA':
count = get_next_count(ftype=ftype, label=gene_name)
uid = first(f, "transcript_id") or f"{gene_name}-mRNA-{count}"
name = uid
elif ftype == "exon":
name = gene_name
else:
name = first(f, "organism") or first(f, "transcript_id") or None
uid = first(f, "transcript_id")
# Set the unique identifier.
f['id'] = uid or f"{ftype}-{next(COUNTER)}"
# Set the feature name.
f['name'] = name or ftype
return f | d2351eb509d72b6b2ef34b7c0b01c339acd52677 | 3,650,643 |
from pandas import DataFrame
def run_single_softmax_experiment(beta, alpha):
"""Run experiment with agent using softmax update rule."""
print('Running a contextual bandit experiment')
cb = ContextualBandit()
ca = ContextualAgent(cb, beta=beta, alpha=alpha)
trials = 360
for _ in range(trials):
ca.run()
df = DataFrame(ca.log, columns=('context', 'action', 'reward', 'Q(c,23)',
'Q(c,14)', 'Q(c,8)', 'Q(c,3)'))
# fn = 'softmax_experiment.csv'
# df.to_csv(fn, index=False)
# print('Sequence written in', fn)
# globals().update(locals()) #
return df | 953c07ae1cdc25782f24206a0ce02bf4fc15202b | 3,650,644 |
def available(name):
"""
Returns ``True`` if the specified service is available, otherwise returns
``False``.
We look up the name with the svcs command to get back the FMRI
This allows users to use simpler service names
CLI Example:
.. code-block:: bash
salt '*' service.available net-snmp
"""
cmd = "/usr/bin/svcs -H -o FMRI {0}".format(name)
name = __salt__["cmd.run"](cmd, python_shell=False)
return name in get_all() | 371980f44a348faf83ab32b9d50583fc8e9bae41 | 3,650,645 |
def coincidence_rate(text):
""" Return the coincidence rate of the given text
Args:
text (string): the text to get measured
Returns:
the coincidence rate
"""
ko = 0
# measure the frequency of each letter in the cipher text
for letter in _VOCAB:
count = text.count(letter)
ko = ko + (count * (count - 1))
return ko / (len(text) * (len(text) - 1)) | ca1ca3d8b746ea40ba07af1cb96a194bf14c1d98 | 3,650,646 |
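A small demonstration; _VOCAB is not shown in the snippet, so it is assumed here to be the lowercase alphabet. Ordinary English prose typically gives an index of coincidence around 0.065, versus roughly 0.038 for uniformly random letters.

import string

_VOCAB = string.ascii_lowercase  # assumption about the vocabulary used above

raw = "It was a bright cold day in April and the clocks were striking thirteen " * 5
text = "".join(c for c in raw.lower() if c in _VOCAB)
print(coincidence_rate(text))    # index of coincidence of the sample text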
import numpy
def convert_bytes_to_ints(in_bytes, num):
"""Convert a byte array into an integer array. The number of bytes forming an integer
is defined by num
:param in_bytes: the input bytes
:param num: the number of bytes per int
:return the integer array"""
dt = numpy.dtype('>i' + str(num))
return numpy.frombuffer(in_bytes, dt) | 38b97fb9d5ecc5b55caf7c9409e4ab4a406a21d7 | 3,650,647 |
def search_spec(spec, search_key, recurse_key):
"""
Recursively scans spec structure and returns a list of values
keyed with 'search_key' or and empty list. Assumes values
are either list or str.
"""
value = []
if search_key in spec and spec[search_key]:
if isinstance(spec[search_key], str):
value.append(spec[search_key])
else:
value += spec[search_key]
if recurse_key in spec and spec[recurse_key]:
for child_spec in spec[recurse_key]:
value += search_spec(child_spec, search_key, recurse_key)
return sorted(value) | 9d89aacc200e205b0e6cbe49592abfd37158836a | 3,650,648 |
import test
def before_class(home=None, **kwargs):
"""Like @test but indicates this should run before other class methods.
All of the arguments sent to @test work with this decorator as well.
"""
kwargs.update({'run_before_class':True})
return test(home=home, **kwargs) | 3b36e448ec76a2c513a1f87dd29b8027b0693780 | 3,650,649 |
import math
import numpy as np
def hellinger_distance_poisson_variants(a_means, b_means, n_samples, sample_distances):
"""
a - The coverage vec for a variant over n_samples
b - The coverage vec for a variant over n_samples
returns average hellinger distance of multiple poisson distributions
"""
    # generate distributions for each sample
# and calculate divergence between them
# Get the means for each contig
h_geom_mean = []
both_present = []
for i in range(0, n_samples):
# Use this indexing method as zip does not seem to work so well in njit
# Add tiny value to each to avoid division by zero
a_mean = a_means[i] + 1e-6
b_mean = b_means[i] + 1e-6
if a_mean > 1e-6 and b_mean > 1e-6:
both_present.append(i)
if a_mean > 1e-6 or b_mean > 1e-6:
# First component of hellinger distance
h1 = math.exp(-0.5 * ((np.sqrt(a_mean) - np.sqrt(b_mean))**2))
h_geom_mean.append(1 - h1)
if len(h_geom_mean) >= 1:
# convert to log space to avoid overflow errors
d = np.log(np.array(h_geom_mean))
# return the geometric mean
d = np.exp(d.sum() / len(d))
geom_sim = geom_sim_calc(both_present, sample_distances)
d = d ** (1/geom_sim)
else:
d = 1
return d | 555365ea295ef2ff1e18e5c26b6b56b1c939035a | 3,650,651 |
def min_threshold(x, thresh, fallback):
"""Returns x or `fallback` if it doesn't meet the threshold. Note, if you want to turn a hyper "off" below,
set it to "outside the threshold", rather than 0.
"""
return x if (x and x > thresh) else fallback | e92c17aafb8a7c102152d9f31d0a317b285a0ae6 | 3,650,652 |
def get_command(command, meta):
"""Construct the command."""
bits = []
# command to run
bits.append(command)
# connection params
bits.extend(connect_bits(meta))
# database name
if command == 'mysqladmin':
# these commands shouldn't take a database name
return bits
if command == 'pg_restore':
bits.append('--dbname')
if command == 'mysql':
bits.append('--database')
bits.append(meta['path'][1:])
return bits | 0c80072fa70e7943bb7693ad5eb2d24d7078b1cc | 3,650,653 |
def get_common_count(list1, list2):
"""
Get count of common between two lists
:param list1: list
:param list2: list
:return: number
"""
return len(list(set(list1).intersection(list2))) | c149b49e36e81237b775b0de0f19153b5bcf2f99 | 3,650,654 |
def text_present(nbwidget, text="Test"):
"""Check if a text is present in the notebook."""
if WEBENGINE:
def callback(data):
global html
html = data
nbwidget.dom.toHtml(callback)
try:
return text in html
except NameError:
return False
else:
return text in nbwidget.dom.toHtml() | f61f90c6fbbe5251c4839cc3ef82ed1298640345 | 3,650,655 |
def multiFilm(layers, det, e0=20.0, withPoisson=True, nTraj=defaultNumTraj, dose=defaultDose, sf=defaultCharFluor, bf=defaultBremFluor, xtraParams=defaultXtraParams):
"""multiFilm(layers, det, e0=20.0, withPoisson=True, nTraj=defaultNumTraj, dose=defaultDose, sf=defaultCharFluor, bf=defaultBremFluor, xtraParams={}):
Monte Carlo simulate a spectrum from a multilayer thin film. Layers is a iterable list of \
[material,thickness]. Note the materials must have associated densities."""
tmp = u"MC simulation of a multilayer film [%s] at %0.1f keV%s%s" % (",".join("%0.0f nm of %s" % (1.0e9 * layer[1], layer[0]) for layer in layers), e0, (" + CSF" if sf else ""), (" + BSF" if bf else ""))
return base(det, e0, withPoisson, nTraj, dose, sf, bf, tmp, buildFilm, {"Layers": layers }, xtraParams) | ae586a6860ece7e21f46e221398a462619d16acd | 3,650,656 |
def value_iteration(R, P, gamma, epsilon=1e-6):
"""
Value iteration for discounted problems.
Parameters
----------
R : numpy.ndarray
        array of shape (S, A) containing the rewards, where S is the number
of states and A is the number of actions
P : numpy.ndarray
array of shape (S, A, S) such that P[s,a,ns] is the probability of
arriving at ns by taking action a in state s.
gamma : double
discount factor
epsilon : double
precision
Returns
--------
tuple (Q, V, n_it) containing the epsilon-optimal Q and V functions,
of shapes (S, A) and (S,), respectively, and n_it, the number of iterations
"""
S, A = R.shape
Q = np.zeros((S, A))
Q_aux = np.full((S, A), np.inf)
n_it = 0
while np.abs(Q - Q_aux).max() > epsilon:
Q_aux = Q
Q = bellman_operator(Q, R, P, gamma)
n_it += 1
V = np.zeros(S)
# numba does not support np.max(Q, axis=1)
for ss in range(S):
V[ss] = Q[ss, :].max()
return Q, V, n_it | 4f8286d7519577f77f86b239c14e948eed513a6a | 3,650,657 |
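A smoke test on a hypothetical two-state, two-action MDP. The snippet relies on a bellman_operator helper that is not shown, so a standard Bellman optimality backup is sketched here as an assumption; action 1 always returns to state 0 and pays 1, so both states are worth 1/(1-gamma) = 10.

import numpy as np

def bellman_operator(Q, R, P, gamma):
    # Assumed helper: Q(s,a) = R(s,a) + gamma * sum_s' P(s,a,s') * max_a' Q(s',a')
    V = Q.max(axis=1)
    return R + gamma * P @ V

R = np.array([[0.0, 1.0], [0.0, 1.0]])
P = np.zeros((2, 2, 2))
P[0, 0, 0] = P[1, 0, 1] = 1.0   # action 0: stay put, no reward
P[0, 1, 0] = P[1, 1, 0] = 1.0   # action 1: jump to state 0, reward 1
Q, V, n_it = value_iteration(R, P, gamma=0.9)
print(V)  # approximately [10. 10.]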
def mock_api_response(response_config={}):
"""Create a mock response from the Github API."""
headers = {
'ETag': 'W/"XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"',
'Cache-Control': 'public, max-age=60, s-maxage=60',
'Content-Type': 'application/json; charset=utf-8'
}
api_response = MagicMock(spec=Response)
api_response.content_type = 'application/json'
for k, v in response_config.iteritems():
if k == 'headers':
headers.update(v)
setattr(api_response, k, v)
# Request headers are case insensitive dicts,
# so we need to turn our mock headers into one.
api_response.headers = CaseInsensitiveDict(headers)
return api_response | f79af84cb51ffa063c1db2b70dce99ae61da871a | 3,650,658 |
from tqdm import tqdm
import json
def load_jsonl(file_path):
""" Load file.jsonl ."""
data_list = []
with open(file_path, mode='r', encoding='utf-8') as fi:
for idx, line in enumerate(tqdm(fi)):
jsonl = json.loads(line)
data_list.append(jsonl)
return data_list | 58bd0dbfa59d08036aa83e62aab47acd2c40ba6e | 3,650,661 |
from io import StringIO
from datetime import datetime
from urllib.request import urlopen
import numpy as np
import cartopy.crs as ccrs
def aurora_forecast():
"""
Get the latest Aurora Forecast from http://swpc.noaa.gov.
Returns
-------
img : numpy array
The pixels of the image in a numpy array.
img_proj : cartopy CRS
The rectangular coordinate system of the image.
img_extent : tuple of floats
The extent of the image ``(x0, y0, x1, y1)`` referenced in
the ``img_proj`` coordinate system.
origin : str
The origin of the image to be passed through to matplotlib's imshow.
dt : datetime
Time of forecast validity.
"""
# GitHub gist to download the example data from
#url = ('https://gist.githubusercontent.com/belteshassar/'
# 'c7ea9e02a3e3934a9ddc/raw/aurora-nowcast-map.txt')
# To plot the current forecast instead, uncomment the following line
url = 'http://services.swpc.noaa.gov/text/aurora-nowcast-map.txt'
response_text = StringIO(urlopen(url).read().decode('utf-8'))
img = np.loadtxt(response_text)
# Read forecast date and time
response_text.seek(0)
for line in response_text:
if line.startswith('Product Valid At:', 2):
dt = datetime.strptime(line[-17:-1], '%Y-%m-%d %H:%M')
img_proj = ccrs.PlateCarree()
img_extent = (-180, 180, -90, 90)
return img, img_proj, img_extent, 'lower', dt | 04ee88aee75f7ac86063c9a57f4e5155378f9085 | 3,650,662 |
def get_number_trips(grouped_counts):
"""
Gets the frequency of number of trips the customers make
Args:
grouped_counts (Pandas.DataFrame): The grouped dataframe returned from
a get_trips method call
Returns:
Pandas.DataFrame: the dataframe containing the frequencies for each
number of trips
"""
return frequency(grouped_counts.groupby('cust_id').count(), 0) | 4045f10e95fe597e626883c586cc832aa34157c3 | 3,650,663 |
import re
def process_text(text, max_features=200, stopwords=None):
"""Splits a long text into words, eliminates the stopwords and returns
(words, counts) which is necessary for make_wordcloud().
Parameters
----------
text : string
The text to be processed.
max_features : number (default=200)
The maximum number of words.
stopwords : set of strings
The words that will be eliminated.
Notes
-----
There are better ways to do word tokenization, but I don't want to include
all those things.
"""
if stopwords is None:
stopwords = STOPWORDS
d = {}
flags = re.UNICODE if type(text) is unicode else 0
for word in re.findall(r"\w[\w']*", text, flags=flags):
if word.isdigit():
continue
word_lower = word.lower()
if word_lower in stopwords:
continue
# Look in lowercase dict.
if word_lower in d:
d2 = d[word_lower]
else:
d2 = {}
d[word_lower] = d2
# Look in any case dict.
d2[word] = d2.get(word, 0) + 1
d3 = {}
for d2 in d.values():
# Get the most popular case.
first = max(d2.iteritems(), key=item1)[0]
d3[first] = sum(d2.values())
# merge plurals into the singular count (simple cases only)
for key in d3.keys():
if key.endswith('s'):
key_singular = key[:-1]
if key_singular in d3:
val_plural = d3[key]
val_singular = d3[key_singular]
d3[key_singular] = val_singular + val_plural
del d3[key]
words = sorted(d3.iteritems(), key=item1, reverse=True)
words = words[:max_features]
maximum = float(max(d3.values()))
for i, (word, count) in enumerate(words):
words[i] = word, count/maximum
return words | 531c8eea539136701289eea5cd462476ba7fefac | 3,650,664 |
def update_graph_map(n):
"""Update the graph rail network mapbox map.
Returns:
go.Figure: Scattermapbox of rail network graph
"""
return get_graph_map() | 826b12616e9c08b05cecef8d44017a1599ed8f98 | 3,650,665 |
def get_party_leads_sql_string_for_state(party_id, state_id):
"""
:type party_id: integer
"""
str = """ select
lr.candidate_id,
c.fullname as winning_candidate,
lr.constituency_id,
cons.name as constituency,
lr.party_id,
lr.max_votes,
(lr.max_votes-sr.votes) as lead,
sr.candidate_id,
loosing_candidate.fullname as runner_up,
loosing_party.name as runner_up_party,
sr.party_id,
ltr.party_id
from latest_results lr
inner join
latest_runners_up as sr
on
sr.constituency_id = lr.constituency_id
inner join
candidate c
on
c.id = lr.candidate_id
inner join
constituency cons
on
cons.id = lr.constituency_id
inner join party loosing_party
on
loosing_party.id = sr.party_id
inner join candidate loosing_candidate
on
loosing_candidate.id = sr.candidate_id
inner join last_time_winners ltr
on
ltr.constituency_id=lr.constituency_id
where
lr.party_id = %s
and
cons.state_id = %s
and
lr.status = 'COUNTING'
order by
lead DESC""" % (party_id, state_id)
return str; | de1e200cf8651626fff04c2011b3ada12b8b08a7 | 3,650,666 |
import requests
import json
import math
import time
def goods_images(goods_url):
"""
    Get the customer review photos for a product.
    Parameters:
        goods_url - str, product page URL
    Returns:
        image_urls - list, image URLs
"""
image_urls = []
productId = goods_url.split('/')[-1].split('.')[0]
    # Comments API URL
comment_url = 'https://sclub.jd.com/comment/productPageComments.action'
comment_params = {'productId':productId,
'score':'0',
'sortType':'5',
'page':'0',
'pageSize':'10',
'isShadowSku':'0',
'fold':'1'}
comment_headers = {'Accept': '*/*',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'zh-CN,zh;q=0.9',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.167 Safari/537.36',
'Referer':goods_url,
'Host': 'sclub.jd.com'}
comment_req = requests.get(url=comment_url, params=comment_params, headers=comment_headers, verify=False)
html = json.loads(comment_req.text)
    # Number of review photos
    imageListCount = html['imageListCount']
    # Number of photo pages, rounded up
pages = math.ceil(imageListCount / 10)
for page in range(1, pages+1):
        # Fetch the review photo URLs for this page
club_url = 'https://club.jd.com/discussion/getProductPageImageCommentList.action'
now = time.time()
now_str = str(now).split('.')
now = now_str[0] + now_str[-1][:3]
club_params = {'productId':productId,
'isShadowSku':'0',
'page':page,
'pageSize':'10',
'_':now}
club_headers = comment_headers
club_req = requests.get(url=club_url, params=club_params, headers=club_headers, verify=False)
html = json.loads(club_req.text)
for img in html['imgComments']['imgList']:
image_urls.append(img['imageUrl'])
    # Remove duplicates
image_urls = list(set(image_urls))
    # Prepend the scheme to build full URLs
image_urls = list(map(lambda x: 'http:'+x, image_urls))
return image_urls | 8ed59e295ebd08788f0083be9941ecd8b09f1d84 | 3,650,667 |
def delete_index_list(base_list, index_list):
"""
根据index_list删除base_list中指定元素
:param base_list:
:param index_list:
:return:
"""
if base_list and index_list:
return [base_list[i] for i in range(len(base_list)) if (i not in index_list)] | 0dd8960d0efc168df42cabb92147f078da362e5e | 3,650,668 |
def not_found():
"""Page not found."""
return make_response(
render_template("404.html"),
404
) | 3bc56677f760937f1767e0465e4dbd0a11eb41d0 | 3,650,669 |
def _traverseAgg(e, visitor=lambda n, v: None):
"""
    Traverse a parse-tree and visit each node;
    if the visitor returns a value, it replaces the current node
"""
res = []
if isinstance(e, (list, ParseResults, tuple)):
res = [_traverseAgg(x, visitor) for x in e]
elif isinstance(e, CompValue):
for k, val in e.iteritems():
if val != None:
res.append(_traverseAgg(val, visitor))
return visitor(e, res) | c436dbb548c6a1b7bc6ddc8ea8770cb953e76a72 | 3,650,670 |
def roll(image, delta):
"""Roll an image sideways
(A more detailed explanation goes here.)
"""
xsize, ysize = image.size
delta = delta % xsize
if delta == 0:
print("the delta was 0!")
return image
part1 = image.crop((0, 0, delta, ysize))
part2 = image.crop((delta, 0, xsize, ysize))
image.paste(part2, (0, 0, xsize-delta, ysize))
image.paste(part1, (xsize-delta, 0, xsize, ysize))
return image | b9ccd9659eedfefa5002f064a23c768d36dfdc0a | 3,650,671 |
def make_long_format(path_list, args):
"""Output list of strings in informative line-by-line format like ls -l
Args:
path_list (list of (str, zipfile.Zipinfo)): tuples, one per file
component of zipfile, with relative file path and zipinfo
args (argparse.Namespace): user arguments to script, esp. switches
Returns:
list of str: list of lines to be printed out one at a time
"""
path_str_list = []
if args.human_readable:
# by design of human-readable formatting
max_size_str_len = 4
else:
# find longest length of size str to determine width of string field
max_size_str_len = 0
for path in path_list:
# find longest size string of all paths in pathlist
size_str_len = len(format_file_size(path[1].file_size, args))
if size_str_len > max_size_str_len:
max_size_str_len = size_str_len
for path in path_list:
# extra_data = path[1].extra
# os_creator = path[1].create_system # 3-unix
if path[1].is_dir():
dir_str = "d"
else:
dir_str = "-"
perm_octal = get_zip_perms(path[1])
perm_str = perm_octal2str(perm_octal) + " "
size_str = format_file_size(path[1].file_size, args, max_size_str_len)
size_str += " "
date_str = get_zip_mtime(path[1])
path_str = color_classify(path, args)
path_str_list.append(dir_str + perm_str + size_str + date_str + path_str)
return path_str_list | 68a30c16409c98e92a31b21a911cbca7ca9ef7c4 | 3,650,672 |
import unicodedata
import re
def is_name_a_title(name, content):
"""Determine whether the name property represents an explicit title.
Typically when parsing an h-entry, we check whether p-name ==
e-content (value). If they are non-equal, then p-name likely
represents a title.
However, occasionally we come across an h-entry that does not
provide an explicit p-name. In this case, the name is
automatically generated by converting the entire h-entry content
to plain text. This definitely does not represent a title, and
looks very bad when displayed as such.
To handle this case, we broaden the equality check to see if
content is a subset of name. We also strip out non-alphanumeric
characters just to make the check a little more forgiving.
:param str name: the p-name property that may represent a title
:param str content: the plain-text version of an e-content property
:return: True if the name likely represents a separate, explicit title
"""
def normalize(s):
if not isinstance(s, string_type):
s = s.decode('utf-8')
s = unicodedata.normalize('NFKD', s)
s = s.lower()
s = re.sub('[^a-z0-9]', '', s)
return s
if not content:
return True
if not name:
return False
return normalize(content) not in normalize(name) | 2a8d3191920fba0d92670a3d520bfdf6836dbe69 | 3,650,673 |
import datetime
import traceback
def insertTweet(details, insertDuplicates=True):
""" Adds tweet to database
@param details {Dict} contains tweet details
@param insertDuplicates {Boolean} optional, if true it
will insert even if already exists
"""
try:
if not insertDuplicates:
tweet_results = get_tweet_by_id(details['itemid'])
if tweet_results != None:
logger.info(tweet_results)
return False
tweet = Tweet(
twitter_handle=details['handle'],
tweet_time=datetime.datetime.utcfromtimestamp(details['time']),
tweet_text=details['text'],
data_type=details['type'],
data_id=details['itemid'],
retweets=details['retweets'],
favorites=details['favorites'],
status=1
)
session.add(tweet)
session.commit()
addTweetToHandler(tweet,details['handle'])
return True
except Exception as e:
traceback.print_exc()
traceback.print_stack()
print("ERROR OCCURED WHEN INSERTING TWEET")
print(e)
session.rollback()
return False | e11aba2fecd3d2e0a8f21f25ea1f920512949bdc | 3,650,674 |
from typing import OrderedDict

import numpy as np


def return_embeddings(embedding: str, vocabulary_size: int, embedding_dim: int,
                      worddicts: OrderedDict) -> np.ndarray:
    """Create array of word embeddings."""
    word_embeddings = np.zeros((vocabulary_size, embedding_dim))
    with open(embedding, 'r') as f:
        for line in f:
            words = line.split()
            word = words[0]
            vector = words[1:]
            len_vec = len(vector)
            # Handle lines whose vector part is longer than embedding_dim
            # (multi-token words in the embedding file).
            if len_vec > embedding_dim:
                diff = len_vec - embedding_dim
                word = word.join(vector[:diff])
                vector = vector[diff:]
            if word in worddicts and worddicts[word] < vocabulary_size:
                vector = [float(x) for x in vector]
                word_embeddings[worddicts[word], :] = vector[0:embedding_dim]
return word_embeddings | 86379e2cc9c343733464bea207dc3f41b4dd7601 | 3,650,676 |
import sympy
def symLink(twist, dist, angle, offset):
"""
Transform matrix of this link with DH parameters.
(Use symbols)
"""
twist = twist * sympy.pi / 180
T1 = sympy.Matrix([
[1, 0, 0, dist],
[0, sympy.cos(twist), -sympy.sin(twist), 0],
[0, sympy.sin(twist), sympy.cos(twist), 0],
[0, 0, 0, 1]])
# T1[sympy.abs(T1) < 1e-3] = 0
T2 = sympy.Matrix([
[sympy.cos(angle), -sympy.sin(angle), 0, 0],
[sympy.sin(angle), sympy.cos(angle), 0, 0],
[0, 0, 1, offset],
[0, 0, 0, 1]])
return T1 * T2 | a6e2ac09866f2b54ffb33da681ba9d19e74e57f0 | 3,650,677 |
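# Usage sketch for symLink(): transform of a single revolute link with a symbolic
# joint angle (90-degree twist, zero link length, joint variable theta, 0.1 offset).
theta = sympy.symbols('theta')
T = symLink(90, 0, theta, 0.1)
print(sympy.simplify(T))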
import aiohttp
import defusedxml.ElementTree as DET
import xml.etree.ElementTree as ET
from typing import Tuple
from typing import Dict
from typing import Any
from typing import Sequence
# UpnpServerService and NAMESPACES are defined elsewhere in the surrounding module.
async def _parse_action_body(service: UpnpServerService, request: aiohttp.web.Request) -> Tuple[str, Dict[str, Any]]:
"""Parse action body."""
# Parse call.
soap = request.headers.get("SOAPAction", "").strip('"')
try:
_, action_name = soap.split("#")
data = await request.text()
root_el: ET.Element = DET.fromstring(data)
body_els: Sequence[ET.Element] = root_el.find("s:Body", NAMESPACES)
rpc_el = body_els[0]
except Exception as exc:
raise aiohttp.web.HTTPBadRequest(reason="InvalidSoap") from exc
if action_name not in service.actions:
raise aiohttp.web.HTTPBadRequest(reason="InvalidAction")
kwargs: Dict[str, Any] = {}
action = service.action(action_name)
for arg in rpc_el:
action_arg = action.argument(arg.tag, direction="in")
if action_arg is None:
raise aiohttp.web.HTTPBadRequest(reason="InvalidArg")
state_var = action_arg.related_state_variable
kwargs[arg.tag] = state_var.coerce_python(arg.text)
return action_name, kwargs | d5f390d956d726ffca0d37891815b8ccf488a826 | 3,650,678 |
import json
def get_tc_json():
"""Get the json for this testcase."""
try:
with open(GLOBAL_INPUT_JSON_PATH) as json_file:
tc = json.load(json_file)
except Exception:
        return_error('Could not read custom_validator_input.json')
return tc | de19278f5edb415d40e383d2ad08dfc6e968cb81 | 3,650,679 |
import numpy as np


def dualgauss(x, x1, x2, w1, w2, a1, a2, c=0):
"""
Sum of two Gaussian distributions. For curve fitting.
Parameters
----------
x: np.array
Axis
x1: float
Center of 1st Gaussian curve
x2: float
Center of 2nd Gaussian curve
w1: float
Width of 1st Gaussian curve
w2: float
Width of 2nd Gaussian curve
a1: float
Amplitude of 1st Gaussian curve
a2: float
Amplitude of 2nd Gaussian curve
c: float, optional
Offset, defaults to 0
"""
return a1*np.exp(-0.5*((x-x1)/w1)**2)+a2*np.exp(-0.5*((x-x2)/w2)**2) + c | d60d63ad0776aa6d5babfe5e963503f18dca0c3e | 3,650,680 |
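# Usage sketch: fit the double-Gaussian model to noisy synthetic data,
# assuming SciPy is available.
from scipy.optimize import curve_fit

x = np.linspace(-6, 6, 400)
y = dualgauss(x, -2.0, 2.0, 0.6, 0.9, 1.0, 0.7, 0.05)
y += np.random.default_rng(0).normal(scale=0.02, size=x.size)
popt, _ = curve_fit(dualgauss, x, y, p0=[-1.5, 1.5, 1.0, 1.0, 0.8, 0.8, 0.0])
print(popt)  # recovered centres, widths, amplitudes and offset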
def pdg_format3( value , error1 , error2 , error3 , latex = False , mode = 'total' ) :
"""Round value/error accoridng to PDG prescription and format it for print
@see http://pdg.lbl.gov/2010/reviews/rpp2010-rev-rpp-intro.pdf
@see section 5.3 of doi:10.1088/0954-3899/33/1/001
Quote:
The basic rule states that
- if the three highest order digits of the error lie between 100 and 354, we round to two significant digits.
- If they lie between 355 and 949, we round to one significant digit.
- Finally, if they lie between 950 and 999, we round up to 1000 and keep two significant digits.
In all cases, the central value is given with a precision that matches that of the error.
>>> value, error1, error2 = ...
>>> print ' Rounded value/error is %s ' % pdg_format2 ( value , error1 , error2 , True )
"""
error = ref_error ( mode , error1 , error2 , error3 )
val , err , q , ecase = pdg_round__ ( value , error )
if ecase <= 0 or ( not isfinite ( error1 ) ) or ( not isfinite ( error2 ) ) or ( not isfinite ( error3 ) ) :
if not isfinite ( val ) :
return ( '%+g \\pm %-g \\pm %-g \\pm %-g ' % ( val , error1 , error2 , error3 ) ) if latex else \
( '%+g +/- %-g +/- %-g +/- %-g' % ( val , error1 , error2 , error3 ) )
else :
qv , bv = _frexp10_ ( val )
if 0 != bv :
scale = 1.0 / 10**bv
                if latex : return '(%+.2f \\pm %-s \\pm %-s \\pm %-s)\\times 10^{%d}' % ( qv , error1 * scale , error2 * scale , error3 * scale , bv )
                else     : return '(%+.2f +/- %-s +/- %-s +/- %-s)*10^{%d}'            % ( qv , error1 * scale , error2 * scale , error3 * scale , bv )
else :
if latex : return ' %+.2f \\pm %-s \\pm %-s \\pm %-s ' % ( qv , error1 , error2 , error3 )
else : return ' %+.2f +/- %-s +/- %-s +/- %-s ' % ( qv , error1 , error2 , error3 )
qe , be = _frexp10_ ( error )
a , b = divmod ( be , 3 )
if 1 == ecase :
err1 = round_N ( error1 , 2 ) if isclose ( error1 , error , 1.e-2 ) else err
err2 = round_N ( error2 , 2 ) if isclose ( error2 , error , 1.e-2 ) else err
err3 = round_N ( error3 , 2 ) if isclose ( error3 , error , 1.e-2 ) else err
if 0 == b :
nd = 1
elif 1 == b :
nd = 3
a += 1
elif 2 == b :
a += 1
nd = 2
elif 2 == ecase :
err1 = round_N ( error1 , 1 ) if isclose ( error1 , error , 1.e-2 ) else err
err2 = round_N ( error2 , 1 ) if isclose ( error2 , error , 1.e-2 ) else err
err3 = round_N ( error3 , 1 ) if isclose ( error3 , error , 1.e-2 ) else err
if 0 == b :
nd = 0
if 2 == a % 3 :
nd = 3
a = a + 1
elif 1 == b :
nd = 2
a += 1
elif 2 == b :
nd = 1
a += 1
elif 3 == ecase :
err1 = round_N ( error1 , 2 ) if isclose ( error1 , error , 1.e-2 ) else err
err2 = round_N ( error2 , 2 ) if isclose ( error2 , error , 1.e-2 ) else err
err3 = round_N ( error3 , 2 ) if isclose ( error3 , error , 1.e-2 ) else err
if 0 == b :
nd = 0
if 2 == a % 3 :
nd = 3
a = a + 1
elif 1 == b :
nd = 2
a += 1
elif 2 == b :
nd = 1
a += 1
if 0 == a :
if latex: fmt = '(%%+.%df \\pm %%.%df \\pm %%.%df \\pm %%.%df)' % ( nd , nd , nd , nd )
        else    : fmt = ' %%+.%df +/- %%.%df +/- %%.%df +/- %%.%df '     % ( nd , nd , nd , nd )
        return fmt % ( val , err1 , err2 , err3 )
if latex: fmt = '(%%+.%df \\pm %%.%df \\pm %%.%df \\pm %%.%df)\\times 10^{%%d}' % ( nd , nd , nd , nd )
else : fmt = '(%%+.%df +/- %%.%df +/- %%.%df +/- %%.%df)*10^{%%d}' % ( nd , nd , nd , nd )
scale = 1.0/10**(3*a)
return fmt % ( val * scale , err1 * scale , err2 * scale , err3 * scale , 3 * a ) | 9d75007e19d60caac14a2a830800e7db215c0de6 | 3,650,681 |
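# A self-contained sketch of the PDG rounding rule quoted in the docstring above
# (how many significant digits to keep in an uncertainty). It is illustrative only
# and independent of the helpers (ref_error, pdg_round__, _frexp10_) used by pdg_format3.
import math

def pdg_error_digits(error):
    """Return the significant digits to keep: 100-354 -> 2, 355-949 -> 1, 950-999 -> 2 (rounded up)."""
    exponent = math.floor(math.log10(abs(error)))
    three_digits = int(abs(error) / 10 ** (exponent - 2))  # three highest-order digits
    if three_digits <= 354:
        return 2
    if three_digits <= 949:
        return 1
    return 2  # error rounds up to 1000; keep two significant digits

print(pdg_error_digits(0.0123))  # -> 2  (123 lies in 100-354)
print(pdg_error_digits(0.57))    # -> 1  (570 lies in 355-949)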
import datetime

import lunardate


def getChinaHoliday(t):
    """Find the Chinese holiday closest to the input date and return the distance in days."""
    date_time = datetime.datetime.strptime(t, '%d %B %Y')
    y = date_time.year
    # Chinese solar-calendar holidays
    sh = [
        (y, 1, 1),   # New Year's Day
        (y, 4, 5),   # Qingming (Tomb-Sweeping) Day
        (y, 5, 1),   # Labour Day
        (y, 10, 1)   # National Day
    ]
    # Chinese lunar-calendar holidays
    lh = [
        (y, 1, 1),   # First day of the first lunar month (Spring Festival)
        (y, 5, 5),   # Dragon Boat Festival
        (y, 8, 15)   # Mid-Autumn Festival
    ]
    res = 365
    for h in sh:
        hd = datetime.datetime(h[0], h[1], h[2], 0, 0, 0)
        ds = (date_time-hd).days
        if abs(ds) < res:  # nearest solar-calendar holiday to the input date
            res = abs(ds)
    for h in lh:
        ld = lunardate.LunarDate(h[0], h[1], h[2], 0).toSolarDate()
        hd = datetime.datetime(ld.year, ld.month, ld.day, 0, 0, 0)
        ds = (date_time-hd).days
        if abs(ds) < res:  # nearest lunar-calendar holiday to the input date
            res = abs(ds)
# print t,res
return res
pass | bc9520f56135d86cf196bfe30bde0ea645377f45 | 3,650,682 |
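# Usage sketch (requires the `lunardate` package):
print(getChinaHoliday('1 October 2020'))     # 0 -- National Day (and Mid-Autumn Festival) itself
print(getChinaHoliday('28 September 2020'))  # 3 -- three days before National Day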
def parse_mimetype(mimetype):
"""Parses a MIME type into its components.
:param str mimetype: MIME type
:returns: 4 element tuple for MIME type, subtype, suffix and parameters
:rtype: tuple
Example:
>>> parse_mimetype('text/html; charset=utf-8')
('text', 'html', '', {'charset': 'utf-8'})
"""
if not mimetype:
return '', '', '', {}
parts = mimetype.split(';')
params = []
for item in parts[1:]:
if not item:
continue
key, value = item.split('=', 1) if '=' in item else (item, '')
params.append((key.lower().strip(), value.strip(' "')))
params = dict(params)
fulltype = parts[0].strip().lower()
if fulltype == '*':
fulltype = '*/*'
mtype, stype = fulltype.split('/', 1) \
if '/' in fulltype else (fulltype, '')
stype, suffix = stype.split('+', 1) if '+' in stype else (stype, '')
return mtype, stype, suffix, params | a9abfde73528e6f76cca633efe3d4c881dccef82 | 3,650,683 |
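# A couple more illustrative calls; the behaviour follows directly from the code above.
print(parse_mimetype('application/hal+json'))  # ('application', 'hal', 'json', {})
print(parse_mimetype('*'))                     # ('*', '*', '', {})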
def terraform_state_bucket(config):
"""Get the bucket name to be used for the remote Terraform state
Args:
config (dict): The loaded config from the 'conf/' directory
Returns:
string: The bucket name to be used for the remote Terraform state
"""
# If a bucket name is specified for the remote Terraform state, we can assume the bucket
# should NOT be created
default_name = DEFAULT_TERRAFORM_STATE_BUCKET_SUFFIX.format(
config['global']['account']['prefix']
)
if 'terraform' not in config['global']:
return default_name, True # Use the default name and create the bucket
bucket_name = config['global']['terraform'].get(
'bucket_name',
default_name
)
return bucket_name, bucket_name == default_name | 443ae393896d180f3e419db7a6b7e346dca0655c | 3,650,684 |
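# Hypothetical usage sketch: DEFAULT_TERRAFORM_STATE_BUCKET_SUFFIX is assumed here to be
# a format string like the one below; the real value lives in the project's settings.
DEFAULT_TERRAFORM_STATE_BUCKET_SUFFIX = '{}-streamalert-terraform-state'  # assumed value
config = {'global': {'account': {'prefix': 'acme'}}}
print(terraform_state_bucket(config))  # ('acme-streamalert-terraform-state', True)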
def get_binary_matrix(gene_expr, libraries):
"""
Get binary matrix with genes as rows and pathways as columns.
If a gene is found in a given pathway, it is given a value of
1. Else, 0. Only the list of genes in common between that found
in the gene set libraries and the current dataset are used.
"""
function_to_genes = {}
set_genes = set()
for lib in libraries:
f2g, genes = gene_set_dictionaries(lib)
function_to_genes.update(f2g)
set_genes = set_genes | set(genes)
common_genes = list(set_genes & set(gene_expr))
binary_matrix = gs_binary_matrix(function_to_genes, set_genes).loc[common_genes]
return binary_matrix | 53f39909efc1dfb083cba734a01f77d181f4c36c | 3,650,685 |
def get_tip_downvotes(tips_id):
"""
GET function for retrieving all User objects that have downvoted a tip
"""
tip = Tips.objects.get(id=tips_id)
tips_downvotes = (tip.to_mongo())["downvotes"]
tips_downvotes_list = [
User.objects.get(id=str(user)).to_mongo() for user in tips_downvotes
]
response = {"users": tips_downvotes_list}
return create_response(data=response) | b528be2bd74169a4baff14ecb473ef12d8554be9 | 3,650,686 |
import networkx as nx
from typing import List
from typing import Dict
def get_placements(
big_graph: nx.Graph, small_graph: nx.Graph, max_placements=100_000
) -> List[Dict]:
"""Get 'placements' mapping small_graph nodes onto those of `big_graph`.
This function considers monomorphisms with a restriction: we restrict only to unique set
of `big_graph` qubits. Some monomorphisms may be basically
the same mapping just rotated/flipped which we purposefully exclude. This could
exclude meaningful differences like using the same qubits but having the edges assigned
differently, but it prevents the number of placements from blowing up.
Args:
big_graph: The parent, super-graph. We often consider the case where this is a
nx.Graph representation of a Device whose nodes are `cirq.Qid`s like `GridQubit`s.
small_graph: The subgraph. We often consider the case where this is a NamedTopology
graph.
max_placements: Raise a value error if there are more than this many placement
possibilities. It is possible to use `big_graph`, `small_graph` combinations
that result in an intractable number of placements.
Raises:
ValueError: if the number of placements exceeds `max_placements`.
Returns:
A list of placement dictionaries. Each dictionary maps the nodes in `small_graph` to
nodes in `big_graph` with a monomorphic relationship. That's to say: if an edge exists
in `small_graph` between two nodes, it will exist in `big_graph` between the mapped nodes.
"""
matcher = nx.algorithms.isomorphism.GraphMatcher(big_graph, small_graph)
# de-duplicate rotations, see docstring.
dedupe = {}
for big_to_small_map in matcher.subgraph_monomorphisms_iter():
dedupe[frozenset(big_to_small_map.keys())] = big_to_small_map
if len(dedupe) > max_placements:
# coverage: ignore
raise ValueError(
f"We found more than {max_placements} placements. Please use a "
f"more constraining `big_graph` or a more constrained `small_graph`."
)
small_to_bigs = []
for big in sorted(dedupe.keys()):
big_to_small_map = dedupe[big]
small_to_big_map = {v: k for k, v in big_to_small_map.items()}
small_to_bigs.append(small_to_big_map)
return small_to_bigs | fad71c888639ba29c0b0d2d61ddeff2a2c1d8653 | 3,650,687 |
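# Small usage sketch: place a 3-node path onto a 2x3 grid graph; each returned dict
# maps path nodes to grid nodes.
big = nx.grid_2d_graph(2, 3)
small = nx.path_graph(3)
placements = get_placements(big, small)
print(len(placements), placements[0])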
import inspect
import six
def _filter_baseanalysis_kwargs(function, kwargs):
"""
create two dictionaries with kwargs separated for function and AnalysisBase
Parameters
----------
function : callable
function to be called
kwargs : dict
keyword argument dictionary
Returns
-------
base_args : dict
dictionary of AnalysisBase kwargs
kwargs : dict
kwargs without AnalysisBase kwargs
Raises
------
ValueError : if ``function`` has the same kwargs as ``BaseAnalysis``
"""
base_argspec = inspect.getargspec(AnalysisBase.__init__)
n_base_defaults = len(base_argspec.defaults)
base_kwargs = {name: val
for name, val in zip(base_argspec.args[-n_base_defaults:],
base_argspec.defaults)}
argspec = inspect.getargspec(function)
for base_kw in six.iterkeys(base_kwargs):
if base_kw in argspec.args:
raise ValueError(
"argument name '{}' clashes with AnalysisBase argument."
"Now allowed are: {}".format(base_kw, list(base_kwargs.keys())))
base_args = {}
for argname, default in six.iteritems(base_kwargs):
base_args[argname] = kwargs.pop(argname, default)
return base_args, kwargs | a674c640618ebba3d2c29fec0458773344c84be6 | 3,650,690 |
import numpy as np


def torch_to_flax(torch_params, get_flax_keys):
"""Convert PyTorch parameters to nested dictionaries"""
def add_to_params(params_dict, nested_keys, param, is_conv=False):
if len(nested_keys) == 1:
key, = nested_keys
params_dict[key] = np.transpose(param, (2, 3, 1, 0)) if is_conv else np.transpose(param)
else:
assert len(nested_keys) > 1
first_key = nested_keys[0]
if first_key not in params_dict:
params_dict[first_key] = {}
add_to_params(params_dict[first_key], nested_keys[1:], param, ('conv' in first_key and \
nested_keys[-1] != 'bias'))
def add_to_state(state_dict, keys, param):
key_str = ''
for k in keys[:-1]:
key_str += f"/{k}"
if key_str not in state_dict:
state_dict[key_str] = {}
state_dict[key_str][keys[-1]] = param
flax_params, flax_state = {}, {}
    for key, tensor in torch_params.items():
        flax_keys = get_flax_keys(key.split('.'))
        if flax_keys[-1] is None:
            continue
if flax_keys[-1] == 'mean' or flax_keys[-1] == 'var':
add_to_state(flax_state, flax_keys, tensor.detach().numpy())
else:
add_to_params(flax_params, flax_keys, tensor.detach().numpy())
return flax_params, flax_state | fd87617e3e0db491ff313218883961a1c2aa9d0f | 3,650,691 |
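# Hypothetical usage sketch, assuming PyTorch is installed. demo_get_flax_keys is an
# illustrative mapping only; the real project supplies its own get_flax_keys.
import torch

def demo_get_flax_keys(parts):
    name, param = parts
    return (name, 'kernel' if param == 'weight' else param)

model = torch.nn.Sequential(torch.nn.Linear(3, 2))
params, state = torch_to_flax(model.state_dict(), demo_get_flax_keys)
print(params['0']['kernel'].shape)  # (3, 2) -- transposed from PyTorch's (2, 3)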
from typing import Union
from pathlib import Path
from typing import Optional

import geopandas as gpd
import numpy as np
import xarray
from pyproj import CRS
from pyproj.exceptions import CRSError
# subset_bbox, subset_time, subset_level, get_lon, get_lat, create_mask and
# _check_crs_compatibility are helpers from the same clisops module.
def subset_shape(
ds: Union[xarray.DataArray, xarray.Dataset],
shape: Union[str, Path, gpd.GeoDataFrame],
raster_crs: Optional[Union[str, int]] = None,
shape_crs: Optional[Union[str, int]] = None,
buffer: Optional[Union[int, float]] = None,
start_date: Optional[str] = None,
end_date: Optional[str] = None,
first_level: Optional[Union[float, int]] = None,
last_level: Optional[Union[float, int]] = None,
) -> Union[xarray.DataArray, xarray.Dataset]:
"""Subset a DataArray or Dataset spatially (and temporally) using a vector shape and date selection.
Return a subset of a DataArray or Dataset for grid points falling within the area of a Polygon and/or
MultiPolygon shape, or grid points along the path of a LineString and/or MultiLineString. If the shape
consists of several disjoint polygons, the output is cut to the smallest bbox including all
polygons.
Parameters
----------
ds : Union[xarray.DataArray, xarray.Dataset]
Input values.
shape : Union[str, Path, gpd.GeoDataFrame]
Path to shape file, or directly a geodataframe. Supports formats compatible with geopandas.
raster_crs : Optional[Union[str, int]]
EPSG number or PROJ4 string.
shape_crs : Optional[Union[str, int]]
EPSG number or PROJ4 string.
buffer : Optional[Union[int, float]]
Buffer the shape in order to select a larger region stemming from it. Units are based on the shape degrees/metres.
start_date : Optional[str]
Start date of the subset.
Date string format -- can be year ("%Y"), year-month ("%Y-%m") or year-month-day("%Y-%m-%d").
Defaults to first day of input data-array.
end_date : Optional[str]
End date of the subset.
Date string format -- can be year ("%Y"), year-month ("%Y-%m") or year-month-day("%Y-%m-%d").
Defaults to last day of input data-array.
first_level : Optional[Union[int, float]]
First level of the subset.
Can be either an integer or float.
Defaults to first level of input data-array.
last_level : Optional[Union[int, float]]
Last level of the subset.
Can be either an integer or float.
Defaults to last level of input data-array.
Returns
-------
Union[xarray.DataArray, xarray.Dataset]
A subset of `ds`
Notes
-----
If no CRS is found in the shape provided (e.g. RFC-7946 GeoJSON, https://en.wikipedia.org/wiki/GeoJSON),
assumes a decimal degree datum (CRS84). Be advised that EPSG:4326 and OGC:CRS84 are not identical as axis order of
lat and long differs between the two (for more information, see: https://github.com/OSGeo/gdal/issues/2035).
Examples
--------
>>> import xarray as xr # doctest: +SKIP
>>> from clisops.core.subset import subset_shape # doctest: +SKIP
>>> pr = xr.open_dataset(path_to_pr_file).pr # doctest: +SKIP
...
# Subset data array by shape
>>> prSub = subset_shape(pr, shape=path_to_shape_file) # doctest: +SKIP
...
# Subset data array by shape and single year
>>> prSub = subset_shape(pr, shape=path_to_shape_file, start_date='1990-01-01', end_date='1990-12-31') # doctest: +SKIP
...
# Subset multiple variables in a single dataset
>>> ds = xr.open_mfdataset([path_to_tasmin_file, path_to_tasmax_file]) # doctest: +SKIP
>>> dsSub = subset_shape(ds, shape=path_to_shape_file) # doctest: +SKIP
"""
wgs84 = CRS(4326)
# PROJ4 definition for WGS84 with longitudes ranged between -180/+180.
wgs84_wrapped = CRS.from_string(
"+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs lon_wrap=180"
)
if isinstance(ds, xarray.DataArray):
ds_copy = ds._to_temp_dataset()
else:
ds_copy = ds.copy()
if isinstance(shape, gpd.GeoDataFrame):
poly = shape.copy()
else:
poly = gpd.GeoDataFrame.from_file(shape)
if buffer is not None:
poly.geometry = poly.buffer(buffer)
# Get the shape's bounding box.
minx, miny, maxx, maxy = poly.total_bounds
lon_bnds = (minx, maxx)
lat_bnds = (miny, maxy)
# If polygon doesn't cross prime meridian, subset bbox first to reduce processing time
# Only case not implemented is when lon_bnds cross the 0 deg meridian but dataset grid has all positive lons
try:
ds_copy = subset_bbox(ds_copy, lon_bnds=lon_bnds, lat_bnds=lat_bnds)
except ValueError as e:
raise ValueError(
"No grid cell centroids found within provided polygon bounding box. "
'Try using the "buffer" option to create an expanded area.'
) from e
except NotImplementedError:
pass
lon = get_lon(ds_copy)
lat = get_lat(ds_copy)
if start_date or end_date:
ds_copy = subset_time(ds_copy, start_date=start_date, end_date=end_date)
if first_level or last_level:
ds_copy = subset_level(ds_copy, first_level=first_level, last_level=last_level)
# Determine whether CRS types are the same between shape and raster
if shape_crs is not None:
try:
shape_crs = CRS.from_user_input(shape_crs)
except ValueError:
raise
else:
try:
shape_crs = CRS(poly.crs)
except CRSError:
poly.crs = wgs84
shape_crs = wgs84
wrap_lons = False
if raster_crs is not None:
try:
raster_crs = CRS.from_user_input(raster_crs)
except ValueError:
raise
else:
if np.min(lat_bnds) < -90 or np.max(lat_bnds) > 90:
raise ValueError("Latitudes exceed domain of WGS84 coordinate system.")
if np.min(lon_bnds) < -180 or np.max(lon_bnds) > 180:
raise ValueError("Longitudes exceed domain of WGS84 coordinate system.")
try:
# Extract CF-compliant CRS_WKT from crs variable.
raster_crs = CRS.from_cf(ds_copy.crs.attrs)
except AttributeError as e:
# This is guessing that lons are wrapped around at 180+ but without much information, this might not be true
if np.min(lon) >= -180 and np.max(lon) <= 180:
raster_crs = wgs84
elif np.min(lon) >= 0 and np.max(lon) <= 360:
wrap_lons = True
raster_crs = wgs84_wrapped
else:
raise CRSError(
"Raster CRS is not known and does not resemble WGS84."
) from e
_check_crs_compatibility(shape_crs=shape_crs, raster_crs=raster_crs)
mask_2d = create_mask(x_dim=lon, y_dim=lat, poly=poly, wrap_lons=wrap_lons).clip(
1, 1
)
# 1 on the shapes, NaN elsewhere.
# We simply want to remove the 0s from the zeroth shape, for our outer mask trick below.
if np.all(mask_2d.isnull()):
raise ValueError(
f"No grid cell centroids found within provided polygon bounds ({poly.bounds}). "
'Try using the "buffer" option to create an expanded areas or verify polygon.'
)
sp_dims = set(mask_2d.dims) # Spatial dimensions
# Find the outer mask. When subsetting unconnected shapes,
# we dont want to drop the inner NaN regions, it may cause problems downstream.
inner_mask = xarray.full_like(mask_2d, True, dtype=bool)
for dim in sp_dims:
# For each dimension, propagate shape indexes in either directions
# Then sum on the other dimension. You get a step function going from 0 to X.
# The non-zero part that left and right have in common is the "inner" zone.
left = mask_2d.bfill(dim).sum(sp_dims - {dim})
right = mask_2d.ffill(dim).sum(sp_dims - {dim})
# True in the inner zone, False in the outer
inner_mask = inner_mask & (left != 0) & (right != 0)
# inner_mask including the shapes
inner_mask = mask_2d.notnull() | inner_mask
# loop through variables
for v in ds_copy.data_vars:
if set.issubset(sp_dims, set(ds_copy[v].dims)):
# 1st mask values outside shape, then drop values outside inner_mask
ds_copy[v] = ds_copy[v].where(mask_2d.notnull())
# Remove grid points outside the inner mask
# Then extract the coords.
# Using a where(inner_mask) on ds_copy triggers warnings with dask, sel seems safer.
mask_2d = mask_2d.where(inner_mask, drop=True)
for dim in sp_dims:
ds_copy = ds_copy.sel({dim: mask_2d[dim]})
# Add a CRS definition using CF conventions and as a global attribute in CRS_WKT for reference purposes
ds_copy.attrs["crs"] = raster_crs.to_string()
ds_copy["crs"] = 1
ds_copy["crs"].attrs.update(raster_crs.to_cf())
for v in ds_copy.variables:
if {lat.name, lon.name}.issubset(set(ds_copy[v].dims)):
ds_copy[v].attrs["grid_mapping"] = "crs"
if isinstance(ds, xarray.DataArray):
return ds._from_temp_dataset(ds_copy)
return ds_copy | 2d751cd4a9300645cb9bc7b1b353dc29da388f96 | 3,650,692 |
import matplotlib.pyplot as plt


def plot_record_static(
record,
save=True,
scale=1000,
select_kw={},
x_prop='wavenumber',
**kwargs
):
"""Figure of Static data from a record.
High level function.
record: Record to get data from
save: Boolean, Save figure
scale: Scale y axis.
select_kw: dict passed to select method
Returns
fig and ax.
"""
fig, ax = plt.subplots(num='{}_static'.format(record.name))
fig.clf()
select_kw.setdefault('delay_mean', True)
select_kw.setdefault('frame_med', True)
select_kw.setdefault('prop', 'unpumped')
data = record.select(**select_kw)
plot_spec(record.select(x_prop), scale*data, **kwargs)
plt.title("{}".format(record.lname))
fname = 'figures/{}_static.pdf'.format(record.name)
print(fname)
if save:
plt.savefig(fname)
print("saved")
return fig, ax | 4a25068f7df9450870af81fb2507f6262db61b42 | 3,650,693 |
import librosa
import numpy as np

# ConfMelspec and db_to_linear are provided elsewhere in the project.
def logmelspectrogram(wave: np.ndarray, conf: ConfMelspec) -> np.ndarray:
"""Convert a waveform to a scaled mel-frequency log-amplitude spectrogram.
Args:
wave::ndarray[Time,] - waveform
conf - Configuration
Returns::(Time, Mel_freq) - mel-frequency log(Bel)-amplitude spectrogram
"""
# mel-frequency linear-amplitude spectrogram :: [Freq=n_mels, T_mel]
mel_freq_amp_spec = librosa.feature.melspectrogram(
y=wave,
sr=conf.sampling_rate,
n_fft=conf.n_fft,
hop_length=conf.hop_length,
n_mels=conf.n_mels,
fmin=conf.fmin,
fmax=conf.fmax,
# norm=,
power=1,
pad_mode="reflect",
)
# [-inf, `min_db`, `ref_db`, +inf] dB(ref=1,power) => [`min_db_rel`/20, `min_db_rel`/20, 0, +inf]
min_db = conf.ref_db + conf.min_db_rel
ref, amin = db_to_linear(conf.ref_db), db_to_linear(min_db)
# `power_to_db` hack for linear-amplitude spec to log-amplitude spec conversion
mel_freq_log_amp_spec = librosa.power_to_db(mel_freq_amp_spec, ref=ref, amin=amin, top_db=None)
mel_freq_log_amp_spec_bel = mel_freq_log_amp_spec/10.
mel_freq_log_amp_spec_bel = mel_freq_log_amp_spec_bel.T
return mel_freq_log_amp_spec_bel | d4849092495b097b8efb292826eb020c8775157c | 3,650,694 |
def get_trainer_config(env_config, train_policies, num_workers=9, framework="tf2"):
"""Build configuration for 1 run."""
# trainer config
config = {
"env": env_name, "env_config": env_config, "num_workers": num_workers,
# "multiagent": {"policy_mapping_fn": lambda x: x, "policies": policies,
# "policies_to_train": train_policies},
"framework": framework,
"train_batch_size": 512,
'batch_mode': 'truncate_episodes',
"callbacks": TraceMallocCallback,
"lr": 0.0,
"num_gpus": 1,
}
return config | 4452d0e037b4bc49a5b027d4f0f6dd2993eceac2 | 3,650,695 |