content (stringlengths 35–762k) | sha1 (stringlengths 40–40) | id (int64 0–3.66M)
---|---|---|
from functools import wraps

from flask import current_app

def url(method):
    """Request-handler decorator for each URL: wraps the handler and returns a
    structured error response when an exception is raised."""
@wraps(method)
def error_handler(*args, **kwargs):
try:
return success(method(*args, **kwargs))
except RequestError as r:
current_app.logger.exception(r)
            # Return the error number and message defined by the exception class
return failed(reason=r.err_num(), message=r.err_msg())
except Exception as e:
current_app.logger.exception(e)
return failed()
return error_handler | cb2c36981372738b6b708d4e28566d4bb8ffcd90 | 3,654,600 |
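# Usage sketch for the `url` decorator above. `success`, `failed`, and
# `RequestError` are app-local helpers not shown in this snippet; the
# stand-ins below are assumptions for illustration only.
def success(data):
    return {"code": 0, "data": data}

def failed(reason=-1, message="internal error"):
    return {"code": reason, "message": message}

@url
def get_user(user_id):
    return {"id": user_id}

# get_user(7) -> {"code": 0, "data": {"id": 7}}; exceptions raised inside the
# handler are logged via current_app and converted into a failed() payload.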
def is_abbreviation(sentence):
"""
Evaluate a word to be an abbreviation if the immediate word before the
period contains a capital letter and not a single word sentence.
"""
sentence_split = sentence.split(" ")
if len(sentence_split) == 1:
return False
elif len(sentence_split[-1]) <= 3 and \
any(x.isupper() for x in sentence_split[-1]):
return True
else:
return False | a6f6ceae5b3b9adb7817a913e80a6af86b6d27d5 | 3,654,601 |
def compose_redis_key(vim_name, identifier, identifier_type="vdu"):
"""Compose the key for redis given vim name and vdu uuid
Args:
vim_name (str): The VIM name
identifier (str): The VDU or VNF uuid (NFVI based)
        identifier_type (str): the identifier type; defaults to "vdu", with "vnf" also supported
Returns:
str: the key for redis
"""
if identifier_type == "vnf":
return "{}:vnf#{}".format(vim_name.lower(), identifier)
else:
return "{}:{}".format(vim_name.lower(), identifier) | e9a03cf9ff704fea8b9cdf75c59695568e366649 | 3,654,602 |
def calGridID(locs, id, SPLIT = 0.0005):
"""
根据城市网格编号还原经纬度信息
:param locs:
:param id:
:param SPLIT=0.05:
"""
centerincrement = SPLIT/2.0
LNGNUM = int((locs['east'] - locs['west']) / SPLIT + 1)
latind = int(id / LNGNUM)
lngind = id - latind * LNGNUM
lat = (locs['south'] + latind * SPLIT)
lng = (locs['west'] + lngind * SPLIT)
lngcen = (lng + centerincrement)
latcen = (lat + centerincrement)
return "%.3f,%.3f" % (latcen, lngcen)
# {
# 'lat': latcen,
# 'lng': lngcen
# } | 8df119ff82bc1d3c14dbdfe358af6d956d6a52a2 | 3,654,603 |
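# Worked example for calGridID, assuming `locs` is a bounding box in degrees.
# With SPLIT=0.0005 the grid has LNGNUM = int((east - west) / SPLIT + 1)
# columns; id 0 is the south-west cell and the result is the cell centre.
locs = {"west": 116.0, "east": 116.5, "south": 39.0, "north": 39.5}
print(calGridID(locs, 0))     # "39.000,116.000"
print(calGridID(locs, 1001))  # row 1, column 0 -> "39.001,116.000"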
def linear(x, *p):
"""[summary]
Arguments:
x {[type]} -- [description]
Returns:
[type] -- [description]
"""
return p[0] * x + p[1] | 07ef5fc7c5e78148528cccd09fe14c37cad22ead | 3,654,604 |
def convert_price_text(t):
"""
convert "$175/month' to 175
:param t:
:return: price, unit (i.e. 175, 'month')
"""
tok = t.split('$')[1]
if '/' in tok:
price, unit = tok.split('/')
else:
price = tok
unit = None
return float(price.strip().strip('$').replace(',', '')), unit | b42d26dcd4eb1b2c2f8c5a63ddc9d48469e30a52 | 3,654,605 |
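# Usage sketch:
assert convert_price_text("$175/month") == (175.0, "month")
assert convert_price_text("$1,250") == (1250.0, None)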
async def async_setup(hass, config):
"""Set up the WWLLN component."""
if DOMAIN not in config:
return True
conf = config[DOMAIN]
latitude = conf.get(CONF_LATITUDE, hass.config.latitude)
longitude = conf.get(CONF_LONGITUDE, hass.config.longitude)
identifier = '{0}, {1}'.format(latitude, longitude)
if identifier in configured_instances(hass):
return True
if hass.config.units.name == CONF_UNIT_SYSTEM_IMPERIAL:
unit_system = CONF_UNIT_SYSTEM_IMPERIAL
else:
unit_system = CONF_UNIT_SYSTEM_METRIC
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={'source': SOURCE_IMPORT},
data={
CONF_LATITUDE: latitude,
CONF_LONGITUDE: longitude,
CONF_RADIUS: conf[CONF_RADIUS],
CONF_WINDOW: conf[CONF_WINDOW],
CONF_UNIT_SYSTEM: unit_system,
}))
return True | 3f0a4f5a017340780c8c1122425804e7862c3d0f | 3,654,606 |
from typing import Any, Tuple

def __are_nearly_overlapped(
    plane_predicted: NDArray[Any, np.int32],
    plane_gt: NDArray[Any, np.int32],
    required_overlap: np.float64,
) -> Tuple[bool, bool]:
"""
    Calculate if planes are overlapped enough (required_overlap %) to be used for PP-PR metric
    :param required_overlap: overlap threshold which will be checked to say that planes overlap
:param plane_predicted: predicted segmentation
:param plane_gt: ground truth segmentation
:return: true if planes are overlapping by required_overlap % or more, false otherwise
"""
intersection = np.intersect1d(plane_predicted, plane_gt)
return (
intersection.size / plane_predicted.size >= required_overlap
and intersection.size / plane_gt.size >= required_overlap,
intersection.size > 0,
) | 7b686e7bb4b18e4e2e116cdfd14878acbcc4c92d | 3,654,607 |
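# Usage sketch: planes are arrays of point indices assigned to each plane.
import numpy as np

plane_gt = np.array([1, 2, 3, 4], dtype=np.int32)
plane_pred = np.array([2, 3, 4, 5], dtype=np.int32)
# the intersection has 3 points, i.e. 75% of each plane
overlapped, intersects = __are_nearly_overlapped(plane_pred, plane_gt, 0.7)
assert overlapped and intersects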
def _get_prob_k_given_L(B, N=None):
"""
Helper function.
"""
if N is None:
N = int(B[0, 1])
return B / N | be1d0848b148b3413aaee2c5549bd6063e1f2d33 | 3,654,608 |
from base64 import encodebytes

def base64_encode(s):
"""unicode-safe base64
base64 API only talks bytes
"""
if not isinstance(s, bytes):
s = s.encode('ascii', 'replace')
encoded = encodebytes(s)
return encoded.decode('ascii') | 6ef0722014aa56e22de102aa0ce8286416640f86 | 3,654,609 |
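# Usage sketch:
# base64_encode("hello") -> 'aGVsbG8=\n'  (encodebytes appends a trailing newline)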
def _unpack_tableswitch(bc, offset):
"""
function for unpacking the tableswitch op arguments
"""
jump = (offset % 4)
if jump:
offset += (4 - jump)
(default, low, high), offset = _unpack(_struct_iii, bc, offset)
joffs = list()
for _index in xrange((high - low) + 1):
j, offset = _unpack(_struct_i, bc, offset)
joffs.append(j)
return (default, low, high, joffs), offset | af08ab85def5bf132227f20da8cb6032e2a9dff1 | 3,654,610 |
def force_orders(self, **kwargs):
"""User's Force Orders (USER_DATA)
GET /fapi/v1/forceOrders
https://binance-docs.github.io/apidocs/futures/en/#user-39-s-force-orders-user_data
Keyword Args:
symbol (str, optional)
autoCloseType (str, optional): "LIQUIDATION" for liquidation orders, "ADL" for ADL orders.
startTime (int, optional)
endTime (int, optional)
limit (int, optional): Default 50; max 100.
recvWindow (int, optional)
Notes:
If "autoCloseType" is not sent, orders with both of the types will be returned
If "startTime" is not sent, data within 7 days before "endTime" can be queried
"""
payload = {**kwargs}
url_path = "/fapi/v1/forceOrders"
return self.sign_request("GET", url_path, payload) | 6e848820e17e54df0f275ec4087d9c609d4e08fa | 3,654,611 |
def prosp_power_analysis_norm(d, sigma, pow_lev, alpha, direction):
"""
This function conducts pre-testing power analysis and
calculates the minimally required sample size for a normal sample.
@param d: difference between the mean differences under H1 and H0
@param sigma: standard deviation
@param pow_lev: power level
@param alpha: significance level
@param direction: direction of the test, two-sided or one-sided
@return: required minimal sample size
"""
# first calculates for a z test
n_z = np.ceil(z_test_sample_size(d, sigma, alpha, pow_lev, direction))
# first iteration for t test
n_t_1 = np.ceil(t_test_sample_size(d, sigma, n_z-1, alpha, pow_lev, direction))
# second iteration for t test
n_t_2 = np.ceil(t_test_sample_size(d, sigma, n_t_1-1, alpha, pow_lev, direction))
    return np.ceil(n_t_2)
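# The z_test_sample_size helper used above is not shown in this snippet; a
# minimal sketch follows, assuming the standard formula
# n = ((z_{1-alpha*} + z_{1-beta}) * sigma / d)^2, with alpha* = alpha/2 for a
# two-sided test. The direction handling is an assumption for illustration.
from scipy.stats import norm

def z_test_sample_size(d, sigma, alpha, pow_lev, direction):
    a = alpha / 2.0 if direction == "two-sided" else alpha
    z_alpha = norm.ppf(1 - a)   # critical value for the significance level
    z_beta = norm.ppf(pow_lev)  # quantile corresponding to the desired power
    return ((z_alpha + z_beta) * sigma / d) ** 2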
def decision_tree_construction(examples, target_attribute, attributes, depth):
"""
:param examples: The data we will use to train the tree(x)
:param target_attribute: The label we want to classify(y)
:param attributes: The number(index) of the labels/attributes of the data-set
:return: The tree corresponding to the given data
"""
# This is the first base condition of the algorithm. It is used if the attributes variable is empty, then we return
# the single-node tree Root, with label = most common value of target_attribute in examples
# The base condition for the recursion when we check if all the variables are same or not in the node and if they
# are same then we return that value as the node
if len(attributes) == 0 or len(np.unique(target_attribute)) == 1:
unique_value_of_attribute, counts_of_attribute = np.unique(target_attribute, return_counts=True)
try:
if counts_of_attribute[0] < counts_of_attribute[1]:
unique_value_of_attribute = np.flipud(unique_value_of_attribute)
        except IndexError:
            # only one class is present, so index [1] does not exist
            pass
if unique_value_of_attribute[0] == 1:
# More positive values
return 1, depth
elif unique_value_of_attribute[0] == 0:
# More negative values
return 0, depth
    # This is the recursive part of the algorithm, in which we build the
    # sub-trees using recursion and information gain
else:
Information_Gain = Information_Gain_Heuristic(examples, attributes, target_attribute)
best_attribute_number = attributes[np.argmax(Information_Gain)]
        # Since we now have the best attribute (A in the algorithm), we create
        # the root node of the tree/sub-tree and label it with that attribute.
        # The tree is represented as a nested dictionary.
        tree = dict([(best_attribute_number, dict())])
        # tree is always a dict at this point, so this is an internal node
        tree[best_attribute_number]["type_of_node"] = "node"
        tree[best_attribute_number]["depth"] = depth
        unique_value_of_attribute, counts_of_attribute = np.unique(target_attribute, return_counts=True)
        try:
            if counts_of_attribute[0] < counts_of_attribute[1]:
                counts_of_attribute = np.flipud(counts_of_attribute)
                unique_value_of_attribute = np.flipud(unique_value_of_attribute)
        except IndexError:
            # only one class is present, so index [1] does not exist
            pass
        tree[best_attribute_number]["majority_target_attribute"] = unique_value_of_attribute[0]
        tree[best_attribute_number]["best_attribute_number"] = best_attribute_number
attributes.remove(best_attribute_number)
# Now we do the recursive algorithm which will be used to create the tree after the root node.
depth_of_node = []
for each_unique_value in np.unique(examples[best_attribute_number]):
# We use those values for which the examples[best_attribute_number] == each_unique_value
class1 = each_unique_value
new_target_attribute = pd.DataFrame(target_attribute)
total_data = pd.concat([examples, new_target_attribute], axis=1, sort=False)
            # We do this step so that we can pick the rows whose value for
            # best_attribute equals the current class. This splits both the
            # data and the target attribute among the branches.
new_data_after_partition = total_data.loc[total_data[best_attribute_number] == class1]
new_target_attribute, new_examples_after_partition = get_attributes_and_labels(new_data_after_partition)
# This is also a condition for our algorithm in which we check if the number of examples after the
# partition are positive or not. If the values are less than 1 then we return the most frequent value in
# the node
if len(new_examples_after_partition) == 0:
unique_value_of_attribute, counts_of_attribute = np.unique(target_attribute, return_counts=True)
try:
if counts_of_attribute[0] < counts_of_attribute[1]:
counts_of_attribute = np.flipud(counts_of_attribute)
unique_value_of_attribute = np.flipud(unique_value_of_attribute)
                except IndexError:
                    pass  # only one class present
if unique_value_of_attribute[0] == 1:
# More positive values
return 1, depth
elif unique_value_of_attribute[0] == 0:
# More negative values
return 0, depth
            # Recursive step: build sub-trees until one of the base cases holds
new_sub_tree_after_partition, deptha = decision_tree_construction(new_examples_after_partition,
new_target_attribute, attributes,
depth + 1)
depth_of_node.append(deptha)
# Here we are adding the depth of the node so that we can do the depth based pruning
tree[best_attribute_number][each_unique_value] = new_sub_tree_after_partition
if isinstance(new_sub_tree_after_partition, int):
tree[best_attribute_number]["type_of_node"] = "leaf"
tree[best_attribute_number]["depth"] = depth
unique_value_of_attribute, counts_of_attribute = np.unique(target_attribute, return_counts=True)
try:
if counts_of_attribute[0] < counts_of_attribute[1]:
counts_of_attribute = np.flipud(counts_of_attribute)
unique_value_of_attribute = np.flipud(unique_value_of_attribute)
                except IndexError:
                    pass  # only one class present
tree[best_attribute_number]["majority_target_attribute"] = unique_value_of_attribute[0]
tree[best_attribute_number]["best_attribute_number"] = best_attribute_number
else:
tree[best_attribute_number]["type_of_node"] = "node"
tree[best_attribute_number]["depth"] = depth
unique_value_of_attribute, counts_of_attribute = np.unique(target_attribute, return_counts=True)
try:
if counts_of_attribute[0] < counts_of_attribute[1]:
counts_of_attribute = np.flipud(counts_of_attribute)
unique_value_of_attribute = np.flipud(unique_value_of_attribute)
                except IndexError:
                    pass  # only one class present
tree[best_attribute_number]["majority_target_attribute"] = unique_value_of_attribute[0]
tree[best_attribute_number]["best_attribute_number"] = best_attribute_number
return tree, max(depth_of_node) | c9529deb71d3c0a89bbae053aae07e587d277255 | 3,654,613 |
import numpy
def mass_centered(geo):
""" mass-centered geometry
"""
geo = translate(geo, numpy.negative(center_of_mass(geo)))
return geo | 1081141d77383f857f986031fa03510fd2608741 | 3,654,614 |
def binaryMatrix(l, value=PAD_token):
"""
:param l:
:param value:
:return: seq: [3,4,5,0,0]
m: [[1],[1],[1],[0],[0]]
"""
m = []
for i, seq in enumerate(l):
m.append([])
for token in seq:
            if token == value:
m[i].append(0)
else:
m[i].append(1)
return m | 3c123b1ce8531bcde7c6673f8ca8a91f1300f0bb | 3,654,615 |
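# Usage sketch, assuming PAD_token == 0 was defined before binaryMatrix
# (as in the chatbot tutorials this helper typically comes from):
PAD_token = 0
batch = [[3, 4, 5, 0, 0], [7, 8, 0, 0, 0]]
# binaryMatrix(batch) -> [[1, 1, 1, 0, 0], [1, 1, 0, 0, 0]]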
def load_map(mappath):
""" Attempt to load map with known loaders
"""
data = None
shirtloader = lambda path: fio.load_map(path)[0][0:3]
maploaders = [load_pfire_map, shirtloader]
for loader in maploaders:
try:
data = loader(mappath)
except (ValueError, OSError):
pass
if data is not None:
break
if data is None:
raise RuntimeError("Failed to load map \"{}\"".format(mappath))
return data | 2ab5c46e0b1ec0ed2e613b42c0553a1d6bcede36 | 3,654,616 |
def ifttt_account_options_topup_source():
""" Option values for topup source account selection"""
return ifttt_account_options(False, "Internal") | 83a0082ccc829c06c12fca2bb588db31468f51ef | 3,654,617 |
from bs4 import BeautifulSoup
def strip_classes(soup:BeautifulSoup, *args:str):
"""
Strip class from given tags in a BeautifulSoup object.
Args:
soup (BeautifulSoup): soup to clean
args ([str]): A list of tags to be unclassed
Returns:
soup (BeautifulSoup)
Modules:
bs4 (BeautifulSoup)
"""
if not args:
args = ['em', 'strong', 'sup']
# delete classes associated with selected tags:
for arg in args:
for tag in soup.find_all(arg):
if tag.has_attr('class'):
del tag.attrs['class']
    return soup
# 'trace' below is used as a ray-tracing helper from the surrounding project,
# not the stdlib 'trace' module; the import is kept as found in the source.
import trace
def take_measurement(n_grid: int, n_rays: int, r_theta: np.float64) -> (
        np.ndarray, np.ndarray, np.ndarray, np.ndarray):
"""
Take a measurement with the tomograph from direction r_theta.
Arguments:
n_grid: number of cells of grid in each direction
n_rays: number of parallel rays
r_theta: direction of rays (in radians)
Return:
intensities: measured intensities for all <n_rays> rays of the measurement. intensities[n] contains the intensity for the n-th ray
ray_indices: indices of rays that intersect a cell
isect_indices: indices of intersected cells
lengths: lengths of segments in intersected cells
The tuple (ray_indices[n], isect_indices[n], lengths[n]) stores which ray has intersected which cell with which length. n runs from 0 to the amount of ray/cell intersections (-1) of this measurement.
Raised Exceptions:
-
Side Effects:
-
"""
# compute ray direction in Cartesian coordinates
cs = np.cos(r_theta)
sn = np.sin(r_theta)
r_dir = np.array([-cs, -sn])
# compute start positions for rays
r_pos = np.zeros((n_rays, 2))
for i, g in enumerate(np.linspace(-0.99, 0.99, n_rays)):
r_pos[i] = np.array([cs - sn * g, sn + cs * g])
    else:
        # note: this for-else clause always runs once the loop finishes,
        # overwriting the first ray's start position
        r_pos[0] = np.array([cs, sn])
# compute measures intensities for each ray
intensities = np.zeros(n_rays)
for i, rs in enumerate(r_pos):
intensities[i] = trace(rs, r_dir)
# take exponential fall off into account
intensities = np.log(1.0 / intensities)
# compute traversal distance in each grid cell
ray_indices, isect_indices, lengths = grid_intersect(n_grid, r_pos, r_dir)
return intensities, ray_indices, isect_indices, lengths | f0ffac9da088402cff126bab9ee880ff33c460f1 | 3,654,619 |
def chrom_karyo_sort(chroms):
"""
:param chroms:
:return:
"""
ordered = []
unordered = []
for cname, size in chroms:
        try:
            ord_val = int(cname.lower().strip('chr'))
            ordered.append((cname, size, ord_val * 10))
        except ValueError:
            ord_val = check_special_chroms(cname)
            if ord_val > 0:
                ordered.append((cname, size, ord_val))
            else:
                unordered.append((cname, size, -1))
unordered = sorted(unordered, key=lambda x: x[1], reverse=True)
ordered = sorted(ordered, key=lambda x: x[2])
ordered.extend(unordered)
return [(t[0], t[1]) for t in ordered] | 4531be10ad0c51e0257089aabda778357b2d7950 | 3,654,620 |
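# Usage sketch. check_special_chroms is not shown above; the stand-in below
# (mapping X/Y/M to fixed karyotype positions, -1 otherwise) is an assumption
# for illustration only.
def check_special_chroms(cname):
    special = {"chrx": 230, "chry": 240, "chrm": 250}
    return special.get(cname.lower(), -1)

chroms = [("chr2", 243_000_000), ("chrX", 156_000_000),
          ("chr1", 249_000_000), ("chrUn_scaffold", 5_000)]
print(chrom_karyo_sort(chroms))
# -> [('chr1', 249000000), ('chr2', 243000000), ('chrX', 156000000),
#     ('chrUn_scaffold', 5000)]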
from typing import List
def calibrate_stereo(observations_left: List, observations_right: List, detector: FiducialCalibrationDetector,
num_radial: int = 4, tangential: bool = False, zero_skew: bool = True) -> (StereoParameters, List):
"""
Calibrates a stereo camera using a Brown camera model
    :param observations_left, observations_right: Lists of {"points":(boofcv detections),"width":(image width),"height":(image height)}, one per camera
:param detector:
:param num_radial:
:param tangential:
:param zero_skew:
:return:
"""
jlayout = detector.java_obj.getLayout(0) # Hard coded for a single target
jcalib_planar = gateway.jvm.boofcv.abst.geo.calibration.CalibrateStereoPlanar(jlayout)
jcalib_planar.configure(zero_skew, int(num_radial), tangential)
for idx in range(len(observations_left)):
jobs_left = convert_into_boof_calibration_observations(observations_left[idx])
jobs_right = convert_into_boof_calibration_observations(observations_right[idx])
jcalib_planar.addPair(jobs_left, jobs_right)
stereo_parameters = StereoParameters(jcalib_planar.process())
errors = []
for jerror in jcalib_planar.computeErrors():
errors.append({"mean": jerror.getMeanError(),
"max_error": jerror.getMaxError(),
"bias_x": jerror.getBiasX(), "bias_y": jerror.getBiasY()})
return (stereo_parameters, errors) | bf9ee5b369f8614728db0023674c85a958a2559f | 3,654,621 |
from typing import Type
def register_producer_class(cls: Type[C]) -> Type[C]:
"""Registers the producer class and returns it unmodified."""
if not cls.TYPES:
raise ProducerInterfaceError(
f"Invalid producer. When defining producer, make sure to specify at least 1 type in the TYPES class variable."
)
for artifact_type in cls.ARTIFACT_TYPES:
if not (
isclass(artifact_type) and issubclass(artifact_type, BaseArtifact)
):
raise ProducerInterfaceError(
f"Associated artifact type {artifact_type} for producer is not a class or is not a subclass of BaseArtifact."
)
artifact_types = cls.ARTIFACT_TYPES or (BaseArtifact,)
for t in cls.TYPES:
if not isclass(t):
raise ProducerInterfaceError(
f"Associated type {t} for producer is not a class."
)
producer_registry.register_producer(
t,
cls,
)
type_registry.register_artifact_type(
t,
artifact_types,
)
return cls | 7155ddb85077e2774fcc20c2d80345bd52ee86b1 | 3,654,622 |
import glob
import os
def get_file_list(var, obsname, start_date, end_date):
"""
Get a list of data set files that covers the time period defined by
start_date and end_date provided in the function call.
Parameters
----------
var: str
Input variable, e.g. 'tas'
obsname: str
Name of dataset to use, e.g. 'EOBS'
start_date: str
Start date of time period, format YYYYMM
end_date: str
End date of time period, format YYYYMM
Returns
-------
file_list: list
List of obs data files
"""
meta_data = obs_data()
data_dict = meta_data[var][obsname]
file_pattern = data_dict['file pattern']
sidx = file_pattern.find('YYYYMM')
eidx = file_pattern.rfind('YYYYMM')
obs_path_list = glob.glob(os.path.join(data_dict['path'],
file_pattern[:sidx] + '*.nc'))
obs_path_list.sort()
obs_file_list = [l.split('/')[-1] for l in obs_path_list]
obs_dates = ['{}-{}'.format(f[sidx:sidx+6], f[eidx:eidx+6])
for f in obs_file_list]
idx_start = [d.split('-')[0] <= start_date <= d.split('-')[1]
for d in obs_dates]
msg = "Files not found OR selected start date {} ".format(start_date) +\
"does not match any obs file dates!"
assert np.sum(idx_start) != 0, msg
idx_start = np.where(idx_start)[0][0]
idx_end = [d.split('-')[0] <= end_date <= d.split('-')[1]
for d in obs_dates]
msg = "Files not found OR selected end date {} ".format(end_date) +\
"does not match any obs file dates!"
assert np.sum(idx_end) != 0, msg
idx_end = np.where(idx_end)[0][0]
return obs_path_list[idx_start: idx_end + 1] | de9780faca7830a7c26864f57b077e75eb1c7246 | 3,654,623 |
def structures_at_boundaries(gdf, datamodel, areas, structures, tolerance, distance):
"""
Check if there are structures near area (typically water-level areas) boundaries.
Parameters
----------
gdf : ExtendedGeoDataframe
ExtendedGeoDataFrame, HyDAMO hydroobject layer
datamodel : HyDAMO
HyDAMO datamodel class
areas : str
HyDAMO datamodel class with areas ("peilgebiedenpraktijk")
    structures : list
        List with structure-types to be expected on the boundary
    tolerance : numeric
        Tolerance to determine if a structure is on the hydroobject
distance : numeric
Max distance between structure and area-boundary
Returns
-------
Pandas Series
Default dtype is bool
"""
areas_gdf = getattr(datamodel, areas)
areas_sindex = areas_gdf.sindex
struc_series = _layers_from_datamodel(structures, datamodel)
struc_sindex = struc_series.sindex
return gdf.apply(
lambda x: _structures_at_boundaries(
x, areas_gdf, areas_sindex, struc_series, struc_sindex, tolerance, distance
),
axis=1,
) | 6f1c83f2ac02b6773bf51f64326ed4f6e3c7c354 | 3,654,624 |
from typing import List
from typing import Tuple
from typing import Union
def above_cutoff(gene_freq_tup_list: List[Tuple[Union[str, tuple], Tuple[str, str]]], cutoff: int) -> List[str]:
"""Return the genes/edges that are are in at least the given cutoff's networks
Parameters
----------
gene_freq_tup_list : List[Tuple[Union[str, tuple], Tuple[str, str]]]
list of (comparison_object, (frequency_count, percent)) tuples in order of most common
should be return from most_common()
cutoff : int
number to be used as minimum for how many networks the object must be present in to be returned
Returns
-------
list of objects that were in at least as many networks as the cutoff given
"""
above = []
for gene, freq in gene_freq_tup_list:
if count_in_freq(freq) >= cutoff:
above.append(gene)
else:
break # since it's ordered, no need wasting time checking the rest
return above | c5679743d0b87fcbf7b6955a755aa8bbb11f5f95 | 3,654,625 |
def normalizeWindows(X):
"""
Do point centering and sphere normalizing to each window
to control for linear drift and global amplitude
Parameters
----------
X: ndarray(N, Win)
An array of N sliding windows
    Returns
    -------
XRet: ndarray(N, Win)
An array in which the mean of each row is zero
and the norm of each row is 1
"""
XRet = X - np.mean(X, 1)[:, None]
Norm = np.sqrt(np.sum(XRet**2, 1))
Norm[Norm == 0] = 1
XRet /= Norm[:, None]
return XRet | 013b5829153ee21979bcf9dac8457beb1adbe2a2 | 3,654,626 |
import numpy as np
import torch

def cost_matrix_slow(x, y=None):
    """
    Input: x is a Nxd matrix
           y is an optional Mxd matrix
Output: dist is a NxM matrix where dist[i,j] is the square norm between x[i,:] and y[j,:]
if y is not given then use 'y=x'.
i.e. dist[i,j] = ||x[i,:]-y[j,:]||^2
"""
x_norm = (x ** 2).sum(1).view(-1, 1)
if y is not None:
y_t = torch.transpose(y, 0, 1)
y_norm = (y ** 2).sum(1).view(1, -1)
else:
y_t = torch.transpose(x, 0, 1)
y_norm = x_norm.view(1, -1)
dist = x_norm + y_norm - 2.0 * torch.mm(x, y_t)
# Ensure diagonal is zero if x=y
# if y is None:
# dist = dist - torch.diag(dist.diag)
return torch.clamp(dist, 0.0, np.inf) | c27889346d6fb1a075eabf908f5e56ececb554d0 | 3,654,627 |
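# Usage sketch: the expansion ||x_i - y_j||^2 = ||x_i||^2 + ||y_j||^2
# - 2<x_i, y_j> can dip slightly below zero from rounding, hence the clamp.
import torch

x = torch.tensor([[0.0, 0.0], [1.0, 1.0]])
y = torch.tensor([[1.0, 0.0]])
print(cost_matrix_slow(x, y))  # tensor([[1.], [1.]])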
def get_dists(ts1_sax, ts2_sax, lookup_table):
"""
Compute distance between each symbol of two words (series) using a lookup table
ts1_sax and ts2_sax are two sax representations (strings) built under the same conditions
"""
# Verify integrity
if ts1_sax.shape[0] != ts2_sax.shape[0]:
return -1
# convert symbol series into series of indexes (symbol indexes)
ts1_sax_id = symbol2index(ts1_sax)
ts2_sax_id = symbol2index(ts2_sax)
# array of distances between symbols
dists = np.zeros(ts1_sax.shape[0])
for i in range(ts1_sax_id.shape[0]):
dists[i] = lookup_table[ts1_sax_id[i], ts2_sax_id[i]]
return dists | 3da21a1c57952225326c97bb7a58238545131e94 | 3,654,628 |
def get_dom_coords(string, dom):
"""Get Coordinates of a DOM specified by the string and dom number.
Parameters
----------
string : int
String number (between 1 and 86)
dom : int
DOM number (between 1 and 60)
Returns
-------
tuple(float, float, float)
The x, y, z coordinates of the DOM.
"""
assert string > 0 and string <= 86, 'String must be within [1, 86]'
    assert dom > 0 and dom <= 60, 'DOM must be within [1, 60]'
a, b = get_matrix_indices(string)
dom_id = dom - 1
return x_ic78_coords[a, b, dom_id] | 08e0817ab85e71caa38e6c247e1cc488d03ce1c0 | 3,654,629 |
def relevance_ka(x):
"""
based on code from https://www.kaggle.com/aleksandradeis/regression-addressing-extreme-rare-cases
see paper: https://www.researchgate.net/publication/220699419_Utility-Based_Regression
use the sigmoid function to create the relevance function, so that relevance function
has values close to 1 when the target variable is greater than 0.6
Args:
x: the x values for which the relevance should be returned
"""
x = np.array(x)
return sigmoid((x-0.5) * 15) | e056c8ae1b1c527dc1a875c201967eacf914d7e0 | 3,654,630 |
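# The sigmoid helper is not defined in this snippet; the standard logistic
# function is assumed here for illustration.
import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

# The relevance rises steeply around x = 0.5:
# relevance_ka([0.3, 0.5, 0.7]) -> approx. [0.047, 0.5, 0.953]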
from datetime import datetime
def now(mydateformat='%Y%m%dT%H%M%S'):
""" Return current datetime as string.
Just a shorthand to abbreviate the common task to obtain the current
datetime as a string, e.g. for result versioning.
Args:
mydateformat: optional format string (default: '%Y%m%dT%H%M%S')
Returns:
datetime.now(), formated to string with argument mydateformat, e.g.
        YYYYMMDDThhmmss ==> 20131007T123456
"""
return datetime.now().strftime(mydateformat) | f4f98116700888a4be273143d635c62859c96e03 | 3,654,631 |
import os
def getOnePackageInfo(pkgpath):
"""Gets receipt info for a single bundle-style package"""
pkginfo = {}
plist = getBundleInfo(pkgpath)
if plist:
pkginfo['filename'] = os.path.basename(pkgpath)
try:
if 'CFBundleIdentifier' in plist:
pkginfo['packageid'] = plist['CFBundleIdentifier']
elif 'Bundle identifier' in plist:
# special case for JAMF Composer generated packages.
pkginfo['packageid'] = plist['Bundle identifier']
else:
pkginfo['packageid'] = os.path.basename(pkgpath)
if 'CFBundleName' in plist:
pkginfo['name'] = plist['CFBundleName']
if 'IFPkgFlagInstalledSize' in plist:
pkginfo['installed_size'] = int(plist['IFPkgFlagInstalledSize'])
pkginfo['version'] = getBundleVersion(pkgpath)
except (AttributeError,
FoundationPlist.NSPropertyListSerializationException):
pkginfo['packageid'] = 'BAD PLIST in %s' % \
os.path.basename(pkgpath)
pkginfo['version'] = '0.0'
## now look for applications to suggest for blocking_applications
#bomlist = getBomList(pkgpath)
#if bomlist:
# pkginfo['apps'] = [os.path.basename(item) for item in bomlist
# if item.endswith('.app')]
else:
# look for old-style .info files!
infopath = os.path.join(
pkgpath, 'Contents', 'Resources', 'English.lproj')
if os.path.exists(infopath):
for item in osutils.listdir(infopath):
if os.path.join(infopath, item).endswith('.info'):
pkginfo['filename'] = os.path.basename(pkgpath)
pkginfo['packageid'] = os.path.basename(pkgpath)
infofile = os.path.join(infopath, item)
infodict = parseInfoFile(infofile)
pkginfo['version'] = infodict.get('Version', '0.0')
pkginfo['name'] = infodict.get('Title', 'UNKNOWN')
break
return pkginfo | 5161eaf806a0f616814c92d2b9693a02c16bdff4 | 3,654,632 |
from datetime import datetime
def cmp_point_identities(a, b):
"""
Given point identities a, b (may be string, number, date, etc),
collation algorithm compares:
(a) strings case-insensitively
(b) dates and datetimes compared by normalizing date->datetime.
(c) all other types use __cmp__(self, other) defaults from type.
"""
dt = lambda d: datetime(*d.timetuple()[0:6]) # date|datetime -> datetime
if isinstance(a, basestring) and isinstance(b, basestring):
return cmp(a.upper(), b.upper())
if isinstance(a, date) or isinstance(b, date):
return cmp(dt(a), dt(b))
return cmp(a, b) | 475206398fc0c2f301446c5c264bf67d1671a2ad | 3,654,633 |
def shortest_complement(t, m, l):
"""
Given a primitive slope t and the holonomies of the current
meridian and longitude, returns a shortest complementary slope s
so that s.t = +1.
"""
c, d = t # second slope
_, a, b = xgcd(d, c) # first slope
b = -b
assert a*d - b*c == 1
return a_shortest_lattice_point_on_line((a, b), (c, d), m, l) | 4653a7eac7af7ed8a67ce298f1453236cfeabf73 | 3,654,634 |
def run_pii(text, lang):
"""
    Runs the given set of regexes on the input text and pulls out the
    tagged items.
    The language tag can be used for language-specific regexes, although
    we're dropping that for now and using only "default"/non-language-specific
    regexes.
"""
#print('Detecting....')
# What is this for...?
text = text.encode().decode()
matches = detect_pii(text, lang, high_risk_tags)
#print(matches)
match_set = (text, {})
if len(matches) > 0:
# !!! REDACTION HAPPENS HERE !!!
redacted_str, metadata = redact_pii(text, matches)
metadata_out = {"regex metadata":metadata, "original": text, "redacted": redacted_str}
match_set = (redacted_str, metadata_out)
return match_set | e9f34686be27773952f64a9231e86c76c0170483 | 3,654,635 |
def get_ref_cat(butler, visit, center_radec, radius=2.1):
"""
Get the reference catalog for the desired visit for the requested
sky location and sky cone radius.
"""
ref_cats = RefCat(butler)
try:
band = list(butler.subset('src', visit=visit))[0].dataId['filter']
except dp.butlerExceptions.NoResults:
band = list(butler.subset('src', expId=visit))[0].dataId['filter']
centerCoord = lsst_geom.SpherePoint(center_radec[0]*lsst_geom.degrees,
center_radec[1]*lsst_geom.degrees)
return ref_cats(centerCoord, band, radius) | d2814729aeb775668d6eff6fdc68a0676168b16e | 3,654,636 |
def replace_dict(d, **kwargs):
"""
Replace values by keyword on a dict, returning a new dict.
"""
e = d.copy()
e.update(kwargs)
return e | be1cc21be5320eeea13307dd4ed5025b51339eec | 3,654,637 |
def pageHeader(
headline="",
tagline=""):
"""
*Generate a pageHeader - TBS style*
**Key Arguments:**
- ``headline`` -- the headline text
- ``tagline`` -- the tagline text for below the headline
**Return:**
- ``pageHeader`` -- the pageHeader
"""
pageHeader = """
<div class="page-header" id=" ">
<h1>%(headline)s<br><small>%(tagline)s</small></h1>
</div>""" % locals()
return pageHeader | 7d9e91df8af2fff92b0b7096cd1a13198d899e15 | 3,654,638 |
def get_counter_merge_suggestion(merge_suggestion_tokens):
"""Return opposite of merge suggestion
Args:
merge_suggestion_tokens (list): tokens in merge suggestion
Returns:
str: opposite of merge suggestion
"""
counter_merge_suggestion = ' '.join(merge_suggestion_tokens)
if merge_suggestion_tokens[-1][-1] == '་':
counter_merge_suggestion += " "
return counter_merge_suggestion | e32e0f1b64fe77acaa8d88d72dca9304b7427674 | 3,654,639 |
import re
from datetime import datetime
import pytz
def parse_rfc3339_utc_string(rfc3339_utc_string):
"""Converts a datestamp from RFC3339 UTC to a datetime.
Args:
rfc3339_utc_string: a datetime string in RFC3339 UTC "Zulu" format
Returns:
A datetime.
"""
# The timestamp from the Google Operations are all in RFC3339 format, but
# they are sometimes formatted to millisconds, microseconds, sometimes
# nanoseconds, and sometimes only seconds:
# * 2016-11-14T23:05:56Z
# * 2016-11-14T23:05:56.010Z
# * 2016-11-14T23:05:56.010429Z
# * 2016-11-14T23:05:56.010429380Z
m = re.match(r'(\d{4})-(\d{2})-(\d{2})T(\d{2}):(\d{2}):(\d{2}).?(\d*)Z',
rfc3339_utc_string)
# It would be unexpected to get a different date format back from Google.
# If we raise an exception here, we can break people completely.
# Instead, let's just return None and people can report that some dates
# are not showing up.
# We might reconsider this approach in the future; it was originally
# established when dates were only used for display.
if not m:
return None
groups = m.groups()
if len(groups[6]) not in (0, 3, 6, 9):
return None
# Create a UTC datestamp from parsed components
# 1- Turn components 0-5 from strings to integers
# 2- If the last component does not exist, set it to 0.
# If it does exist, make sure to interpret it as milliseconds.
g = [int(val) for val in groups[:6]]
fraction = groups[6]
if not fraction:
micros = 0
elif len(fraction) == 3:
micros = int(fraction) * 1000
elif len(fraction) == 6:
micros = int(fraction)
    elif len(fraction) == 9:
        # when nanoseconds are provided, round to microseconds
        micros = int(round(int(fraction) / 1000))
    else:
        assert False, 'Fraction length not 0, 3, 6, or 9: {}'.format(len(fraction))
    try:
        return datetime(
            g[0], g[1], g[2], g[3], g[4], g[5], micros, tzinfo=pytz.utc)
except ValueError as e:
assert False, 'Could not parse RFC3339 datestring: {} exception: {}'.format(
rfc3339_utc_string, e) | 04653bd5673c5ca7713c9e6014947886781f3f5e | 3,654,640 |
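# Usage sketch, mirroring the formats listed in the comments above:
# parse_rfc3339_utc_string('2016-11-14T23:05:56Z')
#   -> datetime(2016, 11, 14, 23, 5, 56, tzinfo=pytz.utc)
# parse_rfc3339_utc_string('2016-11-14T23:05:56.010429Z')
#   -> datetime(2016, 11, 14, 23, 5, 56, 10429, tzinfo=pytz.utc)
# parse_rfc3339_utc_string('not a timestamp') -> None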
from datetime import datetime
def response(code, body='', etag=None, last_modified=None, expires=None, **kw):
"""Helper to build an HTTP response.
Parameters:
code
: An integer status code.
body
: The response body. See `Response.__init__` for details.
etag
: A value for the ETag header. Double quotes will be added unless the
string starts and ends with a double quote.
last_modified
: A value for the Last-Modified header as a datetime.datetime object
or Unix timestamp.
expires
: A value for the Expires header as number of seconds, datetime.timedelta
or datetime.datetime object.
Note: a value of type int or float is interpreted as a number of
seconds in the future, *not* as Unix timestamp.
**kw
: All other keyword arguments are interpreted as response headers.
The names will be converted to header names by replacing
underscores with hyphens and converting to title case
(e.g. `x_powered_by` => `X-Powered-By`).
"""
if etag is not None:
if not (etag[0] == '"' and etag[-1] == '"'):
etag = '"%s"' % etag
kw['etag'] = etag
if last_modified is not None:
kw['last_modified'] = datetime_to_httpdate(last_modified)
if expires is not None:
if isinstance(expires, datetime):
kw['expires'] = datetime_to_httpdate(expires)
else:
kw['expires'] = timedelta_to_httpdate(expires)
headers = [(k.replace('_', '-').title(), v) for k, v in sorted(kw.items())]
return Response(code, headers, body) | 094e7dc99114d4b742808c0aa123001fb301fb14 | 3,654,641 |
def oauth2callback():
"""
The 'flow' has this one place to call back to. We'll enter here
more than once as steps in the flow are completed, and need to keep
track of how far we've gotten. The first time we'll do the first
step, the second time we'll skip the first step and do the second,
and so on.
"""
app.logger.debug("Entering oauth2callback")
app.logger.debug(flask.url_for('oauth2callback', _external=True))
#return_url = "http://localhost:5000" + flask.url_for('oauth2callback')
flow = client.flow_from_clientsecrets(
CLIENT_SECRET_FILE,
scope= SCOPES,
redirect_uri=flask.url_for('oauth2callback', _external=True))
## Note we are *not* redirecting above. We are noting *where*
## we will redirect to, which is this function.
## The *second* time we enter here, it's a callback
## with 'code' set in the URL parameter. If we don't
## see that, it must be the first time through, so we
## need to do step 1.
app.logger.debug("Got flow")
if 'code' not in flask.request.args:
app.logger.debug("Code not in flask.request.args")
auth_uri = flow.step1_get_authorize_url()
return flask.redirect(auth_uri)
## This will redirect back here, but the second time through
## we'll have the 'code' parameter set
else:
## It's the second time through ... we can tell because
## we got the 'code' argument in the URL.
app.logger.debug("Code was in flask.request.args")
auth_code = flask.request.args.get('code')
credentials = flow.step2_exchange(auth_code)
flask.session['credentials'] = credentials.to_json()
## Now I can build the service and execute the query,
## but for the moment I'll just log it and go back to
## the main screen
app.logger.debug("Got credentials")
return flask.redirect(flask.url_for('getCalendars',muID=flask.session['meetupId'])) | 07c232275ad93d2b8d47a6d0b7de03f57fa356c8 | 3,654,642 |
from datetime import datetime, timezone
import click
def parse_tweet(raw_tweet, source, now=None):
"""
Parses a single raw tweet line from a twtxt file
and returns a :class:`Tweet` object.
:param str raw_tweet: a single raw tweet line
:param Source source: the source of the given tweet
:param Datetime now: the current datetime
:returns: the parsed tweet
:rtype: Tweet
"""
if now is None:
now = datetime.now(timezone.utc)
raw_created_at, text = raw_tweet.split("\t", 1)
created_at = parse_iso8601(raw_created_at)
if created_at > now:
raise ValueError("Tweet is from the future")
return Tweet(click.unstyle(text.strip()), created_at, source) | 85f90ce469091cc82dd120e6c100859f8bcc8f2c | 3,654,643 |
import requests
def scopes(request, coalition_id):
"""
Update coalition required scopes with a specific set of scopes
"""
scopes = []
for key in request.POST:
if key in ESI_SCOPES:
scopes.append(key)
url = f"{GLOBAL_URL}/{coalition_id}"
headers = global_headers(request, {"Content-type": "application/json"})
data = "{\"mandatory_esi_scopes\": [\"" + "\",\"".join(scopes) + "\"]}"
request_change_scopes = requests.put(url, headers=headers, data=data)
if request_change_scopes.status_code != 200:
return render_error(request_change_scopes)
params = urlencode({"changed_scopes": "true"})
return_url = reverse("coalition-sheet", args=[coalition_id]) + "?" + params
return redirect(return_url) | 71d07be26815a8e30ed37074b4452fa7574d07b5 | 3,654,644 |
def recursive_dictionary_cleanup(dictionary):
"""Recursively enrich the dictionary and replace object links with names etc.
These patterns are replaced:
[phobostype, bpyobj] -> {'object': bpyobj, 'name': getObjectName(bpyobj, phobostype)}
Args:
dictionary(dict): dictionary to enrich
Returns:
        : dict -- dictionary with replaced/enriched patterns
"""
for key, value in dictionary.items():
# handle everything as list, so we can loop over it
unlist = False
if not isinstance(value, list):
value = [value]
unlist = True
itemlist = []
for item in value:
if isinstance(item, list) and item:
# (phobostype, bpyobj) -> {'object': bpyobj, 'name': getObjectName(bpyobj)}
if (
len(item) == 2
and isinstance(item[0], str)
and (item[0] in ['joint'] + [enum[0] for enum in defs.phobostypes])
and isinstance(item[1], bpy.types.Object)
):
itemlist.append(
{
'object': item[1],
'name': nUtils.getObjectName(item[1], phobostype=item[0]),
}
)
# recursion on subdictionaries
elif isinstance(item, dict):
itemlist.append(recursive_dictionary_cleanup(item))
else:
itemlist.append(item)
# extract single items back out of the list
dictionary[key] = itemlist if not unlist else itemlist[0]
return dictionary | b49798dd1918401951bae57e544406ee1d14ebd6 | 3,654,645 |
def validate_dtype(dtype_in):
"""
    Input is an argument representing one or more datatypes.
    When given per column, the number of datatypes has to match the number of columns in the csv file:
dtype = [pa.int32(), pa.int32(), pa.int32(), pa.int32()]
dtype = {'__columns__': [pa.int32(), pa.int32(), pa.int32(), pa.int32()]}
Default:
dtype_in = pa.int32()
dtype_out = {'__default__': pa.int32()}
Not yet supported:
Default, optional column overwrite:
dtype_in = {'__default__': pa.int32(), '__columns__': {'colname': pa.int32()}}
dtype_out = raise ValueError
dtype_in = {'colname': pa.int32()}
dtype_out = raise ValueError
"""
if dtype_in is None:
# use default datatype
dtype_in = pa.float32()
argtype = type(dtype_in)
valid_types = _dtypes_from_arrow()
if argtype is pa.DataType:
if dtype_in not in list(valid_types.keys()):
raise ValueError('Not supporting type: ' + dtype_in.__str__())
return {'__default__': valid_types[dtype_in]}
if argtype is dict:
raise ValueError('Not yet supported dict')
if argtype is list and dtype_in.__len__() > 0:
matches = [dtype in list(valid_types.keys()) for dtype in dtype_in]
if False in matches:
mismatches = [dtype_in[j].__str__() + '(column:' + str(j) + ')'
for j in range(0, len(matches)) if matches[j] is False]
raise ValueError('List contains unsupported datatype: ' + ','.join(mismatches))
if set(dtype_in).__len__() == 1:
# all list members are of same type
return {'__default__': valid_types[dtype_in[0]]}
return {'__columns__': list([valid_types[dtype] for dtype in dtype_in])}
raise ValueError('No input to match datatypes') | 274df2e010314867f31c14951f1e0b18190218ad | 3,654,646 |
import os
import sys
def get_secret(name):
"""Load a secret from file or env
Either provide ``{name}_FILE`` or ``{name}`` in the environment to
configure the value for ``{name}``.
"""
try:
with open(os.environ[name + "_FILE"]) as secret_file:
return secret_file.read().strip()
except (FileNotFoundError, PermissionError, KeyError):
try:
return os.environ[name]
except KeyError:
if os.path.basename(sys.argv[0]) == 'sphinx-build':
# We won't have nor need secrets when building docs
return None
raise ValueError(
f"Missing secrets: configure {name} or {name}_FILE to contain or point at secret"
) from None | 3b240f7b494c7817f58c8ab3f7f9000ff7f85844 | 3,654,647 |
from typing import Any, Callable, Dict, List, TypeVar

# T is assumed to be a TypeVar here (the original `from re import T` pulled in
# an unrelated regex flag, which cannot parameterize BaseCache[T])
T = TypeVar("T")
def cache(
cache_class: Callable[[], base_cache.BaseCache[T]],
serializer: Callable[[], cache_serializer.CacheSerializer],
conditional: Callable[[List[Any], Dict[str, Any]], bool] = _always_true):
"""
cache
=====
parameters:
cache_class (base_cache.BaseCache)
conditional (Callable[[List[Any], Dict[str, Any]])
Decorator that caches function results using the provided class. The class
must be a subclass of base_cache, providing get and set methods with
appropriate signatures.
An optional conditional can be passed, which receives the *args and
**kwargs of the called function. This function determines whether or not to
cache, or to always recompute, based on whether it returns True or False.
"""
serializer_instance = serializer()
cache_instance = cache_class()
return curry(_wrapper, cache_instance, serializer_instance, conditional) | c4d5318d471e13f5001eb12b6d3dc4f278478855 | 3,654,648 |
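# Usage sketch for the cache factory. BaseCache/CacheSerializer subclasses and
# the curry/_wrapper helpers are not shown in this snippet, so the names below
# are assumptions for illustration only.
#
# @cache(cache_class=InMemoryCache, serializer=JsonSerializer,
#        conditional=lambda args, kwargs: len(args[0]) > 100)
# def summarize(text: str) -> dict:
#     ...
#
# Calls with short inputs (conditional False) always recompute; longer inputs
# are serialized and served from the InMemoryCache instance on repeat calls.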
def words2chars(images, labels, gaplines):
""" Transform word images with gaplines into individual chars """
# Total number of chars
length = sum([len(l) for l in labels])
imgs = np.empty(length, dtype=object)
newLabels = []
height = images[0].shape[0]
    idx = 0
for i, gaps in enumerate(gaplines):
for pos in range(len(gaps) - 1):
imgs[idx] = images[i][0:height, gaps[pos]:gaps[pos+1]]
newLabels.append(char2idx(labels[i][pos]))
idx += 1
print("Loaded chars from words:", length)
return imgs, newLabels | e04bf5b1e9b47c2f930600433b4214343e067f26 | 3,654,649 |
def create_spark_session(spark_jars: str) -> SparkSession:
"""
Create Spark session
:param spark_jars: Hadoop-AWS JARs
:return: SparkSession
"""
spark = SparkSession \
.builder \
.config("spark.jars.packages", spark_jars) \
.appName("Sparkify ETL") \
.getOrCreate()
return spark | 576072460e465610fff98da377cc20a8472c537f | 3,654,650 |
from datetime import datetime
def make_expired(request, pk):
"""
将号码状态改为过号
"""
try:
reg = Registration.objects.get(pk=pk)
except Registration.DoesNotExist:
return Response('registration not found', status=status.HTTP_404_NOT_FOUND)
data = {
'status': REGISTRATION_STATUS_EXPIRED
}
serializer = RegistrationSerializer(reg, data=data, partial=True)
if serializer.is_valid():
reg = serializer.save()
reg.end_time = datetime.datetime.now()
reg.save()
        # Notify the n-th customer waiting behind that their table is ready
_notify_ready(reg.table_type)
return Response(serializer.data, status=status.HTTP_200_OK)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) | 827f45c12bcbb973eb073662d8a14765422fdf51 | 3,654,651 |
def word2vec_similarity(segmented_topics, accumulator, with_std=False, with_support=False):
"""For each topic segmentation, compute average cosine similarity using a
:class:`~gensim.topic_coherence.text_analysis.WordVectorsAccumulator`.
Parameters
----------
segmented_topics : list of lists of (int, `numpy.ndarray`)
Output from the :func:`~gensim.topic_coherence.segmentation.s_one_set`.
accumulator : :class:`~gensim.topic_coherence.text_analysis.WordVectorsAccumulator` or
:class:`~gensim.topic_coherence.text_analysis.InvertedIndexAccumulator`
Word occurrence accumulator.
with_std : bool, optional
True to also include standard deviation across topic segment sets
in addition to the mean coherence for each topic.
with_support : bool, optional
True to also include support across topic segments. The support is defined as
the number of pairwise similarity comparisons were used to compute the overall topic coherence.
Returns
-------
list of (float[, float[, int]])
        Cosine word2vec similarities per topic (with std/support if `with_std`, `with_support`).
Examples
--------
.. sourcecode:: pycon
>>> import numpy as np
>>> from gensim.corpora.dictionary import Dictionary
>>> from gensim.topic_coherence import indirect_confirmation_measure
>>> from gensim.topic_coherence import text_analysis
>>>
>>> # create segmentation
>>> segmentation = [[(1, np.array([1, 2])), (2, np.array([1, 2]))]]
>>>
>>> # create accumulator
>>> dictionary = Dictionary()
>>> dictionary.id2token = {1: 'fake', 2: 'tokens'}
>>> accumulator = text_analysis.WordVectorsAccumulator({1, 2}, dictionary)
>>> _ = accumulator.accumulate([['fake', 'tokens'], ['tokens', 'fake']], 5)
>>>
>>> # should be (0.726752426218 0.00695475919227)
>>> mean, std = indirect_confirmation_measure.word2vec_similarity(segmentation, accumulator, with_std=True)[0]
"""
topic_coherences = []
total_oov = 0
for topic_index, topic_segments in enumerate(segmented_topics):
segment_sims = []
num_oov = 0
for w_prime, w_star in topic_segments:
if not hasattr(w_prime, '__iter__'):
w_prime = [w_prime]
if not hasattr(w_star, '__iter__'):
w_star = [w_star]
try:
segment_sims.append(accumulator.ids_similarity(w_prime, w_star))
except ZeroDivisionError:
num_oov += 1
if num_oov > 0:
total_oov += 1
logger.warning(
"%d terms for topic %d are not in word2vec model vocabulary",
num_oov, topic_index)
topic_coherences.append(aggregate_segment_sims(segment_sims, with_std, with_support))
if total_oov > 0:
logger.warning("%d terms for are not in word2vec model vocabulary", total_oov)
return topic_coherences | 1a3d439e75c4732138f42ea14e7fd50eb6e7d5cb | 3,654,652 |
def addGroupsToKey(server, activation_key, groups):
"""
    Add server groups to an activation key
CLI Example:
.. code-block:: bash
salt-run spacewalk.addGroupsToKey spacewalk01.domain.com 1-my-key '[group1, group2]'
"""
try:
client, key = _get_session(server)
except Exception as exc: # pylint: disable=broad-except
err_msg = "Exception raised when connecting to spacewalk server ({}): {}".format(
server, exc
)
log.error(err_msg)
return {"Error": err_msg}
all_groups = client.systemgroup.listAllGroups(key)
groupIds = []
for group in all_groups:
if group["name"] in groups:
groupIds.append(group["id"])
if client.activationkey.addServerGroups(key, activation_key, groupIds) == 1:
return {activation_key: groups}
else:
return {activation_key: "Failed to add groups to activation key"} | 346690a9eac24f62f4410b23f60bb589d174c9ed | 3,654,653 |
def get_user_for_delete():
"""Query for Users table."""
delete_user = Users.query \
.get(DELETE_USER_ID)
return delete_user | 208dbbe47550c6889848b7ff61324acf23a4c495 | 3,654,654 |
from typing import List
from typing import Optional
def station_code_from_duids(duids: List[str]) -> Optional[str]:
"""
Derives a station code from a list of duids
ex.
BARRON1,BARRON2 => BARRON
OSBAG,OSBAG => OSBAG
"""
if type(duids) is not list:
return None
if not duids:
return None
if len(duids) == 0:
return None
duids_uniq = list(set(duids))
common = findcommonstart(duids_uniq)
if not common:
return None
# strip last character if we have one
if is_single_number(common[-1]):
common = common[:-1]
if common.endswith("_"):
common = common[:-1]
if len(common) > 2:
return common
return None | 1f976ee0b7a82453673ea07c20070e502df5fcf5 | 3,654,655 |
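# Usage sketch (relies on the findcommonstart/is_single_number helpers from
# the same module, which are not shown here):
# station_code_from_duids(["BARRON1", "BARRON2"]) -> "BARRON"
# station_code_from_duids(["OSBAG", "OSBAG"])     -> "OSBAG"
# station_code_from_duids([])                     -> None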
def erosion(image, selem, out=None, shift_x=False, shift_y=False):
"""Return greyscale morphological erosion of an image.
Morphological erosion sets a pixel at (i,j) to the minimum over all pixels
in the neighborhood centered at (i,j). Erosion shrinks bright regions and
enlarges dark regions.
Parameters
----------
image : ndarray
Image array.
selem : ndarray
The neighborhood expressed as a 2-D array of 1's and 0's.
out : ndarray
The array to store the result of the morphology. If None is
passed, a new array will be allocated.
shift_x, shift_y : bool
shift structuring element about center point. This only affects
eccentric structuring elements (i.e. selem with even numbered sides).
Returns
-------
eroded : uint8 array
The result of the morphological erosion.
Examples
--------
>>> # Erosion shrinks bright regions
>>> import numpy as np
>>> from skimage.morphology import square
>>> bright_square = np.array([[0, 0, 0, 0, 0],
... [0, 1, 1, 1, 0],
... [0, 1, 1, 1, 0],
... [0, 1, 1, 1, 0],
... [0, 0, 0, 0, 0]], dtype=np.uint8)
>>> erosion(bright_square, square(3))
array([[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]], dtype=uint8)
"""
if image is out:
raise NotImplementedError("In-place erosion not supported!")
image = img_as_ubyte(image)
selem = img_as_ubyte(selem)
return cmorph._erode(image, selem, out=out,
shift_x=shift_x, shift_y=shift_y) | 2e7c2547b862add24cc6a4355cf3e0308cb2f342 | 3,654,656 |
def NE(x=None, y=None):
"""
Compares two values and returns:
true when the values are not equivalent.
false when the values are equivalent.
See https://docs.mongodb.com/manual/reference/operator/aggregation/ne/
for more details
:param x: first value or expression
:param y: second value or expression
:return: Aggregation operator
"""
if x is None and y is None:
return {'$ne': []}
return {'$ne': [x, y]} | be721daf480ec0cb465a3c010c4f910a10fbbb1d | 3,654,657 |
def TCPs_from_tc(type_constraint):
"""
Take type_constraint(type_param_str, allowed_type_strs) and return list of TypeConstraintParam
"""
tys = type_constraint.allowed_type_strs # Get all ONNX types
tys = set(
[onnxType_to_Type_with_mangler(ty) for ty in tys]
) # Convert to Knossos and uniquify
return [
TypeConstraintParam(type_constraint.type_param_str, ty) for ty in tys
] | 7c2162bb2dde0b00caf289511f20804cadaa17e5 | 3,654,658 |
def _randomde(allgenes,
allfolds,
size):
"""Randomly select genes from the allgenes array and fold changes from the
allfolds array. Size argument indicates how many to draw.
Parameters
----------
allgenes : numpy array
numpy array with all the genes expressed in the cells where de is generated
allfolds : numpy array
an array of fold changes from which the simulation should draw
size : int
number of non-zero weights (typically number of DE genes)
Returns
-------
type : PandasDataFrame
DataFrame with randomly chosen genes and weights.
"""
rdgenes = np.random.choice(allgenes, size, replace = False)
rdfolds = np.random.choice(allfolds, size, replace = False)
rdDF = pd.DataFrame({'id' : rdgenes, 'weights' : rdfolds})
    return rdDF
import time
def XCor(spectra, mask_l, mask_h, mask_w, vel, lbary_ltopo, vel_width=30,\
vel_step=0.3, start_order=0, spec_order=9,iv_order=10,sn_order=8,max_vel_rough=300.):
"""
Calculates the cross-correlation function for a Coralie Spectra
"""
# speed of light, km/s
c = 2.99792458E5
# loop over orders
norders = spectra.shape[1]
# determine minimum velocities
vel_min = vel - vel_width
vel_max = vel + vel_width
N = int(np.ceil( (2*vel_width) / vel_step ))
Xcor_full = np.zeros( (N, norders+1) )
sn = np.zeros( (norders) )
nlines_used = np.zeros( (norders) )
velocities = vel_min + np.arange( N ) * vel_step
Xcor_full[:,0] = velocities
weight=0.0
mask_middle = 0.5*(mask_l + mask_h)
W = np.zeros( norders )
vwt = 300
for j in range(start_order,norders):
t1 = time.time()
LL = np.where( spectra[spec_order,j,:] != 0 )
if len(LL[0]) > 0:
x1 = np.min( LL )
x2 = np.max( LL )
w1 = np.argmin( np.absolute( spectra[0,j,:] - spectra[0,j,x1] ) )
w2 = np.argmin( np.absolute( spectra[0,j,:] - spectra[0,j,x2] ) )
l1_0 = spectra[0,j,w1] / lbary_ltopo
l2_0 = spectra[0,j,w2] / lbary_ltopo
ww1 = np.argmin( np.abs( spectra[0,j,:] - l1_0*(1+(31+max_vel_rough)/c) ) )
ww2 = np.argmin( np.abs( spectra[0,j,:] - l2_0*(1-(31+max_vel_rough)/c) ) )
# should not happen, but hey, just in case...
if (ww1 < w1):
ww1 = w1
if (ww2 > w2):
ww2 = w2
l1 = spectra[0,j,ww1]
l2 = spectra[0,j,ww2]
II = np.where( (mask_l > l1) & (mask_h < l2) )
#if len(II[0])>0:
#print j,II[0][0],II[0][-1]
nlu = len(II[0])
nlines_used[j] = nlu
snw1 = int(0.25*spectra.shape[2])
snw2 = int(0.75*spectra.shape[2])
if (nlu > 0):
# calculate median S/N
#median_sn = np.median( spectra[5,j,w1:w2] * np.sqrt( spectra[6,j,w1:w2] ) )
median_sn = np.median( spectra[sn_order,j,snw1:snw2] )
sn[j] = median_sn
S = spectra[spec_order,j,w1:w2]
#iv = spectra[iv_order,j,w1:w2]
signal2noise = spectra[sn_order,j,w1:w2]
snwa = np.zeros(N)
for k in range(N):
#print k
Xcor_full[k,j+1], snw = CCF.ccfcos(mask_l[II], mask_h[II], spectra[0,j,w1:w2], S,\
mask_w[II], signal2noise, vel_min + k*vel_step)
snwa[k] = snw
if np.isnan(Xcor_full[k,j+1]):
Xcor_full[k,j+1] = Xcor_full[k-1,j+1]
snwa[k] = snwa[k-1]
#if k ==182 and j==35:
# #print mask_l[II], mask_h[II], spectra[0,j,w1:w2], S,mask_w[II], signal2noise, vel_min + k*vel_step
# #for z in range(len(mask_l[II])):
# # III = np.where((spectra[0,j,w1:w2]>=mask_l[II][z])&(spectra[0,j,w1:w2]<=mask_h[II][z]))[0]
# # print spectra[0,j,w1:w2][III],S[III]
# #print Xcor_full[k,j+1]
# #print snw
# #print gfd
xc_weight = np.median( snwa )
Xcor_full[:,j+1] /= snwa #xc_weight
W[j] = xc_weight
return velocities, Xcor_full, sn, nlines_used, W | 7007c56e5173999b9d20dfbd8018133a59bb777c | 3,654,660 |
import json
def marks_details(request, pk):
"""
Display details for a given Mark
"""
# Check permission
if not has_access(request):
raise PermissionDenied
# Get context
context = get_base_context(request)
# Get object
mark = get_object_or_404(Mark, pk=pk)
mark.category_clean = mark.get_category_display()
context['mark'] = mark
# Get users connected to the mark
context['mark_users'] = mark.given_to.all()
# AJAX
if request.method == 'POST':
if request.is_ajax and 'action' in request.POST:
resp = {'status': 200}
context, resp = _handle_mark_detail(request, context, resp)
# Set mark
resp['mark'] = {'last_changed_date': context['mark'].last_changed_date.strftime("%Y-%m-%d"),
'last_changed_by': context['mark'].last_changed_by.get_full_name()}
# Return ajax
return HttpResponse(json.dumps(resp), status=resp['status'])
# Render view
return render(request, 'marks/dashboard/marks_details.html', context) | 1f193c67f1e047ecd6da0e5eec1d29da50f6595e | 3,654,661 |
from bs4 import BeautifulSoup
def clean_text(text):
"""
text: a string
return: modified initial string
"""
text = BeautifulSoup(text, "lxml").text # HTML decoding
text = text.lower() # lowercase text
# replace REPLACE_BY_SPACE_RE symbols by space in text
text = REPLACE_BY_SPACE_RE.sub(' ', text)
# delete symbols which are in BAD_SYMBOLS_RE from text
text = BAD_SYMBOLS_RE.sub('', text)
    # delete stopwords from text
text = ' '.join(word for word in text.split() if word not in STOPWORDS)
return text | 6594bd61c2f1ff885948755a0dfc74e7256b9a3e | 3,654,662 |
import logging
import sys
def get_input_device(config):
""" Create the InputDevice instance and handle errors """
dev_path = config['flirc_device_path']
logging.debug('get_input_device() dev_path: %s', dev_path)
try:
input_device = InputDevice(dev_path)
return input_device
except FileNotFoundError as exception:
logging.error('Error opening device path %s', dev_path)
logging.error('Error was: %s', exception)
logging.error('FLIRC is likely not attached or the device path (FLIRC_DEV_PATH) is wrong')
sys.exit(1) | fef51e92f6e81ff873d0df821e871545a86eb900 | 3,654,663 |
def _simpsons_interaction(data, groups):
"""
Calculation of Simpson's Interaction index
Parameters
----------
data : a pandas DataFrame
groups : list of strings.
The variables names in data of the groups of interest of the analysis.
Returns
-------
    statistic : float
        Simpson's Interaction Index
    core_data : a pandas DataFrame
        A pandas DataFrame that contains the columns used to perform the estimate.
    groups : list of strings
        The group columns used in the estimate.
Notes
-----
Based on Equation 1 of page 37 of Reardon, Sean F., and Glenn Firebaugh. "Measures of multigroup segregation." Sociological methodology 32.1 (2002): 33-67.
Simpson's interaction index (I) can be simply interpreted as the probability that two individuals chosen at random and independently from the population will be found to not belong to the same group.
Higher values means lesser segregation.
Simpson's Concentration + Simpson's Interaction = 1
Reference: :cite:`reardon2002measures`.
"""
    core_data = data[groups]
    data = _nan_handle(core_data)
    df = np.array(data)  # use the NaN-handled data, not the raw core_data
Pk = df.sum(axis=0) / df.sum()
I = (Pk * (1 - Pk)).sum()
return I, core_data, groups | d7c4bc8bfb2d6db17868f0d140c2547e65cfd666 | 3,654,664 |
import os
def find_includes(armnn_include_env: str = INCLUDE_ENV_NAME):
"""Searches for ArmNN includes.
Args:
armnn_include_env(str): Environmental variable to use as path.
Returns:
list: A list of paths to include.
"""
armnn_include_path = os.getenv(armnn_include_env)
if armnn_include_path is not None and os.path.exists(armnn_include_path):
armnn_include_path = [armnn_include_path]
else:
armnn_include_path = ['/usr/local/include', '/usr/include']
return armnn_include_path | b601f8253d9c7e3f1966f0b5e8810d8f12576621 | 3,654,665 |
import logging
import sys

logger = logging.getLogger(__name__)

# Assumption: OidSet is a plain set of integer OIDs; the original alias is
# defined elsewhere in the package.
OidSet = set
def read_missing_oids(oid_lines):
"""
Parse lines into oids.
>>> list(read_missing_oids([
... "!!! Users 0 ?", "POSKeyError: foo",
... "!!! Users 0 ?",
... "!!! Users 1 ?",
... "bad xref name, 1", "bad db", ]))
[0, 1]
"""
result = OidSet()
for line in oid_lines:
if line.startswith('bad') or ':' in line:
continue
if line.startswith('!!!'):
# zc.zodbdgc output. bad OID is always the
# third field.
try:
oid = int(line.split()[2])
except (ValueError, IndexError):
logger.info("Malformed zc.zodbdgc input: %s", line)
continue
result.add(oid)
else:
# Just an int
try:
oid = int(line)
except ValueError:
logger.info("Malformed input: %s", line)
else:
result.add(oid)
    # Plain iterables (as in the doctest) have no close(); only close file-likes
    if oid_lines is not sys.stdin and hasattr(oid_lines, 'close'):
        oid_lines.close()
return result | 0f408ba7673fa78b57b2533d9ffe8ab5296a633c | 3,654,666 |
import json
def run(target='192.168.1.1', ports=[21,22,23,25,80,110,111,135,139,443,445,554,993,995,1433,1434,3306,3389,8000,8008,8080,8888]):
"""
Run a portscan against a target hostname/IP address
`Optional`
:param str target: Valid IPv4 address
:param list ports: Port numbers to scan on target host
    :returns: Results in a nested dictionary object in JSON format;
        online targets & open ports as key-value pairs
"""
global tasks
global threads
global results
if not util.ipv4(target):
raise ValueError("target is not a valid IPv4 address")
if _ping(target):
for port in ports:
tasks.put_nowait((_scan, (target, port)))
for i in range(1, tasks.qsize()):
threads['portscan-%d' % i] = _threader()
for t in threads:
threads[t].join()
return json.dumps(results[target])
else:
return "Target offline" | 201e4dc1809553eb4fb57848d9e5f8001ccdef23 | 3,654,667 |
import numpy as np
import torch
def subsequent_mask(size: int):
"""
Mask out subsequent positions (to prevent attending to future positions)
Transformer helper function.
:param size: size of mask (2nd and 3rd dim)
:return: Tensor with 0s and 1s of shape (1, size, size)
"""
mask = np.triu(np.ones((1, size, size)), k=1).astype("uint8")
return torch.from_numpy(mask) == 0 | f4e40d2e9ac944d3582ed16088e8096f75a5f29e | 3,654,668 |
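# Usage sketch: position i may attend only to positions <= i.
print(subsequent_mask(3))
# tensor([[[ True, False, False],
#          [ True,  True, False],
#          [ True,  True,  True]]])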
from typing import Type

import gym
from gym.spaces import Box, Discrete
from ray.rllib.models import ModelCatalog
from ray.rllib.models.torch.torch_action_dist import (
    TorchBeta,
    TorchCategorical,
    TorchDiagGaussian,
    TorchDirichlet,
    TorchDistributionWrapper,
    TorchSquashedGaussian,
)
from ray.rllib.policy.policy import Policy
from ray.rllib.utils.spaces.simplex import Simplex
from ray.rllib.utils.typing import TrainerConfigDict
def _get_dist_class(
policy: Policy, config: TrainerConfigDict, action_space: gym.spaces.Space
) -> Type[TorchDistributionWrapper]:
"""Helper function to return a dist class based on config and action space.
Args:
policy (Policy): The policy for which to return the action
dist class.
config (TrainerConfigDict): The Trainer's config dict.
action_space (gym.spaces.Space): The action space used.
Returns:
        Type[TorchDistributionWrapper]: A Torch distribution class.
"""
if hasattr(policy, "dist_class") and policy.dist_class is not None:
return policy.dist_class
elif config["model"].get("custom_action_dist"):
action_dist_class, _ = ModelCatalog.get_action_dist(
action_space, config["model"], framework="torch"
)
return action_dist_class
elif isinstance(action_space, Discrete):
return TorchCategorical
elif isinstance(action_space, Simplex):
return TorchDirichlet
else:
assert isinstance(action_space, Box)
if config["normalize_actions"]:
return (
TorchSquashedGaussian
if not config["_use_beta_distribution"]
else TorchBeta
)
else:
return TorchDiagGaussian | 6511786dff734ddb78ce7c28e19b651c70fe86e2 | 3,654,669 |
from timeit import repeat as timeit_repeat

def timeexec(fct, number, repeat):
"""
Measures the time for a given expression.
:param fct: function to measure (as a string)
:param number: number of time to run the expression
(and then divide by this number to get an average)
:param repeat: number of times to repeat the computation
of the above average
:return: dictionary
"""
rep = timeit_repeat(fct, number=number, repeat=repeat)
ave = sum(rep) / (number * repeat)
std = (sum((x / number - ave)**2 for x in rep) / repeat)**0.5
fir = rep[0] / number
fir3 = sum(rep[:3]) / (3 * number)
las3 = sum(rep[-3:]) / (3 * number)
rep.sort()
mini = rep[len(rep) // 20] / number
maxi = rep[-len(rep) // 20] / number
return dict(average=ave, deviation=std, first=fir, first3=fir3,
last3=las3, repeat=repeat, min5=mini, max5=maxi, run=number) | 01ea6d74bed9d8a7d1b7793d3f8473bc6442f83f | 3,654,670 |
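# Usage sketch: average a small expression over 100 runs per sample, 10 samples.
stats = timeexec("sum(range(1000))", number=100, repeat=10)
print(stats["average"], stats["deviation"])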
import sys
def is_bst(root):
""" checks if binary tree is binary search tree """
def is_bst_util(root, min_value, max_value):
""" binary search tree check utility function """
if root is None:
return True
if (root.data >= min_value and root.data < max_value
and is_bst_util(root.left, min_value, root.data)
and is_bst_util(root.right, root.data, max_value)):
return True
return False
return is_bst_util(root, -sys.maxsize - 1, sys.maxsize) | 46828b5b3fc1827908faf7b9bb646bc3b6594b30 | 3,654,671 |
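# Usage sketch (assumption: nodes expose .data/.left/.right, as the code implies):
class _Node:
    def __init__(self, data, left=None, right=None):
        self.data, self.left, self.right = data, left, right

print(is_bst(_Node(2, _Node(1), _Node(3))))  # True
print(is_bst(_Node(2, _Node(3), _Node(1))))  # False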
def regexify(w, tags):
"""Convert a single component of a decomposition rule
from Weizenbaum notation to regex.
Parameters
----------
w : str
Component of a decomposition rule.
tags : dict
Tags to consider when converting to regex.
Returns
-------
w : str
Component of a decomposition rule converted to regex form.
"""
# 0 means "an indefinite number of words"
if w == '0':
w = '.*'
# A positive non-zero integer means "this specific amount of words"
elif w.isnumeric() and int(w) > 0:
w = r'(?:\b\w+\b[\s\r\n]*){' + w + '}'
# A word starting with @ signifies a tag
elif w[0] == "@":
# Get tag name
tag_name = w[1:].lower()
w = tag_to_regex(tag_name, tags)
else:
# Add word boundaries to match on a whole word basis
w = r'\b' + w + r'\b'
return w | 113a631674c5984d81f830c5e8ca840d95678aa1 | 3,654,672 |
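# Usage sketch ('@tag' components additionally need a populated tags dict):
print(regexify('0', {}))      # .*
print(regexify('2', {}))      # (?:\b\w+\b[\s\r\n]*){2}
print(regexify('hello', {}))  # \bhello\b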
import numpy as np

def row_dot_product(a: np.ndarray, b: np.ndarray) -> np.ndarray:
"""
Returns a vectorized dot product between the rows of a and b
:param a: An array of shape (N, M) or (M, )
(or a shape that can be broadcast to (N, M))
:param b: An array of shape (N, M) or (M, )
(or a shape that can be broadcast to (N, M))
:return: A vector of shape (N, ) whose elements are the dot product of rows a, b
"""
return np.einsum('ij,ij->i', np.atleast_2d(a), np.atleast_2d(b)) | d2544f2957963d343bdeb079418a1a5d96373eb4 | 3,654,673 |
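# Usage sketch:
a = np.array([[1, 2], [3, 4]])
b = np.array([[5, 6], [7, 8]])
print(row_dot_product(a, b))  # [17 53]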
def pmg_pickle_dump(obj, filobj, **kwargs):
"""
Dump an object to a pickle file using PmgPickler.
Args:
        obj: Object to dump.
        filobj: File-like object to write to.
\\*\\*kwargs: Any of the keyword arguments supported by PmgPickler
"""
return PmgPickler(filobj, **kwargs).dump(obj) | 4ac72623538ce463b1bfc183bcac90919e47c513 | 3,654,674 |
def condition_header(header, needed_keys=None):
"""Return a dictionary of all `needed_keys` from `header` after passing
their values through the CRDS value conditioner.
"""
header = { key.upper():val for (key, val) in header.items() }
if not needed_keys:
needed_keys = header.keys()
else:
needed_keys = [ key.upper() for key in needed_keys ]
conditioned = { key:condition_value(header[key]) for key in needed_keys }
return conditioned | cd8c39e355a05367d479e76bda6f0869c10f8130 | 3,654,675 |
from collections import OrderedDict

import numpy as np
def get_generic_path_information(paths, stat_prefix=""):
"""
Get an OrderedDict with a bunch of statistic names and values.
"""
statistics = OrderedDict()
    returns = [sum(path["rewards"]) for path in paths]
    rewards = np.concatenate([path["rewards"] for path in paths])
statistics.update(
create_stats_ordered_dict(
"Rewards", rewards, stat_prefix=stat_prefix, always_show_all_stats=True
)
)
statistics.update(
create_stats_ordered_dict(
"Returns", returns, stat_prefix=stat_prefix, always_show_all_stats=True
)
)
if "is_success" in paths[0]["env_infos"][0].keys():
acc_sum = [(np.sum([x['is_success'] for x in path["env_infos"]])>0).astype(float) for path in paths]
acc = np.sum(acc_sum) * 1.0 / len(paths)
statistics.update(
create_stats_ordered_dict(
"Success Num", np.sum(acc_sum), stat_prefix=stat_prefix, always_show_all_stats=True
)
)
statistics.update(
create_stats_ordered_dict(
"Traj Num", len(paths), stat_prefix=stat_prefix, always_show_all_stats=True
)
)
statistics.update(
create_stats_ordered_dict(
"Success Rate", acc, stat_prefix=stat_prefix, always_show_all_stats=True
)
)
    actions = [path["actions"] for path in paths]
statistics.update(
create_stats_ordered_dict(
"Actions", actions, stat_prefix=stat_prefix, always_show_all_stats=True
)
)
statistics.update(
create_stats_ordered_dict(
"Ep. Len.",
np.array([len(path["terminals"]) for path in paths]),
stat_prefix=stat_prefix,
always_show_all_stats=True,
)
)
statistics["Num Paths"] = len(paths)
return statistics | a90995c43d588cee4869bfa8b3f6a1026d265aab | 3,654,676 |
import math
import numpy as np
def pad_images(images, nlayers):
"""
In Unet, every layer the dimension gets divided by 2
in the encoder path. Therefore the image size should be divisible by 2^nlayers.
"""
divisor = 2**nlayers
    _, x, y = images.shape  # z (slice count) is unchanged; only x and y get padded
x_pad = int((math.ceil(x / float(divisor)) * divisor) - x)
y_pad = int((math.ceil(y / float(divisor)) * divisor) - y)
padded_image = np.pad(images, ((0,0),(0, x_pad), (0, y_pad)), 'constant', constant_values=(0, 0))
return padded_image | 671fa940d0a0ed87819335b60d12d9e268bf9932 | 3,654,677 |
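# Usage sketch: with nlayers=3 the spatial dims are padded to multiples of 8.
stack = np.zeros((5, 100, 100))
print(pad_images(stack, 3).shape)  # (5, 104, 104)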
from copy import deepcopy

def remove_measurements(measurements, model_dict, params=None):
"""Remove measurements from a model specification.
If provided, a params DataFrame is also reduced correspondingly.
Args:
measurements (str or list): Name(s) of the measurement(s) to remove.
model_dict (dict): The model specification. See: :ref:`model_specs`.
params (pandas.DataFrame or None): The params DataFrame for the full model.
Returns:
dict: The reduced model dictionary
pandas.DataFrame: The reduced parameter DataFrame (only if params is not None)
"""
out = deepcopy(model_dict)
for factor in model_dict["factors"]:
full = model_dict["factors"][factor]["measurements"]
reduced = [_remove_from_list(meas_list, measurements) for meas_list in full]
out["factors"][factor]["measurements"] = reduced
norminfo = model_dict["factors"][factor].get("normalizations", {})
if "loadings" in norminfo:
out["factors"][factor]["normalizations"][
"loadings"
] = _remove_measurements_from_normalizations(
measurements, norminfo["loadings"]
)
if "intercepts" in norminfo:
out["factors"][factor]["normalizations"][
"intercepts"
] = _remove_measurements_from_normalizations(
measurements, norminfo["intercepts"]
)
if params is not None:
out_params = _reduce_params(params, out)
out = (out, out_params)
return out | fffddf4368579c999648c29b4746006b38de140c | 3,654,678 |
def good2Go(SC, L, CC, STR):
"""
Check, if all input is correct and runnable
"""
if SC == 1 and L == 1 and CC == 1 and STR == 1:
return True
else:
print(SC, L, CC, STR)
return False | e49229df6b9b187e1840d5bc5c8a1a8e087a5a4e | 3,654,679 |
def __validate_tweet_name(tweet_name: str, error_msg: str) -> str:
"""Validate the tweet's name.
Parameters
----------
tweet_name : str
Tweet's name.
error_msg : str
Error message to display for an invalid name.
Returns
-------
str
Validated tweet name.
Raises
------
InvalidTweetName
Raised for invalid tweet names.
"""
if tweet_name == "":
raise InvalidTweetName(error_msg)
else:
return tweet_name | 7086aeac6ccd0afcad0d13e947f3b454f7333b9f | 3,654,680 |
def convert_event(obj):
"""
:type obj: :class:`sir.schema.modelext.CustomEvent`
"""
event = models.event(id=obj.gid, name=obj.name)
if obj.comment:
event.set_disambiguation(obj.comment)
if obj.type is not None:
event.set_type(obj.type.name)
event.set_type_id(obj.type.gid)
lifespan = convert_life_span(obj.begin_date, obj.end_date, obj.ended)
if lifespan.get_begin() is not None or lifespan.get_end() is not None:
event.set_life_span(lifespan)
if obj.time is not None:
event.set_time(datetime_to_string(obj.time))
if obj.area_links:
event.add_relation_list(convert_event_area_relation_list(obj.area_links))
if obj.artist_links:
event.add_relation_list(convert_artist_relation_list(obj.artist_links))
if obj.place_links:
event.add_relation_list(convert_place_relation_list(obj.place_links))
if obj.aliases:
event.set_alias_list(convert_alias_list(obj.aliases))
if obj.tags:
event.set_tag_list(convert_tag_list(obj.tags))
return event | 23a6a31abca03d0c92f6162ce28b8548dc95bdda | 3,654,681 |
from typing import Any
import requests
import json
def get_pr_review_status(pr: PullRequestDetails, per_page: int = 100) -> Any:
"""
References:
https://developer.github.com/v3/pulls/reviews/#list-reviews-on-a-pull-request
"""
url = (f"https://api.github.com/repos/{pr.repo.organization}/{pr.repo.name}"
f"/pulls/{pr.pull_id}/reviews"
f"?per_page={per_page};access_token={pr.repo.access_token}")
response = requests.get(url)
if response.status_code != 200:
raise RuntimeError(
'Get review failed. Code: {}. Content: {}.'.format(
response.status_code, response.content))
    return json.loads(response.content.decode())
import numpy as np
from scipy.interpolate import UnivariateSpline

def make_sph_model(filename):
    """reads a spherical model text file and generates interpolated values
    Args:
        filename: path to the model table (rcurve, potcurve, dpotcurve)
    Returns:
        model: spherical_model with spline-interpolated potential curves
    """
    M = np.loadtxt(filename,
                   dtype={'names': ('rcurve', 'potcurve', 'dpotcurve'),
                          'formats': ('f4', 'f4', 'f4')},
                   skiprows=1)
    model = spherical_model()
    model.rcurve = M['rcurve']
    model.potcurve = UnivariateSpline(model.rcurve, M['potcurve'], k=3)
    model.dpotcurve = UnivariateSpline(model.rcurve, M['dpotcurve'], k=3)
    return model | d86a88ffca93ee0618cf5a19aa015077247cffb0 | 3,654,683
import os
def list_dir(filepath):
"""List the files in the directory"""
return sorted(list(map(lambda x: os.path.join(filepath, x), os.listdir(filepath)))) | 29c50f132b5abfdfea819db58a816a83e6efaccd | 3,654,684 |
import paddle as pd  # assumption: `pd` aliases the PaddlePaddle package here

def minimum(x, y):
"""
Returns the min of x and y (i.e. x < y ? x : y) element-wise.
Parameters
----------
    x : tensor
        Must be one of the following types: bfloat16, half, float32, float64, int32, int64.
    y : tensor
        Must have the same type as x.
Returns
-------
A Tensor. Has the same type as x
"""
return pd.minimum(x, y) | 384e7d15687d03f7b639fc50707712c94029620f | 3,654,685 |
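# Usage sketch (paddle backend):
print(minimum(pd.to_tensor([1, 4]), pd.to_tensor([3, 2])))  # values: [1, 2]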
def seconds_to_timestamp(seconds):
"""
Convert from seconds to a timestamp
"""
minutes, seconds = divmod(float(seconds), 60)
hours, minutes = divmod(minutes, 60)
return "%02d:%02d:%06.3f" % (hours, minutes, seconds) | 8b9806f05fe4796baae51001e69455e82fb51eed | 3,654,686 |
def query(querystring: str,
db: tsdb.Database,
**kwargs):
"""
    Perform query *querystring* on the database *db*.
Note: currently only 'select' queries are supported.
Args:
querystring (str): TSQL query string
        db (:class:`delphin.tsdb.Database`): database to query over
kwargs: keyword arguments passed to the more specific query
function (e.g., :func:`select`)
Example:
        >>> list(tsql.query('select i-id where i-length < 4', db))
[[142], [1061]]
"""
queryobj = _parse_query(querystring)
if queryobj['type'] in ('select', 'retrieve'):
return _select(
queryobj['projection'],
queryobj['relations'],
queryobj['condition'],
db,
record_class=kwargs.get('record_class', None))
else:
# not really a syntax error; replace with TSQLError or something
# when the proper exception class exists
raise TSQLSyntaxError(queryobj['type'] + ' queries are not supported',
text=querystring) | fa43123b3e0c4706b738104c641836fa08a4fc35 | 3,654,687 |
def TTF_SizeUTF8(font, text, w, h):
"""Calculates the size of a UTF8-encoded string rendered with a given font.
See :func:`TTF_SizeText` for more info.
Args:
font (:obj:`TTF_Font`): The font object to use.
text (bytes): A UTF8-encoded bytestring of text for which the rendered
surface size should be calculated.
w (byref(:obj:`~ctypes.c_int`)): A pointer to an integer in which to
store the calculated surface width (in pixels).
h (byref(:obj:`~ctypes.c_int`)): A pointer to an integer in which to
store the calculated surface height (in pixels).
Returns:
int: 0 on success, or -1 on error (e.g. if a glyph is not found in
the font).
"""
return _funcs["TTF_SizeUTF8"](font, text, w, h) | 3d24382222b1795caa0981c659d00a717c22fc86 | 3,654,688 |
import numpy as np

def get_mse(y_true, y_hat):
"""
Return the mean squared error between the ground truth and the prediction
:param y_true: ground truth
:param y_hat: prediction
:return: mean squared error
"""
return np.mean(np.square(y_true - y_hat)) | 3d4c1828abf5bf88607e4ca1a263c483105733aa | 3,654,689 |
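# Usage sketch:
print(get_mse(np.array([1.0, 2.0, 3.0]), np.array([1.0, 2.5, 2.0])))  # 0.41666...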
import jwt  # PyJWT; `const` is assumed to be this project's constants module

def generate_v2_token(username, version, client_ip, issued_at_timestamp, email=''):
"""Creates the JSON Web Token with a new schema
:Returns: String
:param username: The name of person who the token identifies
:type username: String
:param version: The version number for the token
:type version: Integer/String
:param client_ip: The IP of machine the client used to request a token.
:type client_ip: String
:param email: The email address associated with a user.
:type email: String
"""
claims = {'exp' : issued_at_timestamp + const.AUTH_TOKEN_TIMEOUT,
'iat' : issued_at_timestamp,
'iss' : const.VLAB_URL,
'username' : username,
'version' : version,
'client_ip' : client_ip,
'email' : email,
}
return jwt.encode(claims, const.AUTH_TOKEN_SECRET, algorithm=const.AUTH_TOKEN_ALGORITHM) | dee10b68fc15ec730a7b8921f95a77804618879c | 3,654,690 |
import math
from functools import reduce
from operator import mul

def fact_div(a, b):
    """Assumed helper: a!/b! as a running product (avoids huge intermediates)."""
    return reduce(mul, range(b + 1, a + 1), 1)

def choose(n, k):
    """return n choose k
    resilient (though not immune) to integer overflow"""
    if n == 1:
        # optimize by far most-common case
        return 1
    return fact_div(n, max(k, n - k)) // math.factorial(min(k, n - k))
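# Usage sketch:
print(choose(5, 2))   # 10
print(choose(52, 5))  # 2598960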
from typing import List
def part_one(puzzle_input: List[str]) -> int:
"""Find the highest seat ID on the plane"""
return max(boarding_pass_to_seat_id(line) for line in puzzle_input) | 1ae95a7784f5348bb435483228630c8795d62d30 | 3,654,692 |
def readbit(val, bitidx):
""" Direct word value """
return int((val & (1<<bitidx))!=0) | 4ca368f89b2496ec46c1641835c1f2a0a1cdd573 | 3,654,693 |
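# Usage sketch:
print(readbit(0b1010, 1))  # 1
print(readbit(0b1010, 2))  # 0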
from copy import deepcopy

def matrix_bombing_plan(m):
""" This method calculates sum of the matrix by
trying every possible position of the bomb and
returns a dictionary. Dictionary's keys are the
positions of the bomb and values are the sums of
the matrix after the damage """
matrix = deepcopy(m)
rows = len(m)
columns = len(m[0])
d = {}
for x in range(0, rows):
for y in range(0, columns):
p = (x, y)
neighbours = find_neighbour(matrix, (x, y))
d[p] = sum_matrix(neighbours)
return d | 013d1dc3685013fa6fd5c87cfc2513e07e66e310 | 3,654,694 |
def coord_to_gtp(coord, board_size):
""" From 1d coord (0 for position 0,0 on the board) to A1 """
if coord == board_size ** 2:
return "pass"
return "{}{}".format("ABCDEFGHJKLMNOPQRSTYVWYZ"[int(coord % board_size)],\
int(board_size - coord // board_size)) | a0419e8a7f39cd282585ed1d29d94bbded0e3f1c | 3,654,695 |
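# Usage sketch (19x19 board):
print(coord_to_gtp(0, 19))    # A19
print(coord_to_gtp(361, 19))  # pass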
import numpy as np
from sklearn.datasets import make_blobs

from gap_statistic import OptimalK  # assumed: the gap-statistic package

def test_alternative_clustering_method(ClusterModel):
"""
Test that users can supply alternative clustering method as dep injection
"""
def clusterer(X: np.ndarray, k: int, another_test_arg):
"""
        Function to wrap a sklearn model as a clusterer for OptimalK.
        The first two arguments are always the data matrix and k; extra
        arguments can be supplied via ``clusterer_kwargs``.
"""
m = ClusterModel()
m.fit(X)
assert another_test_arg == "test"
return m.cluster_centers_, m.predict(X)
optimalk = OptimalK(
n_jobs=-1,
parallel_backend="joblib",
clusterer=clusterer,
clusterer_kwargs={"another_test_arg": "test"},
)
X, y = make_blobs(n_samples=50, n_features=2, centers=3)
n_clusters = optimalk(X, n_refs=3, cluster_array=np.arange(1, 5))
assert isinstance(n_clusters, int) | 173e376726abe943f15fae44aa746bf9abe7dd53 | 3,654,696 |
import logging

log = logging.getLogger(__name__)

def load_dataset(spfile, twfile):
    """Loads dataset given the span file and the tweets file
    Arguments:
        spfile {string} -- path to span file
        twfile {string} -- path to tweets file
    Returns:
        dict -- dictionary of tweet-id to Tweet object
    """
    tw_int_map = {}
    for line in open(twfile, 'r'):
        twid = get_basename_without_extension(line.strip('\n'))
        tweet = Tweet(twid)
        if twid in tw_int_map:
            log.warning("Possible duplicate %s", twid)
        tw_int_map[twid] = tweet
    # Load annotations
    for line in open(spfile, 'r'):
        parts = [x.strip() for x in line.split("\t")]
        if len(parts) != 5:
            log.warning("Tab delimited not correct: %s columns", len(parts))
            continue
        twid, start, end, atype, prof = parts
        if twid == "tweet_id":
            continue
        if twid in tw_int_map:
            tweet = tw_int_map[twid]
        else:
            log.warning("Invalid tweetid %s not found.", twid)
            continue
        valid_labels = ["PROTEINAS", "NORMALIZABLES", "UNCLEAR", "NO-NORMALIZABLES"]
        if atype in valid_labels:
            ann = Ann(prof.strip(), atype, start, end)
            tweet.anns.append(ann)
        tweet.has_ann = (tweet.has_ann or atype in valid_labels)
    num_anns = sum([len(x.anns) for _, x in tw_int_map.items()])
    log.info("Loaded dataset %s tweets. %s annotations.", len(tw_int_map), num_anns)
    return tw_int_map | e25c382b3fe8c321b70206894e483c3f04ade2ed | 3,654,697
from typing import Union
from typing import Tuple
def nameof(var, *more_vars,
# *, keyword only argument, supported with python3.8+
frame: int = 1,
vars_only: bool = True) -> Union[str, Tuple[str]]:
"""Get the names of the variables passed in
Examples:
>>> a = 1
>>> nameof(a) # 'a'
>>> b = 2
>>> nameof(a, b) # ('a', 'b')
>>> x = lambda: None
>>> x.y = 1
        >>> nameof(x.y, vars_only=False) # 'x.y'
Note:
This function works with the environments where source code is
available, in other words, the callee's node can be retrieved by
`executing`. In some cases, for example, running code from python
shell/REPL or from `exec`/`eval`, we try to fetch the variable name
from the bytecode. This requires only a single variable name is passed
to this function and no keyword arguments, meaning that getting full
names of attribute calls are not supported in such cases.
Args:
var: The variable to retrieve the name of
*more_vars: Other variables to retrieve the names of
        frame: The frame from which this function is called, counting
            wrappers of it. `frame=1` means no wrappers.
Note that the calls from standard libraries are ignored.
Also note that the wrapper has to have signature as this one.
        vars_only: Whether to allow only variables/attributes as arguments or
            any expressions. If `False`, the sources of the arguments
            will be returned.
Returns:
The names/sources of variables/expressions passed in.
If a single argument is passed, return the name/source of it.
If multiple variables are passed, return a tuple of their
names/sources.
        If the argument is an attribute (e.g. `a.b`) and `vars_only` is
        `True`, only `"b"` is returned. Set `vars_only` to `False` to
        get `"a.b"`.
Raises:
VarnameRetrievingError: When the callee's node cannot be retrieved or
trying to retrieve the full name of non attribute series calls.
"""
# Frame is anyway used in get_node
frameobj = IgnoreList.create(
ignore_lambda=False,
ignore_varname=False
).get_frame(frame)
node = get_node_by_frame(frameobj, raise_exc=True)
if not node:
# We can't retrieve the node by executing.
# It can be due to running code from python/shell, exec/eval or
# other environments where sourcecode cannot be reached
# make sure we keep it simple (only single variable passed and no
# full passed) to use bytecode_nameof
#
# We don't have to check keyword arguments here, as the instruction
# will then be CALL_FUNCTION_KW.
if not more_vars:
return bytecode_nameof(frameobj.f_code, frameobj.f_lasti)
# We are anyway raising exceptions, no worries about additional burden
# of frame retrieval again
source = frameobj.f_code.co_filename
if source == '<stdin>':
raise VarnameRetrievingError(
"Are you trying to call nameof in REPL/python shell? "
"In such a case, nameof can only be called with single "
"argument and no keyword arguments."
)
if source == '<string>':
raise VarnameRetrievingError(
"Are you trying to call nameof from exec/eval? "
"In such a case, nameof can only be called with single "
"argument and no keyword arguments."
)
raise VarnameRetrievingError(
"Source code unavailable, nameof can only retrieve the name of "
"a single variable, and argument `full` should not be specified."
)
return argname(
var, *more_vars,
func=nameof,
frame=frame,
vars_only=vars_only,
pos_only=True
) | 4a7c7d8390dad2597cad65409aaa6cd3f716a8a8 | 3,654,698 |
import os
def cache_get_filepath(key):
"""Returns cache path."""
return os.path.join(settings.CACHE_PATH, key) | 7b6ace1c68a4783a95390b4dcf1c658b7b9db0b2 | 3,654,699 |