content (string, lengths 35–762k) | sha1 (string, length 40) | id (int64, 0–3.66M)
---|---|---|
from typing import Any
def vgg16(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> VGG:
"""
Args:
pretrained (bool): whether to load pretrained parameters
progress (bool): whether to show a progress bar while downloading the data
Returns:
The VGG model
"""
return _vgg("vgg16","D",False,pretrained,progress,**kwargs) | 82d64394a705caa9e0a0c1e661078c9ea299fa05 | 3,656,746 |
def cy_gate(N=None, control=0, target=1):
"""Controlled Y gate.
Returns
-------
result : :class:`qutip.Qobj`
Quantum object for operator describing the rotation.
"""
if (control == 1 and target == 0) and N is None:
N = 2
if N is not None:
return gate_expand_2toN(cy_gate(), N, control, target)
return Qobj([[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 0, -1j],
[0, 0, 1j, 0]],
dims=[[2, 2], [2, 2]]) | 8927557d0afe096218acf1ac0283c5ec073e3f98 | 3,656,747 |
def sqf(f, *gens, **args):
"""
Compute square-free factorization of ``f``.
**Examples**
>>> from sympy import sqf
>>> from sympy.abc import x
>>> sqf(2*x**5 + 16*x**4 + 50*x**3 + 76*x**2 + 56*x + 16)
2*(1 + x)**2*(2 + x)**3
"""
return _generic_factor(f, gens, args, method='sqf') | 5f0267b7c314269e64c32951824346542e3e3452 | 3,656,748 |
def update_options_dpd2(dpd1_val):
"""
Updates the contents of the second dropdown menu based on the value of the first dropdown.
:param dpd1_val: str, first dropdown value
:return: list of dictionaries, labels and values
"""
all_options = [
strings.CITY_GDANSK,
strings.CITY_GDYNIA,
strings.CITY_KALINGRAD,
strings.CITY_KLAIPEDA,
strings.CITY_STPETERBURG,
]
all_options.remove(dpd1_val)
options = [{"label": opt, "value": opt} for opt in all_options]
return options | 4a2d0494f04e3026b133f61a70757046f011b5f1 | 3,656,749 |
import csv
def compute_min_paths_from_monitors(csv_file_path, delimiter='\t', origin_as=PEERING_ORIGIN):
"""
Inputs: csv_file_path, delimiter : csv file containing entries with the following format:
|collector|monitor|as_path, and the delimiter used
origin_as: the ASN you want to use as the terminal one for the as_path length computation
Output: A dictionary that contains for each monitor found in the given csv file, the minimum length path
and its length.
"""
monitor_routes = {} # contains the minimum length found for each route monitor
# key:monitor(string), value: (minimum as_path length(integer),
# the minimum length as_path(list of positive integers))
with open(csv_file_path) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=delimiter)
row_count = 0
for row in csv_reader:
row_count += 1
monitor = row[1]
# AS-path prep removing prepending and bgp poisoning
as_path_list = AS_path().make_list(row[2]) # as_path(string) -> as_path (list of positive integers)
as_path_rem_prepend = AS_path().remove_prependings(as_path_list)
as_path_cleared = AS_path().remove_loops(as_path_rem_prepend)
as_path_length = AS_path().count_length(as_path_cleared, origin_as)
if monitor in monitor_routes.keys():
if monitor_routes[monitor][0] > as_path_length:
monitor_routes[monitor] = (as_path_length, as_path_cleared)
else:
monitor_routes[monitor] = (as_path_length, as_path_cleared)
return monitor_routes | 6f0c1e26062213ea14af80a803c6e6ebd25c6543 | 3,656,750 |
def _blanking_rule_ftld_or3a(rule):
""" See _blanking_rule_ftld_or2a for rules """
if rule == 'Blank if Question 1 FTDIDIAG = 0 (No)':
return lambda packet: packet['FTDIDIAG'] == 0
elif rule == 'Blank if Question 3 FTDFDGPE = 0 (No)':
return lambda packet: packet['FTDFDGPE'] == 0
elif rule == 'Blank if Question 3a FTDFDGFh = 0 (No) or 9 (Unknown)':
return lambda packet: packet['FTDFDGFh'] in (0, 9)
elif rule == 'Blank if Question 3a11, FTDFDGOA, ne 1 (Yes)':
return lambda packet: packet['FTDFDGOA'] != 1
else:
return lambda packet: False | cc0a925c7ad6ad72c4041d369f9a820c0a6a6b96 | 3,656,751 |
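A minimal usage sketch of the rule factory above: it returns a predicate applied to a packet, represented here as a plain dict of field values (the real Packet type may differ); the rule strings and field names are taken directly from the snippet.

# Hypothetical packets modeled as dicts for illustration only.
rule = _blanking_rule_ftld_or3a('Blank if Question 1 FTDIDIAG = 0 (No)')
print(rule({'FTDIDIAG': 0}))      # True  -> the field should be blanked
print(rule({'FTDIDIAG': 1}))      # False -> the field is kept
fallback = _blanking_rule_ftld_or3a('some unknown rule')
print(fallback({'FTDIDIAG': 0}))  # False -> unknown rules never blank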
def compute_angular_differences(matrix, orientation1, orientation2, cutoff):
""" Compute angular difference between two orientation ndarrays
:param matrix: domain matrix
:type matrix: np.ndarray
:param orientation1: orientation as (x, y, z, 3)
:type orientation1: np.ndarray
:param orientation2: orientation as (x, y, z, 3)
:type orientation2: np.ndarray
:param cutoff: to binarize domain
:type cutoff: (int, int)
:return: angle_errors in degrees, mean, std
:rtype: (np.ndarray, float, float)
"""
if not isinstance(matrix, np.ndarray) or not isinstance(orientation1, np.ndarray) or not isinstance(orientation2, np.ndarray):
raise Exception("Inputs must be ndarrays.")
if not isinstance(cutoff, tuple) or not len(cutoff) == 2:
raise Exception("Cutoff must be a tuple(int, int).")
if not (orientation1.ndim == 4 and orientation2.ndim == 4 and matrix.ndim == 3 and orientation1.shape[3] == 3 and
orientation1.shape == orientation2.shape and orientation1.shape[0] == matrix.shape[0] and
orientation1.shape[1] == matrix.shape[1] and orientation1.shape[2] == matrix.shape[2]):
raise Exception("Incorrect dimensions in input ndarrays.")
mask = np.logical_and(matrix >= cutoff[0], matrix <= cutoff[1])
unit_vectors_1 = orientation1[mask]
unit_vectors_2 = orientation2[mask]
radians_diff = np.einsum('ij,ij->i', unit_vectors_1, unit_vectors_2)
diff = np.zeros((unit_vectors_1.shape[0], 2), dtype=float)
diff[:, 0] = np.degrees(np.arccos(np.clip(radians_diff, -1, 1)))
diff[:, 1] = 180 - diff[:, 0]
diff = np.min(diff, axis=1)
angle_diff = np.zeros_like(matrix, dtype=float)
angle_diff[mask] = diff
return angle_diff, diff.mean(), diff.std() | 3ec860c484057de91eb306079328faff87a9b0e4 | 3,656,753 |
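A small, self-contained check of the function above (assuming numpy is imported as np at module level, as the body requires): two orientation fields that differ by a 90-degree rotation in the x-y plane should give 90 degrees everywhere inside the cutoff mask.

import numpy as np

matrix = np.ones((2, 2, 2))                 # every voxel passes the (1, 1) cutoff
orientation_a = np.zeros((2, 2, 2, 3))
orientation_a[..., 0] = 1.0                 # unit vectors along x
orientation_b = np.zeros((2, 2, 2, 3))
orientation_b[..., 1] = 1.0                 # unit vectors along y
angle_map, mean_deg, std_deg = compute_angular_differences(
    matrix, orientation_a, orientation_b, cutoff=(1, 1))
print(mean_deg, std_deg)                    # 90.0 0.0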
def await(*args):
"""Runs all the tasks specified in args,
and finally returns args unwrapped.
"""
return _await(args) | 1065986a6ac067222bf5c6ff47a395ab4d0c890e | 3,656,754 |
def convert_action_move_exists(action, board, player_turn):
"""
Converts action index to chess.Move object.
Assume the action key exists in map_action_uci
:param action:
:param board:
:param player_turn:
:return:
"""
move = chess.Move.from_uci(map_action_uci[action])
if player_turn == chess.BLACK:
move = chess.Move(from_square=chess.square_mirror(move.from_square),
to_square=chess.square_mirror(move.to_square), promotion=move.promotion)
if move.promotion == chess.QUEEN:
move.promotion = None
rank = move.to_square//8
try:
if move.promotion is None and board.piece_at(move.from_square).piece_type == chess.PAWN and \
(rank == 7 or rank == 0):
move.promotion = chess.QUEEN
except AttributeError as err:
print(board, move, action, player_turn)
raise AttributeError(err)
return move | f4c508a99967d65b6f2f07159fe3003730b220a2 | 3,656,755 |
import pwd
import win32api
def _get_system_username():
"""Return the current system user."""
if not win32:
return pwd.getpwuid(getuid())[0]
else:
return win32api.GetUserName() | 4dfdc93630d2c3940c7087fc2125f81f0e385d9f | 3,656,757 |
import glob
def _get_vmedia_device():
"""Finds the device filename of the virtual media device using sysfs.
:returns: a string containing the filename of the virtual media device
"""
sysfs_device_models = glob.glob("/sys/class/block/*/device/model")
vmedia_device_model = "virtual media"
for model_file in sysfs_device_models:
try:
with open(model_file) as model_file_fobj:
if vmedia_device_model in model_file_fobj.read().lower():
vmedia_device = model_file.split('/')[4]
return vmedia_device
except Exception:
pass | e8f8e83b7bf0c73d10d8893a5b4b49670edba7ac | 3,656,758 |
def convert_to_posixpath(file_path):
"""Converts a Windows style filepath to posixpath format. If the operating
system is not Windows, this function does nothing.
Args:
file_path: str. The path to be converted.
Returns:
str. Returns a posixpath version of the file path.
"""
if not is_windows_os():
return file_path
return file_path.replace('\\', '/') | 9a8e6559b7916ba7547f87ce3bba6b50362c7ded | 3,656,759 |
def generateCards(numberOfSymb):
"""
Generates a list of cards, each of which is a list of the symbols that must appear on that card to respect the rules of Dobble.
This algorithm was taken from the French Wikipedia page of "Dobble".
https://fr.wikipedia.org/wiki/Dobble
:param numberOfSymb: Number of symbols needed on each card.
:type numberOfSymb: int
:returns: List of cards which are list of symbols on it.
:rtype: List[List[int]]
"""
nbSymByCard = numberOfSymb
nbCards = (nbSymByCard**2) - nbSymByCard + 1
cards = []
n = nbSymByCard - 1
t = []
t.append([[(i+1)+(j*n) for i in range(n)] for j in range(n)])
for ti in range(n-1):
t.append([[t[0][((ti+1)*i) % n][(j+i) % n] for i in range(n)] for j in range(n)])
t.append([[t[0][i][j] for i in range(n)] for j in range(n)])
for i in range(n):
t[0][i].append(nbCards - n)
t[n][i].append(nbCards - n + 1)
for ti in range(n-1):
t[ti+1][i].append(nbCards - n + 1 + ti + 1)
t.append([[(i+(nbCards-n)) for i in range(nbSymByCard)]])
for ti in t:
cards = cards + ti
return cards | 8f51c1f339d62b6fd88cb8d0fae692053bffc084 | 3,656,760 |
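A quick sanity check of the generator above: for numberOfSymb = q + 1 with q prime (here 3 symbols per card, q = 2), the construction yields q^2 + q + 1 = 7 cards, and any two distinct cards share exactly one symbol, which is the defining Dobble property.

from itertools import combinations

cards = generateCards(3)
print(len(cards))  # 7 cards of 3 symbols each
assert all(len(set(a) & set(b)) == 1 for a, b in combinations(cards, 2))
print("every pair of cards shares exactly one symbol")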
def match_facilities(facility_datasets,
authoritative_dataset,
manual_matches_df=None,
max_distance=150,
nearest_n=10,
meters_crs='epsg:5070',
reducer_fn=None):
"""Matches facilities. The dataset represented by the authoritative_dataset key
in the facilities_dfs dict will considered authoritative - all other facilities
in the remaining datasets will be dropped if they are not matched, and the point
location of the authoritative dataset will be used.
Args:
facility_datasets (Dict[str, Dict]): A dictionary keyed by
the dataset ID with values being a dictionary containing keys
'df' containing the dataframe of facility data and 'columns'
containing a FacilityColumns object.
authoritative_dataset: The dataset that contains the facilities all
other datasets will match to.
manual_matches_df: Dataframe containing manually matched facilities. Should contain
columns for each of the ID columns of the datasets with matching IDs in each row.
max_distance (int, optional): The maximum distance (in meters) that two matches can be apart.
Defaults to 150 meters.
nearest_n (int, optional): The number of neighbors to consider as potential options.
Defaults to 10.
meters_crs: The EPSG code for the projection to use for meters distance computations.
Defaults to EPSG:5070 (NAD83 / Conus Albers) for the U.S.
reducer_fn: Function to reduce potentially matched facilities. Defaults to
reduce_matched_facility_records. See that function's signature for required
parameters. Pass in alternate implementations to implement other matching approaches.
Result:
(FacilityMatchResult): The result of the match.
Note:
The resulting dataframes will convert the id columns of any dataset into a str type.
"""
MATCH_ID_SEP = '_-_'
if reducer_fn is None:
reducer_fn = reduce_matched_facility_records
def get_id_column(dataset_key):
return facility_datasets[dataset_key]['columns'].facility_id
def get_matched_set(subcomponent):
"""Method for collecting the data for the reducer_fn based on a
connected subcomponent. Returns the records of the matched set and a dictionary
that records the distances between the facilities.
"""
records = []
distances = {}
manual_matches = set([])
for n in s:
ds, facility_id = deconstruct_match_id(n)
df = facility_datasets[ds]['df']
id_column = facility_datasets[ds]['columns'].facility_id
record = df[df[id_column].astype(str) == facility_id].to_dict(orient='record')[0]
record['dataset'] = ds
record['match_id'] = n
records.append(record)
for u, v in G.edges(n):
edge_data = G.get_edge_data(u, v)
distances[(u, v)] = edge_data['weight']
if ds == authoritative_dataset and edge_data.get('manual_override', False):
connected_ds, _ = deconstruct_match_id(v)
manual_matches.add((u, connected_ds, v))
return records, distances, manual_matches
def construct_match_id(dataset_key, facility_id):
id_column = get_id_column(dataset_key)
return '{}{}{}'.format(
dataset_key,
MATCH_ID_SEP,
facility_id
)
def deconstruct_match_id(match_id):
return match_id.split(MATCH_ID_SEP)
assert authoritative_dataset in facility_datasets
# check that dataset ID columns are unique
dataset_id_columns = [
get_id_column(dataset_key)
for dataset_key in facility_datasets
]
if len(set(dataset_id_columns)) != len(dataset_id_columns):
raise Exception('Dataset ID column names must be unique.')
# Setup a distinct order of datasets
dataset_order = [authoritative_dataset] + sorted([x for x in facility_datasets
if x != authoritative_dataset])
# Set of match_ids
ids = []
# Set of (x,y) points aligned with ids, in meters_crs
pts = []
# Mapping from match_id -> point
ids_to_pts = {}
# Construct a reprojected geodataframe per dataset, and
# record the match ids and points for usage in the KNN
# computation below.
for dataset_key in dataset_order:
df = facility_datasets[dataset_key]['df']
meters_df = df.to_crs(meters_crs)
id_column = get_id_column(dataset_key)
meters_df['match_id'] = '{}{}'.format(dataset_key, MATCH_ID_SEP) + \
meters_df[id_column].astype(str)
facility_datasets[dataset_key]['meters_df'] = meters_df
for _, row in meters_df.iterrows():
match_id = row['match_id']
pt = (row['geometry'].x, row['geometry'].y)
ids_to_pts[match_id] = pt
ids.append(match_id)
pts.append(pt)
# Compute the K Nearest Neighbors for all points in the dataset.
kd_tree = libpysal.cg.KDTree(np.array(pts))
nearest_neighbors = libpysal.weights.KNN(kd_tree, k=nearest_n, ids=ids).neighbors
# For every match, make an edge in a graph. Don't add an edge between
# points that are further than the max distance. The weight of the edge
# is the distance between them in meters.
G = nx.Graph()
for match_id in nearest_neighbors:
source_pt = ids_to_pts[match_id]
G.add_node(match_id)
for neighbor_id in nearest_neighbors[match_id]:
neighbor_pt = ids_to_pts[neighbor_id]
dist = euclidean(source_pt, neighbor_pt)
if dist <= max_distance and not G.has_edge(match_id, neighbor_id):
G.add_edge(match_id, neighbor_id, weight=dist)
# Create edges for manual matches and mark them as such.
if manual_matches_df is not None:
auth_id_column = facility_datasets[authoritative_dataset]['columns'].facility_id
for _, row in manual_matches_df.iterrows():
# Get the authoritative dataset ID (required for each row)
auth_id = construct_match_id(authoritative_dataset, row[auth_id_column])
source_pt = ids_to_pts[auth_id]
for dataset_key in facility_datasets:
if dataset_key != authoritative_dataset:
id_column = facility_datasets[dataset_key]['columns'].facility_id
if id_column in row:
if row[id_column]:
neighbor_id = construct_match_id(dataset_key, row[id_column])
neighbor_pt = ids_to_pts[neighbor_id]
dist = euclidean(source_pt, neighbor_pt)
G.add_edge(auth_id, neighbor_id, weight=dist, manual_override=True)
# Set up a dict to be turned into the matches dataframe,
# and a dict that tracks what non-authoritative datasets
# have been matched.
matches = {}
matched_ids = {}
for dataset_key in dataset_order:
matches[get_id_column(dataset_key)] = []
if dataset_key != authoritative_dataset:
matched_ids[dataset_key] = set([])
dataset_columns = dict([(k, facility_datasets[k]['columns']) for k in facility_datasets])
# Iterate over connected components, which gives us the subgraphs that are
# matched, and pass this into the reduce_matches method to
# reduce down each match to a single matched set.
for s in nx.connected_components(G):
# Ignore components that don't have a point from the authoritative dataset.
if authoritative_dataset in [deconstruct_match_id(m)[0] for m in s]:
records, distances, manual_matches = get_matched_set(s)
if len(records) == 1:
reduced_components = [[records[0]['match_id']]]
else:
authoritative_records = [r for r in records if r['dataset'] == authoritative_dataset]
records_to_match = [r for r in records if r['dataset'] != authoritative_dataset]
reduced_components = reducer_fn(authoritative_records,
records_to_match,
distances,
manual_matches,
dataset_columns)
for match_set in reduced_components:
# Ensure that the set has a facility from the authoritative datatset
assert authoritative_dataset in [deconstruct_match_id(match_id)[0]
for match_id in match_set]
ds_ids = {}
for m in match_set:
dataset_key, facility_id = deconstruct_match_id(m)
ds_ids[dataset_key] = facility_id
if dataset_key != authoritative_dataset:
matched_ids[dataset_key].add(facility_id)
for dataset_key in dataset_order:
col = get_id_column(dataset_key)
if not dataset_key in ds_ids:
matches[col].append(None)
else:
matches[col].append(ds_ids[dataset_key])
# Construct the FacilityMatchResult and return
matches_df = pd.DataFrame.from_dict(matches)
unmatched_per_dataset = {}
for dataset_key in matched_ids:
ids = set(facility_datasets[dataset_key]['df'][get_id_column(dataset_key)].astype(str).values)
unmatched_per_dataset[dataset_key] = ids - matched_ids[dataset_key]
# Merge the dataframes, using the geometry from the authoritative dataset and
# prefixing all but the ID columns by the dataset ID.
merged_df = matches_df
for dataset_key in dataset_order:
df = facility_datasets[dataset_key]['df']
id_column = get_id_column(dataset_key)
if dataset_key != authoritative_dataset:
df_prefixed = df.copy().add_prefix('{}_'.format(dataset_key))
df_prefixed = df_prefixed.rename(columns={'{}_{}'.format(dataset_key, id_column): id_column})
df_prefixed = df_prefixed.drop(columns=['{}_geometry'.format(dataset_key)])
else:
df_prefixed = df.copy()
df_prefixed[id_column] = df_prefixed[id_column].astype(str)
merged_df = merged_df.merge(df_prefixed, on=id_column, how='left')
merged_df = gpd.GeoDataFrame(merged_df, crs='epsg:4326') \
.sort_values([facility_datasets[dataset_key]['columns'].facility_id
for dataset_key in dataset_order])
return FacilityMatchResult(merged_df, matches_df, unmatched_per_dataset) | 97f169ccda8cf0b26bfa423936d8b663e6237d22 | 3,656,762 |
def has_field_warning(meta, field_id):
"""Warn if dataset has existing field with same id."""
if meta.has_field(field_id):
print(
"WARN: Field '%s' is already present in dataset, not overwriting."
% field_id
)
print("WARN: Use '--replace' flag to overwrite existing field.")
return 1
return 0 | 1cc5016f8ffcce698bcb53dcf6f307b760d7df55 | 3,656,763 |
def volume(surface):
"""Compute volume of a closed triangulated surface mesh."""
properties = vtk.vtkMassProperties()
properties.SetInput(surface)
properties.Update()
return properties.GetVolume() | 1969e3c6245cd76c50cdea19be41165ff16f73fc | 3,656,764 |
def simulate():
"""
Simulate one thing
"""
doors = getRandomDoorArray()
pickedDoor = chooseDoor()
goatDoor, switchDoor = openGoatDoor(pickedDoor, doors)
return doors[pickedDoor], doors[switchDoor] | 607fc6d0bdb5d24dc68c371c81e9e7028a54631f | 3,656,765 |
def fc_layer(x):
"""Basic Fully Connected (FC) layer with an activation function."""
return x | f26865e13065187363746b8bfe7d95ac221bf236 | 3,656,766 |
from typing import Dict
import collections
def get_slot_counts(cls: type) -> Dict[str, int]:
"""
Collects all of the given class's ``__slots__``, returning a
dict of the form ``{slot_name: count}``.
:param cls: The class whose slots to collect
:return: A :class:`collections.Counter` counting the number of occurrences of each slot
"""
slot_names = (name for name, _ in iter_slots(cls))
return collections.Counter(slot_names) | 7cb7c41c1d4f40aab1acd473f5e1238e4aefad44 | 3,656,767 |
def rot6d_to_axisAngle(x):
""""Convert 6d rotation representation to axis angle
Input:
(B,6) Batch of 6-D rotation representations
Output:
(B,3) Batch of corresponding axis angle
"""
rotMat = rot6d_to_rotmat(x)
return rotationMatrix_to_axisAngle(rotMat) | 17b24e0bb7521baa56df034c4e59658d4320c4cf | 3,656,768 |
import six
def within_tolerance(x, y, tolerance):
"""
Check that |x-y| <= tolerance with appropriate norm.
Args:
x: number or array (np array_like)
y: number or array (np array_like)
tolerance: Number or PercentageString
NOTE: Calculates x - y; may raise an error for incompatible shapes.
Usage
=====
The tolerance can be a number:
>>> within_tolerance(10, 9.01, 1)
True
>>> within_tolerance(10, 9.01, 0.5)
False
If tolerance is a percentage, it is a percent of (the norm of) x:
>>> within_tolerance(10, 9.01, '10%')
True
>>> within_tolerance(9.01, 10, '10%')
False
Works for vectors and matrices:
>>> A = np.array([[1,2],[-3,1]])
>>> B = np.array([[1.1, 2], [-2.8, 1]])
>>> diff = round(np.linalg.norm(A-B), 6)
>>> diff
0.223607
>>> within_tolerance(A, B, 0.25)
True
"""
# When used within graders, tolerance has already been
# validated as a Number or PercentageString
if isinstance(tolerance, six.text_type):
tolerance = np.linalg.norm(x) * percentage_as_number(tolerance)
difference = x - y
return np.linalg.norm(difference) <= tolerance | 918b14e33aeca426e24151d7a1eda2d340423b4d | 3,656,769 |
def hermeint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
"""
Integrate a Hermite_e series.
Returns the Hermite_e series coefficients `c` integrated `m` times from
`lbnd` along `axis`. At each iteration the resulting series is
**multiplied** by `scl` and an integration constant, `k`, is added.
The scaling factor is for use in a linear change of variable. ("Buyer
beware": note that, depending on what one is doing, one may want `scl`
to be the reciprocal of what one might expect; for more information,
see the Notes section below.) The argument `c` is an array of
coefficients from low to high degree along each axis, e.g., [1,2,3]
represents the series ``H_0 + 2*H_1 + 3*H_2`` while [[1,2],[1,2]]
represents ``1*H_0(x)*H_0(y) + 1*H_1(x)*H_0(y) + 2*H_0(x)*H_1(y) +
2*H_1(x)*H_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``.
Parameters
----------
c : array_like
Array of Hermite_e series coefficients. If c is multidimensional
the different axis correspond to different variables with the
degree in each axis given by the corresponding index.
m : int, optional
Order of integration, must be positive. (Default: 1)
k : {[], list, scalar}, optional
Integration constant(s). The value of the first integral at
``lbnd`` is the first value in the list, the value of the second
integral at ``lbnd`` is the second value, etc. If ``k == []`` (the
default), all constants are set to zero. If ``m == 1``, a single
scalar can be given instead of a list.
lbnd : scalar, optional
The lower bound of the integral. (Default: 0)
scl : scalar, optional
Following each integration the result is *multiplied* by `scl`
before the integration constant is added. (Default: 1)
axis : int, optional
Axis over which the integral is taken. (Default: 0).
.. versionadded:: 1.7.0
Returns
-------
S : ndarray
Hermite_e series coefficients of the integral.
Raises
------
ValueError
If ``m < 0``, ``len(k) > m``, ``np.isscalar(lbnd) == False``, or
``np.isscalar(scl) == False``.
See Also
--------
hermeder
Notes
-----
Note that the result of each integration is *multiplied* by `scl`.
Why is this important to note? Say one is making a linear change of
variable :math:`u = ax + b` in an integral relative to `x`. Then
.. math::`dx = du/a`, so one will need to set `scl` equal to
:math:`1/a` - perhaps not what one would have first thought.
Also note that, in general, the result of integrating a C-series needs
to be "reprojected" onto the C-series basis set. Thus, typically,
the result of this function is "unintuitive," albeit correct; see
Examples section below.
Examples
--------
>>> from numpy.polynomial.hermite_e import hermeint
>>> hermeint([1, 2, 3]) # integrate once, value 0 at 0.
array([ 1., 1., 1., 1.])
>>> hermeint([1, 2, 3], m=2) # integrate twice, value & deriv 0 at 0
array([-0.25 , 1. , 0.5 , 0.33333333, 0.25 ])
>>> hermeint([1, 2, 3], k=1) # integrate once, value 1 at 0.
array([ 2., 1., 1., 1.])
>>> hermeint([1, 2, 3], lbnd=-1) # integrate once, value 0 at -1
array([-1., 1., 1., 1.])
>>> hermeint([1, 2, 3], m=2, k=[1, 2], lbnd=-1)
array([ 1.83333333, 0. , 0.5 , 0.33333333, 0.25 ])
"""
c = np.array(c, ndmin=1, copy=1)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
if not np.iterable(k):
k = [k]
cnt, iaxis = [int(t) for t in [m, axis]]
if cnt != m:
raise ValueError("The order of integration must be integer")
if cnt < 0:
raise ValueError("The order of integration must be non-negative")
if len(k) > cnt:
raise ValueError("Too many integration constants")
if iaxis != axis:
raise ValueError("The axis must be integer")
if not -c.ndim <= iaxis < c.ndim:
raise ValueError("The axis is out of range")
if iaxis < 0:
iaxis += c.ndim
if cnt == 0:
return c
c = np.rollaxis(c, iaxis)
k = list(k) + [0]*(cnt - len(k))
for i in range(cnt):
n = len(c)
c *= scl
if n == 1 and np.all(c[0] == 0):
c[0] += k[i]
else:
tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype)
tmp[0] = c[0]*0
tmp[1] = c[0]
for j in range(1, n):
tmp[j + 1] = c[j]/(j + 1)
tmp[0] += k[i] - hermeval(lbnd, tmp)
c = tmp
c = np.rollaxis(c, 0, iaxis + 1)
return c | af1f024f4b6d60793fb3aa6ca2bcbe517c4b178f | 3,656,770 |
from math import sqrt
def model_netradiation(minTair = 0.7,
maxTair = 7.2,
albedoCoefficient = 0.23,
stefanBoltzman = 4.903e-09,
elevation = 0.0,
solarRadiation = 3.0,
vaporPressure = 6.1,
extraSolarRadiation = 11.7):
"""
- Description:
* Title: NetRadiation Model
* Author: Pierre Martre
* Reference: Modelling energy balance in the wheat crop model SiriusQuality2:
Evapotranspiration and canopy and soil temperature calculations
* Institution: INRA Montpellier
* Abstract: It is calculated at the surface of the canopy and is givenby the difference between incoming and outgoing radiation of both short
and long wavelength radiation
- inputs:
* name: minTair
** min : -30
** default : 0.7
** max : 45
** uri : http://www1.clermont.inra.fr/siriusquality/?page_id=547
** variablecategory : auxiliary
** datatype : DOUBLE
** inputtype : variable
** unit : °C
** description : minimum air temperature
* name: maxTair
** min : -30
** default : 7.2
** max : 45
** uri : http://www1.clermont.inra.fr/siriusquality/?page_id=547
** variablecategory : auxiliary
** datatype : DOUBLE
** inputtype : variable
** unit : °C
** description : maximum air Temperature
* name: albedoCoefficient
** parametercategory : constant
** min : 0
** datatype : DOUBLE
** max : 1
** uri : http://www1.clermont.inra.fr/siriusquality/?page_id=547
** default : 0.23
** inputtype : parameter
** unit :
** description : albedo Coefficient
* name: stefanBoltzman
** parametercategory : constant
** min : 0
** datatype : DOUBLE
** max : 1
** uri : http://www1.clermont.inra.fr/siriusquality/?page_id=547
** default : 4.903E-09
** inputtype : parameter
** unit :
** description : stefan Boltzman constant
* name: elevation
** parametercategory : constant
** min : -500
** datatype : DOUBLE
** max : 10000
** uri : http://www1.clermont.inra.fr/siriusquality/?page_id=547
** default : 0
** inputtype : parameter
** unit : m
** description : elevation
* name: solarRadiation
** min : 0
** default : 3
** max : 1000
** uri : http://www1.clermont.inra.fr/siriusquality/?page_id=547
** variablecategory : auxiliary
** datatype : DOUBLE
** inputtype : variable
** unit : MJ m-2 d-1
** description : solar Radiation
* name: vaporPressure
** min : 0
** default : 6.1
** max : 1000
** uri : http://www1.clermont.inra.fr/siriusquality/?page_id=547
** variablecategory : auxiliary
** datatype : DOUBLE
** inputtype : variable
** unit : hPa
** description : vapor Pressure
* name: extraSolarRadiation
** min : 0
** default : 11.7
** max : 1000
** uri : http://www1.clermont.inra.fr/siriusquality/?page_id=547
** variablecategory : auxiliary
** datatype : DOUBLE
** inputtype : variable
** unit : MJ m2 d-1
** description : extra Solar Radiation
- outputs:
* name: netRadiation
** min : 0
** variablecategory : auxiliary
** max : 5000
** uri : http://www1.clermont.inra.fr/siriusquality/?page_id=547
** datatype : DOUBLE
** unit : MJ m-2 d-1
** description : net radiation
* name: netOutGoingLongWaveRadiation
** min : 0
** variablecategory : auxiliary
** max : 5000
** uri : http://www1.clermont.inra.fr/siriusquality/?page_id=547
** datatype : DOUBLE
** unit : g m-2 d-1
** description : net OutGoing Long Wave Radiation
"""
Nsr = (1.0 - albedoCoefficient) * solarRadiation
clearSkySolarRadiation = (0.75 + (2 * pow(10.0, -5) * elevation)) * extraSolarRadiation
averageT = (pow(maxTair + 273.16, 4) + pow(minTair + 273.16, 4)) / 2.0
surfaceEmissivity = 0.34 - (0.14 * sqrt(vaporPressure / 10.0))
cloudCoverFactor = 1.35 * (solarRadiation / clearSkySolarRadiation) - 0.35
Nolr = stefanBoltzman * averageT * surfaceEmissivity * cloudCoverFactor
netRadiation = Nsr - Nolr
netOutGoingLongWaveRadiation = Nolr
return (netRadiation, netOutGoingLongWaveRadiation) | 369fffe5eef94148baa8526421adbbac7c3477fd | 3,656,771 |
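A single call with the documented default inputs, as a rough smoke test (assuming `sqrt` from the math module is in scope, as the body requires); the returned tuple is (netRadiation, netOutGoingLongWaveRadiation) in MJ m-2 d-1.

net_rad, net_lw = model_netradiation(minTair=0.7, maxTair=7.2,
                                     solarRadiation=3.0, vaporPressure=6.1,
                                     extraSolarRadiation=11.7)
print(net_rad, net_lw)  # net short-wave minus outgoing long-wave, and the long-wave term itself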
def get_tagset(sentences, with_prefix):
""" Returns the set of entity types appearing in the list of sentences.
If with_prefix is True, it returns both the B- and I- versions for each
entity found. If False, it merges them (i.e., removes the prefix and only
returns the entity type).
"""
iobs = [iob for sent in sentences for (x,iob) in sent]
tagset = set(iobs)
if not with_prefix:
tagset = set([t[2:] for t in list(tagset) if t != 'O'])
return tagset | c0b00f7c5546bfc7fe10b2d4b35998b5dedeba21 | 3,656,772 |
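A brief usage sketch with a toy CoNLL-style sentence list (tokens paired with IOB tags), illustrating the effect of with_prefix:

sentences = [
    [("Alice", "B-PER"), ("visited", "O"), ("Berlin", "B-LOC")],
    [("Bob", "B-PER"), ("Smith", "I-PER"), ("left", "O")],
]
print(sorted(get_tagset(sentences, with_prefix=True)))   # ['B-LOC', 'B-PER', 'I-PER', 'O']
print(sorted(get_tagset(sentences, with_prefix=False)))  # ['LOC', 'PER']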
def normpath(s: str) -> str:
"""Normalize path. Just for compatibility with normal python3."""
return s | 30c528b11f75f52275b753c789e2e3d5bf71641c | 3,656,774 |
def threshold_num_spikes(
sorting,
threshold,
threshold_sign,
sampling_frequency=None,
**kwargs
):
"""
Computes and thresholds the num spikes in the sorted dataset with the given sign and value.
Parameters
----------
sorting: SortingExtractor
The sorting result to be evaluated
threshold: int or float
The threshold for the given metric
threshold_sign: str
If 'less', will threshold any metric less than the given threshold
If 'less_or_equal', will threshold any metric less than or equal to the given threshold
If 'greater', will threshold any metric greater than the given threshold
If 'greater_or_equal', will threshold any metric greater than or equal to the given threshold
sampling_frequency: float
The sampling frequency of the result. If None, will check to see if sampling frequency is in sorting extractor
**kwargs: keyword arguments
Keyword arguments among the following:
save_property_or_features: bool
If True, the metric is saved as sorting property
verbose: bool
If True, will be verbose in metric computation
Returns
----------
threshold sorting extractor
"""
params_dict = update_all_param_dicts_with_kwargs(kwargs)
md = MetricData(sorting=sorting, sampling_frequency=sampling_frequency, recording=None, apply_filter=False,
duration_in_frames=None, freq_min=300.0, freq_max=6000.0, unit_ids=None,
verbose=params_dict['verbose'], raise_if_empty=False)
ns = NumSpikes(metric_data=md)
threshold_sorting = ns.threshold_metric(threshold, threshold_sign, **kwargs)
return threshold_sorting | 37974060e23f8dbde2d3dd1246b0583ed16d4a87 | 3,656,775 |
def mad(stack, axis=0, scale=1.4826):
"""Median absolute deviation,
default is scaled such that +/-MAD covers 50% (between 1/4 and 3/4)
of the standard normal cumulative distribution
"""
stack_abs = np.abs(stack)
med = np.nanmedian(stack_abs, axis=axis)
return scale * np.nanmedian(np.abs(stack_abs - med), axis=axis) | c9425b8006476b11cc559a025597c5b620294b50 | 3,656,776 |
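A quick check of the helper above (assuming numpy is imported as np, as the body requires). Note that it takes np.abs(stack) first, so the example uses non-negative values: the scaled MAD stays small in the presence of a gross outlier, while the classical standard deviation blows up.

import numpy as np

x = np.array([1.0, 2.0, 3.0, 4.0, 100.0])   # one gross outlier
print(mad(x))       # 1.4826 -> robust spread estimate, unaffected by the outlier
print(np.std(x))    # ~39    -> classical std inflated by the same outlier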
def _get_window(append, size=(1000, 600)):
"""
Return a handle to a plot window to use for this plot.
If append is False, create a new plot window, otherwise return
a handle to the given window, or the last created window.
Args:
append (Union[bool, PlotWindow]): If true, return the last
created plot window, if PlotWindow, return that window, otherwise
a new window will be created.
size (Tuple[int, int]): The size in px of the new plot window. If append
is not false, this parameter has no effect.
"""
# Set up a plotting window
if append is None or append is False:
win = PlotWindow()
win.win_title = 'ID: '
win.resize(*size)
elif isinstance(append, PlotWindow):
# Append to the given window
win = append
elif isinstance(append, bool):
# Append to the last trace if true
win = PlotWindow.getWindows()[-1]
else:
raise ValueError("Unknown argument to append. Either give a plot window"
" or true to append to the last plot")
return win | 45ff89055db2caa442f55c80042820194554bed8 | 3,656,778 |
def _proxies_dict(proxy):
"""Makes a proxy dict appropriate to pass to requests."""
if not proxy:
return None
return {'http': proxy, 'https': proxy} | ce51015dc652c494dc89bb11e21f18803ba34c85 | 3,656,779 |
def _get_schedule_times(name, date):
"""
Fetch all `from_time` from [Healthcare Schedule Time Slot]
:param name: [Practitioner Schedule]
:param date: [datetime.date]
:return:
"""
mapped_day = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
time_slots = frappe.get_all(
'Healthcare Schedule Time Slot',
filters={'parent': name, 'day': mapped_day[date.weekday()]},
fields=['from_time']
)
return list(map(lambda x: x.get('from_time'), time_slots)) | 64de318f8bbe827e40566799172590f0e448a3d5 | 3,656,780 |
def delete(client, data, force=False):
"""
"""
param = {'logical-router-port-id': get_id(client, data)}
if force:
param['force'] = True
request = client.__getattr__(MODULE).DeleteLogicalRouterPort(**param)
response, _ = request.result()
return response | e573b962273f4ffc0ea4b0d5693329013fca4a6b | 3,656,781 |
import warnings
def z_standardization(
spark,
idf,
list_of_cols="all",
drop_cols=[],
pre_existing_model=False,
model_path="NA",
output_mode="replace",
print_impact=False,
):
"""
Standardization is commonly used in data pre-processing process. z_standardization standardizes the selected
attributes of an input dataframe by normalizing each attribute to have standard deviation of 1 and mean of 0. For
each attribute, the standard deviation (s) and mean (u) are calculated and a sample x will be standardized into (
x-u)/s. If the standard deviation of an attribute is 0, it will be excluded in standardization and a warning will
be shown. None values will be kept as None in the output dataframe.
Parameters
----------
spark
Spark Session
idf
Input Dataframe
list_of_cols
List of numerical columns to transform e.g., ["col1","col2"].
Alternatively, columns can be specified in a string format,
where different column names are separated by pipe delimiter “|” e.g., "col1|col2".
"all" can be passed to include all numerical columns for analysis. This is super useful instead of specifying all column names manually.
Please note that this argument is used in conjunction with drop_cols i.e. a column mentioned in
drop_cols argument is not considered for analysis even if it is mentioned in list_of_cols. (Default value = "all")
drop_cols
List of columns to be dropped e.g., ["col1","col2"].
Alternatively, columns can be specified in a string format,
where different column names are separated by pipe delimiter “|” e.g., "col1|col2".
It is most useful when coupled with the “all” value of list_of_cols, when we need to consider all columns except
a few handful of them. (Default value = [])
pre_existing_model
Boolean argument – True or False. True if model files (Mean/stddev for each feature) exists already, False Otherwise (Default value = False)
model_path
If pre_existing_model is True, this argument is path for referring the pre-saved model.
If pre_existing_model is False, this argument can be used for saving the model.
Default "NA" means there is neither pre-existing model nor there is a need to save one.
output_mode
"replace", "append".
“replace” option replaces original columns with transformed column. “append” option append transformed
column to the input dataset with a postfix "_scaled" e.g. column X is appended as X_scaled. (Default value = "replace")
print_impact
True, False (Default value = False)
This argument is to print out the before and after descriptive statistics of rescaled columns.
Returns
-------
DataFrame
Rescaled Dataframe
"""
num_cols = attributeType_segregation(idf)[0]
if list_of_cols == "all":
list_of_cols = num_cols
if isinstance(list_of_cols, str):
list_of_cols = [x.strip() for x in list_of_cols.split("|")]
if isinstance(drop_cols, str):
drop_cols = [x.strip() for x in drop_cols.split("|")]
list_of_cols = list(set([e for e in list_of_cols if e not in drop_cols]))
if any(x not in num_cols for x in list_of_cols):
raise TypeError("Invalid input for Column(s)")
if len(list_of_cols) == 0:
warnings.warn(
"No Standardization Performed - No numerical column(s) to transform"
)
return idf
if output_mode not in ("replace", "append"):
raise TypeError("Invalid input for output_mode")
parameters = []
excluded_cols = []
if pre_existing_model:
df_model = spark.read.parquet(model_path + "/z_standardization")
for i in list_of_cols:
mapped_value = (
df_model.where(F.col("feature") == i)
.select("parameters")
.rdd.flatMap(lambda x: x)
.collect()[0]
)
parameters.append(mapped_value)
else:
for i in list_of_cols:
mean, stddev = idf.select(F.mean(i), F.stddev(i)).first()
parameters.append(
[float(mean) if mean else None, float(stddev) if stddev else None]
)
if stddev:
if round(stddev, 5) == 0.0:
excluded_cols.append(i)
else:
excluded_cols.append(i)
if len(excluded_cols) > 0:
warnings.warn(
"The following column(s) are excluded from standardization because the standard deviation is zero:"
+ str(excluded_cols)
)
odf = idf
for index, i in enumerate(list_of_cols):
if i not in excluded_cols:
modify_col = (i + "_scaled") if (output_mode == "append") else i
odf = odf.withColumn(
modify_col, (F.col(i) - parameters[index][0]) / parameters[index][1]
)
if (not pre_existing_model) & (model_path != "NA"):
df_model = spark.createDataFrame(
zip(list_of_cols, parameters), schema=["feature", "parameters"]
)
df_model.coalesce(1).write.parquet(
model_path + "/z_standardization", mode="overwrite"
)
if print_impact:
if output_mode == "replace":
output_cols = list_of_cols
else:
output_cols = [
(i + "_scaled") for i in list_of_cols if i not in excluded_cols
]
print("Before: ")
idf.select(list_of_cols).describe().show(5, False)
print("After: ")
odf.select(output_cols).describe().show(5, False)
return odf | 962a7aa5721cc7d672c858d573af3c1d021e74d7 | 3,656,782 |
def event_stats(wit_df, wit_im, wit_area, pkey='SYSID'):
"""
Compute inundation event stats with given wit wetness, events defined by (start_time, end_time)
and polygon areas
input:
wit_df: wetness computed from wit data
wit_im: inundation event
wit_area: polygon areas indexed by the key
output:
dataframe of event stats
"""
grouped_im = wit_im[['start_time', 'end_time']].groupby(pkey)
return wit_df.groupby(pkey).apply(get_im_stats, im_time=grouped_im, wit_area=wit_area).droplevel(0) | f1bcef7604e15fc9b5a845ed45b976e22655d469 | 3,656,783 |
def upvote_book(book_id):
"""
Allows a user to upvote a book.
The upvotes field on the book document is updated,
as well as the booksUpvoted array on the user document
and the upvotedBy array on the book document.
"""
user_to_update = mongo.db.users.find_one({"username": session["user"]})
username = user_to_update.get("username")
mongo.db.books.update_one({"_id": ObjectId(book_id)}, {
'$inc': {'upvotes': +1}})
mongo.db.books.update_one({"_id": ObjectId(book_id)}, {
'$push': {'upvotedBy': username}})
mongo.db.users.update_one(
user_to_update, {'$push': {'booksUpvoted': ObjectId(book_id)}})
flash("Book has been upvoted!")
return redirect(url_for("get_book", book_id=book_id)) | 6a27c46e9540b871f4123c166d9cecaebc016c6b | 3,656,784 |
import numpy
def bottlegrowth_split_mig(params, ns):
"""
params = (nuB, nuF, m, T, Ts)
ns = [n1, n2]
Instantaneous size change followed by exponential growth then split with
migration.
nuB: Ratio of population size after instantaneous change to ancient
population size
nuF: Ratio of contemporary to ancient population size
m: Migration rate between the two populations (2*Na*m).
T: Time in the past at which instantaneous change happened and growth began
(in units of 2*Na generations)
Ts: Time in the past at which the two populations split.
n1, n2: Sample sizes of resulting Spectrum.
"""
nuB, nuF, m, T, Ts = params
nu_func = lambda t: [nuB * numpy.exp(numpy.log(nuF/nuB) * t / T)]
sts = moments.LinearSystem_1D.steady_state_1D(ns[0] + ns[1])
fs = moments.Spectrum(sts)
fs.integrate(nu_func, T - Ts, dt_fac=0.01)
# we split the population
fs = moments.Manips.split_1D_to_2D(fs, ns[0], ns[1])
nu0 = nu_func(T - Ts)[0]
nu_func = lambda t: 2 * [nu0 * numpy.exp(numpy.log(nuF/nu0) * t / Ts)]
fs.integrate(nu_func, Ts, m = numpy.array([[0, m], [m, 0]]))
return fs | dd191a7246d6575b784e61d8e1def17c0f143a7d | 3,656,785 |
def arch_to_macho(arch):
"""Converts an arch string into a macho arch tuple."""
try:
arch = rustcall(lib.symbolic_arch_to_macho, encode_str(arch))
return (arch.cputype, arch.cpusubtype)
except ignore_arch_exc:
pass | 2ffc1be349fc8438bc5b49bab1d9c79e8cbebdd3 | 3,656,786 |
def limit_ops_skeleton(**kwargs):
"""This function provides a skeleton for limit ops calculations"""
group_phase = kwargs['group_phase']
tail = kwargs['tail']
loading_phase = kwargs['loading_phase']
final_phase = kwargs['final_phase']
grouped_df = limit_ops_general_groups(
**group_phase
)
grouped_df = grouped_df.tail(tail)
loaded_table = load_and_rename(**loading_phase)
final_phase['first_df'] = grouped_df
final_phase['second_df'] = loaded_table
final_values = limit_ops_formatter(**final_phase)
return final_values | fb2dd1da8f2229794705376e15076477160bce35 | 3,656,787 |
def xy2traceset(xpos, ypos, **kwargs):
"""Convert from x,y positions to a trace set.
Parameters
----------
xpos, ypos : array-like
X,Y positions corresponding as [nx,Ntrace] arrays.
invvar : array-like, optional
Inverse variances for fitting.
func : :class:`str`, optional
Function type for fitting; defaults to 'legendre'.
ncoeff : :class:`int`, optional
Number of coefficients to fit. Defaults to 3.
xmin, xmax : :class:`float`, optional
Explicitly set minimum and maximum values, instead of computing
them from `xpos`.
maxiter : :class:`int`, optional
Maximum number of rejection iterations; set to 0 for no rejection;
default to 10.
inmask : array-like, optional
Mask set to 1 for good points and 0 for rejected points;
same dimensions as `xpos`, `ypos`. Points rejected by `inmask`
are always rejected from the fits (the rejection is "sticky"),
and will also be marked as rejected in the outmask attribute.
ia, inputans, inputfunc : array-like, optional
These arguments will be passed to :func:`func_fit`.
xjumplo : :class:`float`, optional
x position locating start of an x discontinuity
xjumphi : :class:`float`, optional
x position locating end of that x discontinuity
xjumpval : :class:`float`, optional
magnitude of the discontinuity "jump" between those bounds
(previous 3 keywords motivated by BOSS 2-phase readout)
Returns
-------
:class:`TraceSet`
A :class:`TraceSet` object.
"""
return TraceSet(xpos, ypos, **kwargs) | 7f4146600678cdb3699b239bf49a5d6062ef2a2e | 3,656,788 |
import slate3k as slate
import logging
def leer_pdf_slate(ubicacion_archivo, password=None):
"""
Uses the slate3k library to load a PDF file and extract the text of its pages.
:param ubicacion_archivo: (str). Path of the PDF file to read.
:param password: (str). Default value: None. Optional parameter for reading PDF files \
that are password-protected.
:return: (list). List of strings containing the text extracted from each page of the PDF.
"""
# Suppress slate's warnings
logging.getLogger('pdfminer').setLevel(logging.ERROR)
# Open the file and extract the text of its pages
with open(ubicacion_archivo, 'rb') as f:
if password is not None:
paginas = slate.PDF(f, password)
else:
paginas = slate.PDF(f)
# Return the extracted text
return paginas | 3e52c463238f1ecec30d34661eb8f53a6cf031a7 | 3,656,790 |
def gen_run_entry_str(query_id, doc_id, rank, score, run_id):
"""A simple function to generate one run entry.
:param query_id: query id
:param doc_id: document id
:param rank: entry rank
:param score: entry score
:param run_id: run id
"""
return f'{query_id} Q0 {doc_id} {rank} {score} {run_id}' | 657c59fea34e4aed2159337360c973dc99b53082 | 3,656,791 |
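For illustration, a tiny loop producing TREC-style run lines with hypothetical query/document ids and scores (the six whitespace-separated fields match the f-string above):

scored_docs = [("d17", 12.3), ("d42", 11.8), ("d05", 9.1)]   # hypothetical retrieval output
for rank, (doc_id, score) in enumerate(scored_docs, start=1):
    print(gen_run_entry_str("q1", doc_id, rank, score, "my_run"))
# q1 Q0 d17 1 12.3 my_run
# q1 Q0 d42 2 11.8 my_run
# q1 Q0 d05 3 9.1 my_run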
from pathlib import Path
def remove_template(args, output_file_name):
"""
remove the arg to use template; called when you make the template
:param args:
:param output_file_name:
:return:
"""
template_name = ''
dir_name = ''
template_found = False
for i in args:
if i.startswith('--template'):
# print_fun('FOUND')
args.remove(i)
# i ='--template=/s/telos/common/sjm-doc-template.tex'
# eq_loc = i.find("=") + 1
# dir_end = len(i) - i[::-1].find('/') - 1
# dir_name = i[eq_loc:dir_end]
# template_name = i[dir_end + 1:-4]
# template_found = True
# new
p = Path(i.split('=')[1])
dir_name = str(p.parent)
template_name = str(p.name)
template_found = True
# print_fun(template_name)
break
if not template_found:
raise ValueError('\n\n\nERROR: making template, need cla: --template=/template name... command line option!\n'
f'Args are {args}\nAborting.\n\n')
return
args, trash = adjust_output_file(args, dir_name, output_file_name)
return args, template_name | 652fa0112dd0b5287e1f98c5e70f84cacaa979c1 | 3,656,792 |
def replaceext(filepath, new_ext, *considered_exts):
"""replace extension of filepath with new_ext
filepath: a file path
new_ext: extension the returned filepath should have (e.g ".ext")
considered_exts: Each is a case insensitive extension that should be considered a
single extension and replaced accordingly. e.g. if you pass .tar.gz, file.tar.gz
becomes file.new_ext instead of file.tar.new_ext
returns: filepath with its extension replaced
"""
root = splitext(filepath, *considered_exts)[0]
return root + new_ext | 4e1abee01270921d01de1e75614612dcec8485d7 | 3,656,793 |
def textarea(name, content="", id=NotGiven, **attrs):
"""Create a text input area.
"""
attrs["name"] = name
_set_id_attr(attrs, id, name)
return HTML.tag("textarea", content, **attrs) | da85bdeb2d819eaa2e8109036087700afd270a21 | 3,656,794 |
def StretchContrast(pixlist, minmin=0, maxmax=0xff):
""" Stretch the current image row to the maximum dynamic range with
minmin mapped to black(0x00) and maxmax mapped to white(0xff) and
all other pixel values stretched accordingly."""
if minmin < 0: minmin = 0 # pixel minimum is 0
if maxmax > 0xff: maxmax = 0xff # pixel maximum is 255
if maxmax < minmin: maxmax = minmin # range sanity
min, max = maxmax, minmin
for pix in pixlist:
if pix < min and pix >= minmin:
min = pix
if pix > max and pix <= maxmax:
max = pix
if min > max: min = max
if min == max:
f = 1.0
else:
f = 255.0 / (max - min)
n = 0
newpixlist= []
for pix in pixlist:
if pix < minmin: pix = minmin
if pix > maxmax: pix = maxmax
pix = int((pix - min) * f)
newpixlist.append (pix)
return newpixlist | 5f511b4a8bd053d503618767fee06597f1688619 | 3,656,796 |
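A short worked example of the stretch above: the in-range minimum maps to 0, the maximum to 255, and intermediate values scale linearly; a constant row collapses to zeros because min equals max.

row = [10, 20, 30]
print(StretchContrast(row))        # [0, 127, 255]
print(StretchContrast([5, 5, 5]))  # [0, 0, 0] - constant row, f = 1.0 since min == max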
def get_database_uri(application):
""" Returns database URI. Prefer SQLALCHEMY_DATABASE_URI over components."""
if application.config.get('SQLALCHEMY_DATABASE_URI'):
return application.config['SQLALCHEMY_DATABASE_URI']
return '{driver}://{username}:{password}@{host}:{port}/{name}'\
.format(driver=application.config['EQ_SERVER_SIDE_STORAGE_DATABASE_DRIVER'],
username=application.eq['secret_store'].get_secret_by_name('EQ_SERVER_SIDE_STORAGE_DATABASE_USERNAME'),
password=application.eq['secret_store'].get_secret_by_name('EQ_SERVER_SIDE_STORAGE_DATABASE_PASSWORD'),
host=application.config['EQ_SERVER_SIDE_STORAGE_DATABASE_HOST'],
port=application.config['EQ_SERVER_SIDE_STORAGE_DATABASE_PORT'],
name=application.config['EQ_SERVER_SIDE_STORAGE_DATABASE_NAME']) | 6b04a9518798aa3392cdf41667e5edf1fdaa5125 | 3,656,798 |
from typing import OrderedDict
def get_s_vol_single_sma(c: CZSC, di: int = 1, t_seq=(5, 10, 20, 60)) -> OrderedDict:
"""获取倒数第i根K线的成交量单均线信号"""
freq: Freq = c.freq
s = OrderedDict()
k1 = str(freq.value)
k2 = f"倒{di}K成交量"
for t in t_seq:
x1 = Signal(k1=k1, k2=k2, k3=f"SMA{t}多空", v1="其他", v2='其他', v3='其他')
x2 = Signal(k1=k1, k2=k2, k3=f"SMA{t}方向", v1="其他", v2='其他', v3='其他')
s[x1.key] = x1.value
s[x2.key] = x2.value
min_k_nums = max(t_seq) + 10
if len(c.bars_raw) < min_k_nums:
return s
if di == 1:
vol = np.array([x.vol for x in c.bars_raw[-min_k_nums:]], dtype=np.float)
else:
vol = np.array([x.vol for x in c.bars_raw[-min_k_nums-di+1:-di+1]], dtype=np.float)
for t in t_seq:
sma = SMA(vol[-t-10:], timeperiod=t)
if vol[-1] >= sma[-1]:
v1 = Signal(k1=k1, k2=k2, k3=f"SMA{t}多空", v1="多头")
else:
v1 = Signal(k1=k1, k2=k2, k3=f"SMA{t}多空", v1="空头")
s[v1.key] = v1.value
if sma[-1] >= sma[-2]:
v2 = Signal(k1=k1, k2=k2, k3=f"SMA{t}方向", v1="向上")
else:
v2 = Signal(k1=k1, k2=k2, k3=f"SMA{t}方向", v1="向下")
s[v2.key] = v2.value
return s | d4453ec1e52ee2c19448855e0011b6ac31d5755b | 3,656,799 |
import torch
def sampler(value, percentile):
"""Score based on sampling task model output distribution
Args:
value: The output of the task model
percentile: the (sorted) index of the sample we use
Returns:
The percentile largest distance from the mean of the samples.
"""
softmaxed = nn.functional.softmax(value[0], dim=1)
samples = torch.tensor(
np.array(
list(
torch.utils.data.WeightedRandomSampler(
softmaxed, 10000)))).float()
mean_value = samples.mean(dim=1)
dist_from_mean = torch.abs(((
samples-mean_value.unsqueeze(1).repeat(
1, samples.shape[1]))+180)%360 - 180)
sorted_val = torch.sort(dist_from_mean).values
if percentile == 10000:
percentile = percentile-1
return sorted_val[:, percentile] | 905665d5219df7737adaf2c7fd435cef3f3c7f1d | 3,656,800 |
def gists_by(username, number=-1, etag=None):
"""Iterate over gists created by the provided username.
.. deprecated:: 1.2.0
Use :meth:`github3.github.GitHub.gists_by` instead.
:param str username: (required), if provided, get the gists for this user
instead of the authenticated user.
:param int number: (optional), number of gists to return. Default: -1,
return all of them
:param str etag: (optional), ETag from a previous request to the same
endpoint
:returns: generator of :class:`Gist <github3.gists.Gist>`
"""
if username:
return gh.gists_by(username, number, etag)
return iter([]) | b98f478dbac25c0334296da5055952a776af9d39 | 3,656,801 |
import pyspark
def needs_spark(test_item):
"""
Use as a decorator before test classes or methods to only run them if Spark is usable.
"""
test_item = _mark_test('spark', test_item)
try:
# noinspection PyUnresolvedReferences
import pyspark
except ImportError:
return unittest.skip("Skipping test. Install PySpark to include this test.")(test_item)
except:
raise
else:
return test_item | f4d40b7119f753162ed5f6377ebef3b42d2bf549 | 3,656,802 |
from typing import Generator
def get_school_years_from_db() -> Generator:
"""Get all school years from the database.
:return: iterable with all available school years
"""
session: db.orm.session.Session = Session()
return (e[0] for e in set(session.query(Holiday.school_year).all())) | 48651fab2364e03a2d18224c7a798d8754cca911 | 3,656,803 |
def get_api(context=None):
"""
This function tries to detect if the app is running on a K8S cluster or locally
and returns the corresponding API object to be used to query the API server.
"""
if app.config.get("MODE") == "KUBECONFIG":
return client.CustomObjectsApi(config.new_client_from_config(context=context))
elif app.config.get("MODE") == "CLUSTER":
return client.CustomObjectsApi() | 89808a80c3ad4ae1260ffbb9611543b6e33298ee | 3,656,804 |
def is_variant(title) -> bool:
"""
Check if an issue is variant cover.
"""
return "variant" in title.lower() | 5e0bab3030c069d7726bbc8c9909f561ed139cb8 | 3,656,805 |
def _decode_common(hparams):
"""Common graph for decoding."""
features = get_input(hparams, FLAGS.data_files)
decode_features = {}
for key in features:
if key.endswith("_refs"):
continue
decode_features[key] = features[key]
_, _, _, references = seq2act_model.compute_logits(
features, hparams, mode=tf.estimator.ModeKeys.EVAL)
decode_utils.decode_n_step(seq2act_model.compute_logits,
decode_features, references["areas"],
hparams, n=20,
beam_size=FLAGS.beam_size)
decode_mask = generate_action_mask(decode_features)
return decode_features, decode_mask, features | a9eac81d9fe5e0480c679e41a61699b9e281fdd5 | 3,656,806 |
from typing import Tuple
def _lex_single_line_comment(header: str) -> Tuple[str, str]:
"""
>>> _lex_single_line_comment("a=10")
('', 'a=10')
>>> _lex_single_line_comment("//comment\\nb=20")
('', 'b=20')
"""
if header[:2] != "//":
return "", header
line_end_pos = header.find("\n")
return "", header[line_end_pos + 1 :] | 4d562557db11c7279042e439a56cc7864fa259ef | 3,656,807 |
def in_box(X, box):
"""Get a boolean array indicating whether points X are within a given box
:param X: n_pts x n_dims array of points
:param box: 2 x n_dims box specs (box[0, :] is the min point and box[1, :] is the max point)
:return: n_pts boolean array r where r[idx] is True iff X[idx, :] is within the box
>>> import numpy as np
>>> X = np.arange(12).reshape((4, 3))
>>> print(X)
[[ 0 1 2]
[ 3 4 5]
[ 6 7 8]
[ 9 10 11]]
>>> in_box(X, [[1, 2, 3], [6, 7, 8]])
array([False, True, True, False])
>>> in_box(X, box=[[2] * 3, [7] * 3])
array([False, True, False, False])
"""
MINS_ROW_IDX = 0
MAXS_ROW_IDX = 1
X, box = map(np.array, (X, box))
n_rows_in_box_matrix, ndims = box.shape
assert (
n_rows_in_box_matrix == 2
), 'box must have 2 rows only: [0] the min point and [1] the max point of the box'
assert (
X.shape[1] == ndims
), f"ndims of X should be aligned with box's ({ndims}): Was {X.shape[1]}"
return np.all((box[MINS_ROW_IDX, :] <= X) & (X <= box[MAXS_ROW_IDX, :]), axis=1) | 8ee516937b3a19a27fed81cfee0ca19356cb5249 | 3,656,809 |
def test_partial_field_square():
"""Fields that do not extend over the whole wall"""
field = np.zeros((40, 40))
field[:10, 0] = 1
fields = {kw_field_map: field}
walls = "L"
assert func(fields, "s", walls=walls) == 0.25
field[:20, 0] = 1
assert func(fields, "s", walls=walls) == 0.5
field[:30, 0] = 1
assert func(fields, "s", walls=walls) == 0.75
print("test_partial_field() passed")
return True | 94af1cf9e500ddddd16c9fea61eeb43874589b68 | 3,656,810 |
def get_empty_faceid(current_groupid, uuid, embedding,
img_style, number_people, img_objid, forecast_result):
"""
Fall back to identification by scanning the whole database when softmax gives no result (no model available / prediction confidence too low).
:param current_groupid:
:param uuid:
:param embedding:
:param img_style:
:param number_people:
:param img_objid:
:return:
"""
json_data = {'detected': True, 'recognized': False}
face_id = img_objid + str(all_face_index).zfill(4)
json_data['recognized'] = False
json_data['face_id'] = face_id
json_data['accuracy'] = 0
json_data['style'] = img_style
forecast_result['face_id'] = face_id
forecast_result['face_accuracy'] = 0
embedding_string = ','.join(str(x) for x in embedding)
forecast_result['embedding_string'] = embedding_string
return json_data, forecast_result | 265250564de0ac160c5f0110293fc52693edaeda | 3,656,812 |
def dz_and_top_to_phis(
top_height: xr.DataArray, dz: xr.DataArray, dim: str = COORD_Z_CENTER
) -> xr.DataArray:
""" Compute surface geopotential from model top height and layer thicknesses"""
return _GRAVITY * (top_height + dz.sum(dim=dim)) | 14e19781cdac7a743d26db7d29317ea33ae94517 | 3,656,813 |
import scipy
def altPDF(peaks,mu,sigma=None,exc=None,method="RFT"):
"""
altPDF: Returns probability density using a truncated normal
distribution that we define as the distribution of local maxima in a
GRF under the alternative hypothesis of activation
parameters
----------
peaks: float or list of floats
list of peak heights
mu:
sigma:
returns
-------
fa: float or list
probability density of the peak heights under Ha
"""
#Returns probability density of the alternative peak distribution
peaks = np.asarray(peaks)
if method == "RFT":
# assert type(sigma) is in [float, int]
# assert sigma is not None
ksi = (peaks-mu)/sigma
alpha = (exc-mu)/sigma
num = 1/sigma * scipy.stats.norm.pdf(ksi)
den = 1. - scipy.stats.norm.cdf(alpha)
fa = num/den
elif method == "CS":
fa = [peakdistribution.peakdens3D(y-mu,1) for y in peaks]
return fa | 0346d1efcad2a3f8e1548857c980fb6c92ea07f3 | 3,656,814 |
def implicit_quantile_network(num_actions, quantile_embedding_dim,
network_type, state, num_quantiles):
"""The Implicit Quantile ConvNet.
Args:
num_actions: int, number of actions.
quantile_embedding_dim: int, embedding dimension for the quantile input.
network_type: namedtuple, collection of expected values to return.
state: `tf.Tensor`, contains the agent's current state.
num_quantiles: int, number of quantile inputs.
Returns:
net: _network_type object containing the tensors output by the network.
"""
model = atari_lib.ImplicitQuantileNetwork(num_actions, quantile_embedding_dim)
net = model(state, num_quantiles)
return network_type(quantile_values=net.quantile_values,
quantiles=net.quantiles) | 2782f94b2003dca0a8865298dab2dbb17ec4cb45 | 3,656,815 |
def get_waitlist(usercode):
"""
Endpoint /api/waitlists/{usercode} - returns the waitlist content for the given usercode
"""
user_by_usercode = (
AppUsers.query.filter(AppUsers.usercode == usercode).one_or_none()
)
if user_by_usercode is None:
abort(
409,
"Usercode {usercode} does not exists".format(
usercode=usercode
),
)
string_array_waitlist = user_by_usercode.waitlist
# Query content by the collected IDs
try:
array_waitlist = [int(s) for s in string_array_waitlist.split(',')]
except ValueError:
abort(
404,
"Waitlist empty or wrong format. Format of waitlist string should be - 1,2,3,4,5 etc",
)
except AttributeError:
abort(
404,
"Waitlist empty or wrong format. Format of waitlist string should be - 1,2,3,4,5 etc",
)
content = Content.query.filter(Content.content_id.in_(array_waitlist)).all()
    # Check that content was found for these IDs
if content is not None:
        # Serialization
content_schema = ContentSchema(many=True)
data = content_schema.dump(content).data
return data
    # Error if nothing was found
else:
abort(
404,
"Empty show list with this IDs",
) | 0592d188020b967198c1cb052d1e4b3adbc1ed21 | 3,656,816 |
def not_empty(message=None) -> Filter_T:
"""
Validate any object to ensure it's not empty (is None or has no elements).
"""
def validate(value):
if value is None:
_raise_failure(message)
if hasattr(value, '__len__') and value.__len__() == 0:
_raise_failure(message)
return value
return validate | f1ee9b43936978dfbd81550b9931d0cc8800eef2 | 3,656,817 |
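Illustrative use as a validator (Filter_T and _raise_failure come from the surrounding validation framework and are assumed to be available):

validate_name = not_empty('name must not be empty')
validate_name('alice')   # returns 'alice'
validate_name('')        # triggers _raise_failure('name must not be empty')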
import numpy as np
def temporal_affine_forward(x, W, b):
"""
Run a forward pass for temporal affine layer. The dimensions are consistent with RNN/LSTM forward passes.
Arguments:
x: input data with shape (N, T, D)
W: weight matrix for input data with shape (D, M)
b: bias with shape (M,)
Outputs:
out: output data with shape (N, T, M)
cache: cache for back-prop
"""
N, T, D = x.shape
M = b.shape[0]
out = np.dot(x.reshape(N * T, D), W).reshape(N, T, M) + b
cache = x, W, b, out
return out, cache | 2eca1c3ef36eb8bdbcaaad88c3b2f1234227e2d4 | 3,656,818 |
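A minimal shape check for the forward pass above (illustrative only):

import numpy as np
N, T, D, M = 2, 3, 4, 5
x = np.random.randn(N, T, D)
W = np.random.randn(D, M)
b = np.random.randn(M)
out, cache = temporal_affine_forward(x, W, b)
assert out.shape == (N, T, M)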
import numpy as np
def uniform_regular_knot_vector(n, p, t0=0.0, t1=1.0):
"""
Create a p+1-regular uniform knot vector for
a given number of control points
Throws if n is too small
"""
# The minimum length of a p+1-regular knot vector
# is 2*(p+1)
if n < p+1:
raise RuntimeError("Too small n for a uniform regular knot vector")
# p+1 copies of t0 left and p+1 copies of t1 right
# but one of each in linspace
return [t0]*p + list(np.linspace(t0, t1, n+1-p)) + [t1]*p | e0e1bc9f2e2ea2e74d70c76d35479efebc42d2f7 | 3,656,819 |
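A quick usage sketch (illustrative, not part of the original source): a cubic curve (p=3) with n=7 control points gets a 4-regular uniform knot vector.

knots = uniform_regular_knot_vector(n=7, p=3)
print(knots)
# -> [0.0, 0.0, 0.0, 0.0, 0.25, 0.5, 0.75, 1.0, 1.0, 1.0, 1.0]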
import numpy as np
def generateCM(labelValue, predictValue):
"""Generates the confusion matrix and rteturn it.
Args:
labelValue (np.ndarray): true values.
predictValue (np.ndarray): predicted values.
"""
FPMtx = np.logical_and((labelValue <= 0), (predictValue > 0))
FPIndices = np.argwhere(FPMtx)
FPNum = np.sum(FPMtx)
FNMtx = np.logical_and((labelValue > 0), (predictValue <= 0))
FNIndices = np.argwhere(FNMtx)
FNNum = np.sum(FNMtx)
TPMtx = np.logical_and((labelValue > 0), (predictValue > 0))
TPIndices = np.argwhere(TPMtx)
TPNum = np.sum(TPMtx)
TNMtx = np.logical_and((labelValue <= 0), (predictValue <= 0))
TNIndices = np.argwhere(TNMtx)
TNNum = np.sum(TNMtx)
accuracy = (TPNum+TNNum) / (TPNum+TNNum+FPNum+FNNum)
FPrate = FPNum / (FPNum+TNNum)
FNrate = FNNum / (TPNum+FNNum)
TNrate = TNNum / (FPNum+TNNum)
TPrate = TPNum / (TPNum+FNNum)
print(
"TP: {:.0f}, FN: {:.0f}, FP: {:.0f}, TN: {:.0f}".format(
TPNum, FNNum, FPNum, TNNum
)
)
cm = np.array([[TPrate, FNrate], [FPrate, TNrate]])
return cm, accuracy, TPIndices, FNIndices, FPIndices, TNIndices | 3ea5751bb9c9153edf4fdce12512319b75f80484 | 3,656,820 |
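Example with labels and predictions encoded as positive vs. non-positive values (illustrative only):

import numpy as np
labels = np.array([1, 1, -1, -1])
preds = np.array([1, -1, -1, 1])
cm, acc, *_ = generateCM(labels, preds)
print(cm)   # [[TP rate, FN rate], [FP rate, TN rate]]
print(acc)  # -> 0.5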
import numpy as np
from gensim.models import Word2Vec
from sklearn.metrics.pairwise import cosine_similarity
def get_cosine_similarity(word2vec: Word2Vec) -> np.ndarray:
"""Get the cosine similarity matrix from the embedding.
    Warning: the result is an n_words x n_words matrix and may be very large!
"""
return cosine_similarity(word2vec.wv.vectors) | b9a976de8faef0cd85265c4afdb80dd8720128f5 | 3,656,821 |
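Sketch with a toy gensim corpus (assumes gensim and scikit-learn are installed):

from gensim.models import Word2Vec
model = Word2Vec([["hello", "world"], ["hello", "there"]], min_count=1)
sim = get_cosine_similarity(model)
print(sim.shape)  # -> (vocab_size, vocab_size)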
def get_bot_id() -> str:
"""
Gets the app bot ID
Returns:
The app bot ID
"""
response = CLIENT.auth_test()
return response.get('user_id') | 6dcf2121fb11fb4af1615c9d739923e86299cc0a | 3,656,822 |
import boto3
def _fetch_from_s3(bucket_name, path):
"""Fetch the contents of an S3 object
Args:
bucket_name (str): The S3 bucket name
path (str): The path to the S3 object
Returns:
str: The content of the S3 object in string format
"""
s3 = boto3.resource('s3')
bucket = s3.Bucket(bucket_name)
obj = bucket.Object(path)
data = obj.get()['Body'].read().decode('utf-8')
return data | 3e284b653c046a826b92e82bf60e7e12547280b2 | 3,656,824 |
from datetime import timedelta
def inc_date(date_obj, num, date_fmt):
    """Increment the date by the given number of days and return it
    formatted with the specified date format string.
    """
return (date_obj + timedelta(days=num)).strftime(date_fmt) | 560d82b8e72614b8f9011ab97c10a7612d1c50b0 | 3,656,826 |
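Example call (assuming a datetime.date or datetime.datetime input):

from datetime import date
print(inc_date(date(2020, 1, 30), 3, "%Y-%m-%d"))  # -> "2020-02-02"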
from rdkit import Chem
def recombine_edges(output_edges):
    """
    Recombine a list of edge SMILES based on their isotope labels:
    bond together atoms carrying identical Xe isotope labels, then remove those atoms.
    :param output_edges:
    :return:
    """
mol = Chem.MolFromSmiles(".".join(output_edges))
# Dictionary of atom's to bond together and delete if they come in pairs
iso_dict = {}
for atom in mol.GetAtoms():
if atom.GetAtomicNum() == 54:
# Get the isotope
iso = atom.GetIsotope()
if iso in iso_dict:
iso_dict[iso].append(get_info(atom))
else:
iso_dict[iso] = [get_info(atom)]
mw = Chem.RWMol(mol)
# Add bonds first
del_indices = []
for isotope in iso_dict:
if len(iso_dict[isotope]) > 1:
mw.AddBond(
iso_dict[isotope][0][1], iso_dict[isotope][1][1], Chem.BondType.SINGLE
)
del_indices.append(iso_dict[isotope][0][0])
del_indices.append(iso_dict[isotope][1][0])
# Now delete atoms
del_count = 0
for atom_index in sorted(del_indices):
mw.RemoveAtom(atom_index - del_count)
del_count += 1
Chem.SanitizeMol(mw)
return Chem.MolToSmiles(mw, isomericSmiles=True) | 1bbce0bb315990f758aa47a0dae2dc34bff9fb2a | 3,656,827 |
def excludevars(vdict, filters):
"""
Remove dictionary items by filter
"""
vdict_remove = dict()
for filtr in filters:
a = filtervars_sub(vdict, filtr)
vdict_remove.update(a)
vdict_filtered = vdict.copy()
for key in vdict_remove.keys():
del vdict_filtered[key]
return vdict_filtered | 5050e946454c096a11c664d1a0910d2b2f7d985a | 3,656,829 |
def make_laplace_pyramid(x, levels):
"""
Make Laplacian Pyramid
"""
pyramid = []
current = x
for i in range(levels):
pyramid.append(laplacian(current))
current = tensor_resample(
current,
(max(current.shape[2] // 2, 1), max(current.shape[3] // 2, 1)))
pyramid.append(current)
return pyramid | 88b8c94a8f5ca3fda3329c0ac8fa871693c1482f | 3,656,831 |
def create_component(ctx: NVPContext):
"""Create an instance of the component"""
return ToolsManager(ctx) | 24cf48073bb16233046abdde966c28c570cf16c0 | 3,656,832 |
def get_routing_table() -> RouteCommandResult:
"""
Execute route command via subprocess. Blocks while waiting for output.
Returns the routing table in the form of a list of routes.
"""
return list(subprocess_workflow.exec_and_parse_subprocesses(
[RouteCommandParams()],
_get_route_command_args_list,
parse_route_output,
))[0] | 817d8350e7a2af514e3b239ec5d7dbc278fb7649 | 3,656,834 |
from array import array
def xor_arrays(arr1, arr2):
""" Does a XOR on 2 arrays, very slow"""
retarr = array('B')
for i in range(len(arr1)):
retarr.append(arr1[i] ^ arr2[i])
return retarr | 5ff978aa1a48a537a40132a5213b907fb7b14b4b | 3,656,835 |
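Example of XOR-ing two equal-length byte arrays (illustrative only):

a = array('B', [0b1010, 0b1100])
b = array('B', [0b0110, 0b1010])
print(list(xor_arrays(a, b)))  # -> [12, 6]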
def delete_category():
"""Delete category specified by id from database"""
category = Category.query.get(request.form['id'])
db.session.delete(category)
db.session.commit()
return '' | 47347299dd39c6afa9fd8d1cd10e1dc0906f6806 | 3,656,836 |
def gen_dd(acc, amt):
"""Generate a DD (low-level)"""
read()
dd_num = dd_no()
while dd_num in dds.keys():
dd_num = dd_no()
dd = {
'ac_no': acc,
'amount': amt
}
return dd_num, dd | 251a36131dae66f4d24dc2ce45db27f81da39845 | 3,656,837 |
import numpy as np
from scipy.spatial import distance
def coranking_matrix(high_data, low_data):
"""Generate a co-ranking matrix from two data frames of high and low
dimensional data.
:param high_data: DataFrame containing the higher dimensional data.
:param low_data: DataFrame containing the lower dimensional data.
:returns: the co-ranking matrix of the two data sets.
"""
n, m = high_data.shape
high_distance = distance.squareform(distance.pdist(high_data))
low_distance = distance.squareform(distance.pdist(low_data))
high_ranking = high_distance.argsort(axis=1).argsort(axis=1)
low_ranking = low_distance.argsort(axis=1).argsort(axis=1)
Q, xedges, yedges = np.histogram2d(high_ranking.flatten(),
low_ranking.flatten(),
bins=n)
Q = Q[1:, 1:] # remove rankings which correspond to themselves
return Q | 7cc77cd5ef70d7adef9020cab6f33a5dbf290557 | 3,656,838 |
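Illustrative check of the co-ranking matrix shape, using a PCA projection as the low-dimensional data (PCA is an assumption here, not part of the snippet):

import numpy as np
from sklearn.decomposition import PCA
high = np.random.rand(50, 10)
low = PCA(n_components=2).fit_transform(high)
Q = coranking_matrix(high, low)
print(Q.shape)  # -> (49, 49), i.e. (n - 1, n - 1) after dropping self-rankings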
import numpy as np
def gaussian_dist_xmu1xmu2_product_x(mu1,Sigma1,mu2,Sigma2):
"""Compute distribution of N(x|mu1,Sigma1)N(x|mu2,Sigma2)"""
InvSigmaHat = np.linalg.inv(Sigma1) + np.linalg.inv(Sigma2)
SigmaHat = np.linalg.inv(InvSigmaHat)
muHat = np.dot(SigmaHat,np.linalg.solve(Sigma1, mu1) + np.linalg.solve(Sigma2,mu2))
logC = gaussian_logprob(mu1,mu2,Sigma1 + Sigma2)
return (logC,muHat,SigmaHat) | 5eb50e98165bc77bc0754a93eef4f62b0665ea30 | 3,656,839 |
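A small sanity check of the product-of-Gaussians identity (gaussian_logprob is an external helper and is assumed to be importable):

import numpy as np
mu1, mu2 = np.array([0.0]), np.array([2.0])
S1, S2 = np.array([[1.0]]), np.array([[1.0]])
logC, muHat, SigmaHat = gaussian_dist_xmu1xmu2_product_x(mu1, S1, mu2, S2)
print(muHat, SigmaHat)  # -> [1.] [[0.5]]  (precision-weighted mean, halved variance)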
def default_marker_size(fmt):
""" Find a default matplotlib marker size such that different marker types
look roughly the same size.
"""
temp = fmt.replace('.-', '')
if '.' in temp:
ms = 10
elif 'D' in temp:
ms = 7
elif set(temp).intersection('<>^vd'):
ms = 9
else:
ms = 8
return ms | feebe9bdda47a2e041636f15c9b9595e5cd6b2cc | 3,656,840 |
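Example values (the '.-' substring is stripped before the marker checks):

print(default_marker_size('.-'))  # -> 8
print(default_marker_size('o.'))  # -> 10
print(default_marker_size('D'))   # -> 7
print(default_marker_size('v'))   # -> 9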
def vote_smart_candidate_rating_filter(rating):
"""
Filter down the complete dict from Vote Smart to just the fields we use locally
:param rating:
:return:
"""
rating_filtered = {
'ratingId': rating.ratingId,
'rating': rating.rating,
'timeSpan': rating.timespan, # Seems to be typo with lower case "s"
'ratingName': rating.ratingName,
'ratingText': rating.ratingText,
'sigId': rating.sigId,
}
return rating_filtered | f4fec92e46f58444abb8dab56f28acc7e670aab0 | 3,656,841 |
def get_syntax(view):
""" get_syntax(view : sublime.View) -> str
>>> get_syntax(view)
'newLISP'
>>> get_syntax(view)
'Lisp'
Retuns current file syntax/language
"""
syntax = view.settings().get('syntax')
syntax = syntax.split('/')[-1].replace('.tmLanguage', '')
return syntax | a5be75f51de105af63ce53df7c3b7094537d28f3 | 3,656,842 |
def random_otp():
"""
:return: OTP for Event
:return type: string
"""
try:
        all_events = Events.query.all()  # may raise here if the Events table is unavailable
all_holded_events = HoldedEvents.query.all()
used_otps = set()
for otp_ in all_events:
used_otps.add(str(otp_.otp))
for otp_ in all_holded_events:
used_otps.add(str(otp_.otp))
total_otps = set()
available_otps = set()
for otp_ in range(0, 999999+1):
otp = str(otp_)
if len(otp)!=6:
diff = 6-len(otp)
otp = '0'*diff + otp
total_otps.add(otp)
available_otps = total_otps - used_otps
if len(available_otps) == 1:
return available_otps.pop()
else:
return 'Fail'
except:
return 'Fail' | e343addc9252de4ca9d69a344beea05254c9ebb0 | 3,656,843 |
def read_config(path):
"""Read the complete INI file and check its version number
if OK, pass values to config-database
"""
return _read_config(path) | bbb95e5e02d54dd831082d556e19307109e1113d | 3,656,844 |
def getPath(file):
"""Get the path of a source file.
Use this to extract the path of a file/directory when the file
could be specified either as a FileTarget, DirectoryTarget or string.
@param file: The object representing the file.
@type file: L{FileTarget}, L{DirectoryTarget} or C{basestring}
"""
assert not isinstance(file, AsyncResult)
if isinstance(file, (FileTarget, DirectoryTarget)):
return file.path
elif isinstance(file, basestring):
return file
else:
return None | b80e5f0ead8be98dd40bbd444bc8ae9201eb54ed | 3,656,845 |
import cv2
import numpy as np
import torch
def optical_flow_to_rgb(flows):
"""
Args:
A tensor with a batch of flow fields of shape [b*num_src, 2, h, w]
"""
flows = flows.cpu().numpy()
_, h, w = flows[0].shape
rgbs = []
for i in range(len(flows)):
mag, ang = cv2.cartToPolar(flows[i, 0, ...], flows[i, 1, ...])
hsv = np.zeros(shape=(h, w, 3), dtype="float32")
# true_angle / 2, hue range [0, 180]
hsv[..., 0] = (ang * 180 / np.pi) / 2
hsv[..., 1] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
hsv[..., 2] = 255
rgb = cv2.cvtColor(hsv.astype("uint8"), cv2.COLOR_HSV2BGR)
rgbs.append(rgb)
rgbs = np.array(rgbs).transpose([0, 3, 1, 2])
return torch.tensor(rgbs) | d27074eab88f0f1181c5e1acae4839cbed984e17 | 3,656,846 |
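Shape sketch: a batch of two 2-channel flow fields becomes a batch of RGB images returned as a tensor of shape [2, 3, h, w] (illustrative only):

import torch
flows = torch.randn(2, 2, 64, 64)
rgb = optical_flow_to_rgb(flows)
print(rgb.shape)  # -> torch.Size([2, 3, 64, 64])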
from re import finditer, MULTILINE
def get_motes_from_simulation(simfile, as_dictionary=True):
"""
This function retrieves motes data from a simulation file (.csc).
:param simfile: path to the simulation file
:param as_dictionary: flag to indicate that the output has to be formatted as a dictionary
:return: the list of motes formatted as dictionaries with 'id', 'x', 'y' and 'motetype_identifier' keys if
short is False or a dictionary with each mote id as the key and its tuple (x, y) as the value
"""
motes = []
with open(simfile) as f:
content = f.read()
iterables, fields = [], ['mote_id']
for it in ['id', 'x', 'y', 'motetype_identifier']:
iterables.append(finditer(r'^\s*<{0}>(?P<{0}>.*)</{0}>\s*$'.format(it), content, MULTILINE))
for matches in zip(*iterables):
mote = {}
for m in matches:
mote.update(m.groupdict())
motes.append(mote)
if as_dictionary:
motes = {int(m['id']): (float(m['x']), float(m['y'])) for m in motes}
return motes | bbf09378a45cee9a96dca136bc7751ea9372eeac | 3,656,847 |
def menu_bar():
"""each mini-game has a menu bar that allows direct access to
the main menu. This allows story mode to be bypassed after
starting war, but the game state will not be saved"""
pygame.draw.rect(SCREEN, TEAL, (0, 460, 640, 40))
menu_font = pygame.font.Font('freesansbold.ttf', 15)
menu_txt = menu_font.render("Menu", True, BLACK, TEAL)
menu_rect = menu_txt.get_rect()
menu_rect.center = (60, 480)
SCREEN.blit(menu_txt, menu_rect)
instr_txt = menu_font.render("Instructions", True, BLACK, TEAL)
instr_rect = instr_txt.get_rect()
instr_rect.center = (150, 480)
SCREEN.blit(instr_txt, instr_rect)
return menu_rect, instr_rect | 0b5b16db2f53c1cbb45236512597d954bb28e7da | 3,656,848 |
def merge_sort(a, p, r):
""" merge sort
    :param a: the array to sort; the slice a[p:r+1] will be sorted
    :param p: start index; if p >= r the subarray has length <= 1 and is returned unchanged
    :param r: end index (inclusive); if p >= r the subarray has length <= 1 and is returned unchanged
"""
if p < r:
q = int((p + r) / 2)
# divider
a = merge_sort(a, p, q)
a = merge_sort(a, q + 1, r)
# conquer
merge(a, p, q, r)
return a | 07aab16ea75cb01f2f1fb3ae32f1b1ac31c76cfb | 3,656,849 |
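Example usage; this assumes the standard merge(a, p, q, r) helper (not shown in this snippet) that merges the two sorted halves in place:

data = [5, 2, 4, 7, 1, 3, 2, 6]
merge_sort(data, 0, len(data) - 1)
print(data)  # -> [1, 2, 2, 3, 4, 5, 6, 7]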
def get_flow_graph(limit, period):
"""
    :type limit: int
    :type period: int
:rtype: list[dict]
"""
rows = ElasticsearchQuery(
es_host=ELASTICSEARCH_HOST,
period=period,
index_prefix='logstash-other'
).query_by_string(
query='kubernetes.labels.job-name:* AND '
'kubernetes.container_name: "portability-metric" AND ("SELECT" OR "UPDATE")',
fields=[
'log',
'kubernetes.labels.job-name'
],
limit=limit
)
entries = []
for row in rows:
for entry in get_portability_metrics_query(
row['log'], row['kubernetes']['labels']['job-name']):
entries.append(entry)
# print(entries)
# process the logs
def _map(item):
return '{}'.join(item)
def _reduce(items):
# ('MetricArticleProvider.py', 'UPDATE', 'articledata')
first = items[0]
script = 'cron:{}'.format(first[0])
query_type = first[1]
table_name = 'db:{}'.format(first[2])
return {
'source': table_name if query_type == 'SELECT' else script,
'edge': query_type,
'target': table_name if query_type != 'SELECT' else script,
}
return logs_map_and_reduce(entries, _map, _reduce) | f51bb6aa6132303e2cd5ed3090507435739c0452 | 3,656,850 |
import re
import time
from colorama import Fore, Style
def upload(server_ip, share, username, password, domain, remote_path, local_path, verbose=True):
""" Get file and folder on the remote file server.
server_ip (str): This value is the ip smb server's ip.
share (str): This value is the share file name.
username (str): This value is the login username required to connect to smb service.
password (str): This value is the login password required to connect to smb service.
domain (str): This value is the server domain name.
        remote_path (str): This value is the remote path where the file will be uploaded.
        local_path (str): This value is the local path of the file to upload.
verbose (boolean): Print information about function progress.
Returns:
        boolean: True if the function runs correctly. If an error occurred, returns False.
"""
try:
smb = connect_samba_server(server_ip, share, username, password, domain, verbose=True)
smb.upload(local_path, remote_path)
smb.close()
regex = re.compile("((?:[^/]*/)*)(.*)")
for file in get_remote_dir(server_ip, share, username, password, domain, "/", verbose=True):
if regex.match(remote_path).group(2) in file:
print(Fore.GREEN+" ===> [upload] {"+regex.match(local_path).group(2)+"} -- "+time.strftime("%H:%M:%S", time.localtime())+Style.RESET_ALL)
return True
print(Fore.RED+" ===> [upload] {"+regex.match(local_path).group(2)+"} failed! -- "+time.strftime("%H:%M:%S", time.localtime())+Style.RESET_ALL)
return False
except Exception as e:
print(Fore.RED+" ===> [upload] failed during execution! -- "+time.strftime("%H:%M:%S", time.localtime())+Style.RESET_ALL)
return False | 552079181faa10b50c306b1ee9e02c190b9711a4 | 3,656,851 |
from typing import Union
import platform
def list_directory_command(api_client: CBCloudAPI, device_id: str, directory_path: str, limit: Union[int, str]):
"""
Get list of directory entries in the remote device
:param api_client: The API client
:param device_id: The device id
:param directory_path: Directory to list. This parameter should end with the path separator
:param limit: Limit the result entries count to be the given limit
:return: CommandResult represent the API command result
:rtype: ``CommandResults``
"""
session = api_client.select(platform.Device, device_id).lr_session()
items = [item for item in session.list_directory(directory_path) if item['filename'] not in IGNORED_FILES_IN_DIR]
items, partial_res_msg = get_limited_results(original_results=items, limit=limit)
directories_readable = []
context_entry_items = []
headers = ['name', 'type', 'date_modified', 'size']
for item in items:
context_entry_items.append(item)
directories_readable.append({
'name': item['filename'],
'type': 'Directory' if item['attributes'] and 'DIRECTORY' in item['attributes'] else 'File',
'date_modified': item['last_write_time'],
'size': item['size'],
})
context_entry = dict(content=context_entry_items, device_id=device_id, directory_path=directory_path)
readable_output = tableToMarkdown(f'Directory of {directory_path}{partial_res_msg}',
t=directories_readable,
headers=headers,
headerTransform=string_to_table_header,
removeNull=True)
return CommandResults(
outputs_prefix='CarbonBlackDefenseLR.Directory',
outputs_key_field=['device_id', 'directory_path'],
outputs=context_entry,
readable_output=readable_output,
raw_response=items,
) | 228d4884d2fd4f69e8c7a44d737bfeca7b40f753 | 3,656,852 |
def _hparams(network, random_seed):
"""
Global registry of hyperparams. Each entry is a (default, random) tuple.
New algorithms / networks / etc. should add entries here.
"""
hparams = {}
def _hparam(name, default_val, random_val_fn):
"""Define a hyperparameter. random_val_fn takes a RandomState and
returns a random hyperparameter value."""
random_state = np.random.RandomState(
misc.seed_hash(random_seed, name)
)
hparams[name] = (default_val, random_val_fn(random_state))
# Unconditional hparam definitions.
_hparam('lr', 0.001, lambda r: 10**r.uniform(-5, -2)) #
_hparam('weight_decay', 0, lambda r: 10**r.uniform(-6, -2))
_hparam('batch_size', 16, lambda r: int(r.choice([8,12,16])))
_hparam('epoch', 100, lambda r: int(r.choice([60,90,120,150])))
_hparam('transform_aug', False, lambda r: bool(r.choice([True,False])))
_hparam('lr_schedule', 1, lambda r: int(r.choice([0,1,2,3])))
if network == 'PoseResNet':
_hparam('num_layers', 50, lambda r: int(r.choice([50]))) #[18,34,50,101,152]
_hparam('pretrained', False, lambda r: bool(r.choice([False]))) #True,
return hparams | 34ea9ac295f3150b870e8d1c7ce0f1b867f75122 | 3,656,853 |
def bidding_search(request):
"""
"""
query = ''
form = BiddingSearchForm(shop=request.shop, data=request.GET)
if form.is_valid():
query = form.get_query()
results = form.search()
else:
results = form.all_results()
pager = Paginator(results, PAGE_SEARCH)
try:
page = int(request.GET.get('page','1'))
except:
page = 1
try:
products = pager.page(page)
except (EmptyPage, InvalidPage):
products = pager.page(pager.num_pages)
paged = (pager.num_pages > 1)
t = loader.get_template('bidding/blocks/search.html')
c = RequestContext(request, {'form': form,
'products' : products,
'pages': pager.page_range,
'paged': paged })
block_search = (t.render(c))
getvars = "&q=%s" % form.cleaned_data.get("q")
t = loader.get_template('paginator.html')
filter_params = {'q': form.cleaned_data.get("q", '')}
c = RequestContext(request, {'objects': products,
'getvars': getvars,
'filter_params': filter_params,
'pages': pager.page_range,
'paged': paged})
paginator = (t.render(c))
try:
page = DynamicPageContent.objects.filter(shop=request.shop, page="search").get()
description = striptags(page.meta_content)
except DynamicPageContent.DoesNotExist:
description = "No meta description found"
return HttpResponse(my_render(request, {'results': block_search,
'paginator': paginator,
'page_title': 'Search',
'page_description': description
}, 'search')) | cda6c4ebccec88c40ae714cd81392946165b184f | 3,656,854 |
def clean_code(code, code_type):
""" Returns the provided code string as a List of lines """
if code_type.startswith(BOOTSTRAP):
if code_type.endswith(CLEAN):
return code.split("\n")
code = code.replace("\\", "\\\\")
if code_type.startswith(PERMUTATION):
if code_type.endswith(CLEAN):
return code.split("\n")
if code_type.startswith(FRAGMENT):
if code_type.endswith(CLEAN):
return bytes(code, encoding="ascii").decode('unicode_escape')
code = code.replace("{", "{\\n").replace("}", "\\n}\\n").replace(";", ";\\n")
code = retab(bytes(code, encoding="ascii").decode('unicode_escape'))
return code.split("\n") | fff65103003a202a039fd4683da83735b0342a7a | 3,656,855 |
def level(arr, l, ax=2, t=None, rounding=False):
"""
As level 1D but accepts general arrays and level is taken is some
specified axis.
"""
return np.apply_along_axis(level1D, ax, arr, l, t, rounding) | 80835140850dbf7883f9b4cb1f92543ee2253845 | 3,656,856 |
def updateStore(request, storeId):
""" view for updating store """
if canViewThisStore(storeId, request.user.id):
# get the corresponding store
store = Store.objects.get(id=storeId)
metadata = getFBEOnboardingDetails(store.id)
if request.method == "POST":
# Create a form instance and populate it with data from the request (binding):
form = UpdateStoreForm(request.POST)
# Check if the form is valid:
if form.is_valid():
store.name = form.cleaned_data["business_name"]
store.save()
return redirect("viewStore", storeId)
form = UpdateStoreForm(initial={"business_name": store.name})
breadcrumbs = [(store.name, "viewStore", store.id)]
context = {
"form": form,
"store": store,
"fb_metadata": metadata,
"page_title": "Update Shop",
"breadcrumbs": breadcrumbs,
"button": "Update",
}
return render(request, "core/update.html", context)
else:
return render(request, "403.html") | 8cb92e31f2dc59a28281c45d698a7d810b573587 | 3,656,857 |
def i(t, T, r, a, b, c):
"""Chicago design storm equation - intensity. Uses ia and ib functions.
Args:
        t: time in minutes from storm beginning
T: total storm duration in minutes
r: time to peak ratio (peak time divided by total duration)
a: IDF A parameter - can be calculated from getABC
b: IDF B parameter - can be calculated from getABC
c: IDF C parameter - can be calculated from getABC
Returns:
Returns intensity in mm/hr.
"""
if t < T*r:
return ib(T*r - t, r, a, b, c)
elif t > T*r:
return ia(t - T*r, r, a, b, c)
else:
# Should be infinity, but this does the job
return 1000 | 3d31d64502fc4590b1d83e0d3a32a5de9cae2a56 | 3,656,858 |
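Illustrative call with made-up IDF coefficients; a, b and c would normally come from getABC, and ia/ib are the helper intensity functions referenced in the docstring:

T, r = 180.0, 0.4            # 3-hour storm, peak at 40% of its duration
a, b, c = 1000.0, 8.0, 0.8   # hypothetical IDF coefficients
hyetograph = [i(t, T, r, a, b, c) for t in range(0, int(T), 5)]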
def get_primary_id_from_equivalent_ids(equivalent_ids, _type):
"""find primary id from equivalent id dict
params
------
equivalent_ids: a dictionary containing all equivalent ids of a bio-entity
_type: the type of the bio-entity
"""
if not equivalent_ids:
return None
id_rank = [('bts:' + _item) for _item in id_ranks.get(_type)]
# loop through id_rank, if the id is found in equivalent ids, return it
for _item in id_rank:
if equivalent_ids.get(_item):
return (_item[4:] + ':' + equivalent_ids[_item][0])
# if no id found, return a random one from equivalent ids
for k, v in equivalent_ids.items():
if v:
return (k[4:] + ':' + v[0]) | 489f9d431e553772f114f1e4a3a2577c831addda | 3,656,859 |
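Hypothetical input shape (id_ranks is an external mapping from entity type to an ordered list of preferred identifier namespaces; the exact output depends on its contents):

equivalent_ids = {'bts:chembl': ['CHEMBL25'], 'bts:pubchem': ['2244']}
print(get_primary_id_from_equivalent_ids(equivalent_ids, 'ChemicalSubstance'))
# -> 'chembl:CHEMBL25' if 'chembl' ranks first for this entity type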
import numpy as np
def get_L_max_C(L_CS_x_t_i, L_CL_x_t_i):
    """Annual maximum of the daily total (sensible + latent) cooling load (MJ/d) (Eq. 20c)
    Args:
        L_CS_x_t_i(ndarray): sensible cooling load of heating/cooling zone i at time t on date d (MJ/h)
        L_CL_x_t_i(ndarray): latent cooling load of heating/cooling zone i at time t on date d (MJ/h)
    Returns:
        float: annual maximum of the daily total cooling load (MJ/d)
    """
    # Sum over the heating/cooling zone axis (drop the zone dimension)
L_CS_x_t = np.sum(L_CS_x_t_i, axis=0)
L_CL_x_t = np.sum(L_CL_x_t_i, axis=0)
    # Add the elements of L_CS_x_t and L_CL_x_t together
L_C_x_t = L_CS_x_t + L_CL_x_t
    # Reshape the 1-D array into a 2-D (365, 24) array
L_C_x_t = np.reshape(L_C_x_t, (365, 24))
    # Sum over the time (hour) axis
L_C_x = np.sum(L_C_x_t, axis=1)
    # Annual maximum of the daily total cooling load (MJ/d) (Eq. 20c)
L_max_C = np.max(L_C_x)
return L_max_C | 2c5e769baacfec0d75e711b3493b07ab65796690 | 3,656,860 |
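Shape sketch: one zone with constant hourly loads over a full year (illustrative only):

import numpy as np
L_CS = np.full((1, 365 * 24), 0.5)  # sensible cooling load (MJ/h)
L_CL = np.full((1, 365 * 24), 0.5)  # latent cooling load (MJ/h)
print(get_L_max_C(L_CS, L_CL))      # -> 24.0 (= 24 h * 1.0 MJ/h)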