content | id
---|---|
def _fix_nested_array(func_ir):
"""Look for assignment like: a[..] = b, where both a and b are numpy arrays, and
try to eliminate array b by expanding a with an extra dimension.
"""
"""
cfg = compute_cfg_from_blocks(func_ir.blocks)
all_loops = list(cfg.loops().values())
def find_nest_level(label):
level = 0
for loop in all_loops:
if label in loop.body:
level += 1
"""
def find_array_def(arr):
"""Find numpy array definition such as
arr = numba.unsafe.ndarray.empty_inferred(...).
If it is arr = b[...], find array definition of b recursively.
"""
arr_def = func_ir.get_definition(arr)
_make_debug_print("find_array_def")(arr, arr_def)
if isinstance(arr_def, ir.Expr):
if guard(_find_unsafe_empty_inferred, func_ir, arr_def):
return arr_def
elif arr_def.op == 'getitem':
return find_array_def(arr_def.value)
raise GuardException
def fix_array_assign(stmt):
"""For assignment like lhs[idx] = rhs, where both lhs and rhs are arrays, do the
following:
1. find the definition of rhs, which has to be a call to numba.unsafe.ndarray.empty_inferred
2. find the source array creation for lhs, insert an extra dimension of size of b.
3. replace the definition of rhs = numba.unsafe.ndarray.empty_inferred(...) with rhs = lhs[idx]
"""
require(isinstance(stmt, ir.SetItem))
require(isinstance(stmt.value, ir.Var))
debug_print = _make_debug_print("fix_array_assign")
debug_print("found SetItem: ", stmt)
lhs = stmt.target
# Find the source array creation of lhs
lhs_def = find_array_def(lhs)
debug_print("found lhs_def: ", lhs_def)
rhs_def = get_definition(func_ir, stmt.value)
debug_print("found rhs_def: ", rhs_def)
require(isinstance(rhs_def, ir.Expr))
if rhs_def.op == 'cast':
rhs_def = get_definition(func_ir, rhs_def.value)
require(isinstance(rhs_def, ir.Expr))
require(_find_unsafe_empty_inferred(func_ir, rhs_def))
# Find the array dimension of rhs
dim_def = get_definition(func_ir, rhs_def.args[0])
require(isinstance(dim_def, ir.Expr) and dim_def.op == 'build_tuple')
debug_print("dim_def = ", dim_def)
extra_dims = [ get_definition(func_ir, x, lhs_only=True) for x in dim_def.items ]
debug_print("extra_dims = ", extra_dims)
# Expand size tuple when creating lhs_def with extra_dims
size_tuple_def = get_definition(func_ir, lhs_def.args[0])
require(isinstance(size_tuple_def, ir.Expr) and size_tuple_def.op == 'build_tuple')
debug_print("size_tuple_def = ", size_tuple_def)
size_tuple_def.items += extra_dims
# In-place modify rhs_def to be getitem
rhs_def.op = 'getitem'
rhs_def.value = get_definition(func_ir, lhs, lhs_only=True)
rhs_def.index = stmt.index
del rhs_def._kws['func']
del rhs_def._kws['args']
del rhs_def._kws['vararg']
del rhs_def._kws['kws']
# success
return True
for label in find_topo_order(func_ir.blocks):
block = func_ir.blocks[label]
for stmt in block.body:
if guard(fix_array_assign, stmt):
block.body.remove(stmt) | 5,354,900 |
import numpy as np


def compute_angular_differences(matrix, orientation1, orientation2, cutoff):
""" Compute angular difference between two orientation ndarrays
:param matrix: domain matrix
:type matrix: np.ndarray
:param orientation1: orientation as (x, y, z, 3)
:type orientation1: np.ndarray
:param orientation2: orientation as (x, y, z, 3)
:type orientation2: np.ndarray
    :param cutoff: (min, max) range used to binarize the domain matrix into a mask
    :type cutoff: (int, int)
:return: angle_errors in degrees, mean, std
:rtype: (np.ndarray, float, float)
"""
if not isinstance(matrix, np.ndarray) or not isinstance(orientation1, np.ndarray) or not isinstance(orientation2, np.ndarray):
raise Exception("Inputs must be ndarrays.")
if not isinstance(cutoff, tuple) or not len(cutoff) == 2:
raise Exception("Cutoff must be a tuple(int, int).")
if not (orientation1.ndim == 4 and orientation2.ndim == 4 and matrix.ndim == 3 and orientation1.shape[3] == 3 and
orientation1.shape == orientation2.shape and orientation1.shape[0] == matrix.shape[0] and
orientation1.shape[1] == matrix.shape[1] and orientation1.shape[2] == matrix.shape[2]):
raise Exception("Incorrect dimensions in input ndarrays.")
mask = np.logical_and(matrix >= cutoff[0], matrix <= cutoff[1])
unit_vectors_1 = orientation1[mask]
unit_vectors_2 = orientation2[mask]
    # dot products of corresponding unit vectors, i.e. cosines of the angles between them
    radians_diff = np.einsum('ij,ij->i', unit_vectors_1, unit_vectors_2)
diff = np.zeros((unit_vectors_1.shape[0], 2), dtype=float)
diff[:, 0] = np.degrees(np.arccos(np.clip(radians_diff, -1, 1)))
diff[:, 1] = 180 - diff[:, 0]
diff = np.min(diff, axis=1)
angle_diff = np.zeros_like(matrix, dtype=float)
angle_diff[mask] = diff
return angle_diff, diff.mean(), diff.std() | 5,354,901 |
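# Usage sketch (not from the original source): build two random unit-vector fields on a
# small grid and compare them; the shapes and cutoff below are illustrative assumptions.
rng = np.random.default_rng(0)
shape = (4, 4, 4)
domain = rng.integers(0, 3, size=shape)
o1 = rng.normal(size=shape + (3,))
o1 /= np.linalg.norm(o1, axis=-1, keepdims=True)
o2 = rng.normal(size=shape + (3,))
o2 /= np.linalg.norm(o2, axis=-1, keepdims=True)
angles, mean_deg, std_deg = compute_angular_differences(domain, o1, o2, (1, 2))
print(angles.shape, round(mean_deg, 2), round(std_deg, 2))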
# NOTE: "await" became a reserved keyword in Python 3.7, so this definition only
# parses on older interpreters.
def await(*args):
"""Runs all the tasks specified in args,
and finally returns args unwrapped.
"""
return _await(args) | 5,354,902 |
def convert_action_move_exists(action, board, player_turn):
"""
Converts action index to chess.Move object.
Assume the action key exists in map_action_uci
:param action:
:param board:
:param player_turn:
:return:
"""
move = chess.Move.from_uci(map_action_uci[action])
if player_turn == chess.BLACK:
move = chess.Move(from_square=chess.square_mirror(move.from_square),
to_square=chess.square_mirror(move.to_square), promotion=move.promotion)
if move.promotion == chess.QUEEN:
move.promotion = None
rank = move.to_square//8
try:
if move.promotion is None and board.piece_at(move.from_square).piece_type == chess.PAWN and \
(rank == 7 or rank == 0):
move.promotion = chess.QUEEN
except AttributeError as err:
print(board, move, action, player_turn)
raise AttributeError(err)
return move | 5,354,903 |
def find_phase_files(input_filePath, run_number=1):
"""
Returns a list of the phase space files, sorted by z position
(filemname , z_approx)
"""
path, infile = os.path.split(input_filePath)
prefix = infile.split('.')[0] # Astra uses inputfile to name output
phase_import_file = ''
    phase_files = []
run_extension = astra_run_extension(run_number)
for file in os.listdir(path):
        if re.match(prefix + r'\.\d\d\d\d\.' + run_extension, file):
# Get z position
z = float(file.replace(prefix+ '.', '').replace('.'+run_extension,''))
phase_file=os.path.join(path, file)
phase_files.append((phase_file, z))
# Sort by z
return sorted(phase_files, key=lambda x: x[1]) | 5,354,904 |
def test_password_and_confirmed_password_must_be_the_same():
"""Password and confirmed password must be the same.""" | 5,354,905 |
def _get_system_username():
"""Return the current system user."""
if not win32:
import pwd
return pwd.getpwuid(getuid())[0]
else:
import win32api
import win32security
import win32profile
return win32api.GetUserName() | 5,354,906 |
def _get_vmedia_device():
"""Finds the device filename of the virtual media device using sysfs.
:returns: a string containing the filename of the virtual media device
"""
sysfs_device_models = glob.glob("/sys/class/block/*/device/model")
vmedia_device_model = "virtual media"
for model_file in sysfs_device_models:
try:
with open(model_file) as model_file_fobj:
if vmedia_device_model in model_file_fobj.read().lower():
vmedia_device = model_file.split('/')[4]
return vmedia_device
except Exception:
pass | 5,354,907 |
def create_new_transaction_type(txn_type_model: transaction_type_model.TransactionTypeModel) -> None:
"""Save a new transaction type model"""
txn_type_dto = txn_type_model.export_as_at_rest()
_log.info(f"Adding transaction index for {txn_type_model.txn_type}")
redisearch.create_transaction_index(txn_type_model.txn_type, txn_type_model.custom_indexes)
_log.debug(f"Queuing for activation")
redis.lpush_sync(QUEUED_TXN_TYPES, txn_type_model.txn_type)
_log.debug(f"Adding the transaction type to storage")
storage.put_object_as_json(f"{FOLDER}/{txn_type_model.txn_type}", txn_type_dto) | 5,354,908 |
def convert_to_posixpath(file_path):
"""Converts a Windows style filepath to posixpath format. If the operating
system is not Windows, this function does nothing.
Args:
file_path: str. The path to be converted.
Returns:
str. Returns a posixpath version of the file path.
"""
if not is_windows_os():
return file_path
return file_path.replace('\\', '/') | 5,354,909 |
def generateCards(numberOfSymb):
"""
Generates a list of cards which are themselves a list of symbols needed on each card to respect the rules of Dobble.
This algorithm was taken from the french Wikipedia page of "Dobble".
https://fr.wikipedia.org/wiki/Dobble
:param numberOfSymb: Number of symbols needed on each card.
:type numberOfSymb: int
:returns: List of cards which are list of symbols on it.
:rtype: List[List[int]]
"""
nbSymByCard = numberOfSymb
nbCards = (nbSymByCard**2) - nbSymByCard + 1
cards = []
n = nbSymByCard - 1
t = []
t.append([[(i+1)+(j*n) for i in range(n)] for j in range(n)])
for ti in range(n-1):
t.append([[t[0][((ti+1)*i) % n][(j+i) % n] for i in range(n)] for j in range(n)])
t.append([[t[0][i][j] for i in range(n)] for j in range(n)])
for i in range(n):
t[0][i].append(nbCards - n)
t[n][i].append(nbCards - n + 1)
for ti in range(n-1):
t[ti+1][i].append(nbCards - n + 1 + ti + 1)
t.append([[(i+(nbCards-n)) for i in range(nbSymByCard)]])
for ti in t:
cards = cards + ti
return cards | 5,354,910 |
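# Quick self-check (not part of the original source): with 8 symbols per card the
# construction yields 8*8 - 8 + 1 = 57 cards, and any two cards share exactly one symbol.
cards = generateCards(8)
assert len(cards) == 57
for a in range(len(cards)):
    for b in range(a + 1, len(cards)):
        assert len(set(cards[a]) & set(cards[b])) == 1
print("Every pair of the", len(cards), "cards shares exactly one symbol.")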
def map_period_average(
inputfile,
var,
periods=[("2020", "2040"), ("2040", "2060"), ("2060", "2080"), ("2080", "2100")],
):
"""
Produces a dataset containing period averages of 'var' from an input dataset path.
Parameters
---------
inputfile : str
path to data file.
var : str
variable name in dataset
periods: list of tuple of str
"""
# TODO
raise NotImplementedError | 5,354,911 |
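# A minimal sketch of what the TODO above could look like (not the project's actual
# implementation): it assumes xarray is available, that the file opens as a dataset with a
# decodable "time" coordinate, and that the year strings in `periods` can slice that axis.
import xarray as xr


def _map_period_average_sketch(inputfile, var, periods):
    ds = xr.open_dataset(inputfile)
    labels = [f"{start}-{end}" for start, end in periods]
    means = [ds[var].sel(time=slice(start, end)).mean(dim="time") for start, end in periods]
    return xr.concat(means, dim="period").assign_coords(period=labels)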
def get_metadata(tmpdirname):
"""
Get metadata from kmp.json if it exists.
If it does not exist then will return get_and_convert_infdata
    Args:
        tmpdirname (str): temp directory where the kmp was extracted
Returns:
list[5]: info, system, options, keyboards, files
see kmpmetadata.parsemetadata for details
"""
kmpjson = os.path.join(tmpdirname, "kmp.json")
if os.path.isfile(kmpjson):
return parsemetadata(kmpjson, False)
else:
return get_and_convert_infdata(tmpdirname) | 5,354,912 |
from typing import Any, List


def assert_single_entry_approx_value(
numbers: List[Any],
index: int,
value: float = 1.0,
value_abs_eps: float = 1e-14,
zero_abs_eps: float = 0.0,
) -> None:
"""The input numbers should all be zero except for a single entry,
which should equal the expected value approximately at the given index.
Maybe not exactly equal due to numerical roundoff.
"""
assert index < len(numbers)
for nn in range(len(numbers)):
expected_value = 0.0
eps = zero_abs_eps
if nn == index:
expected_value = value
eps = value_abs_eps
assert abs(numbers[nn] - expected_value) <= eps | 5,354,913 |
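# Example (illustrative): a near-one-hot vector with a little numerical noise passes.
assert_single_entry_approx_value([0.0, 0.0, 1.0 - 1e-15, 0.0], index=2)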
def match_facilities(facility_datasets,
authoritative_dataset,
manual_matches_df=None,
max_distance=150,
nearest_n=10,
meters_crs='epsg:5070',
reducer_fn=None):
"""Matches facilities. The dataset represented by the authoritative_dataset key
in the facilities_dfs dict will considered authoritative - all other facilities
in the remaining datasets will be dropped if they are not matched, and the point
location of the authoritative dataset will be used.
Args:
facility_datasets (Dict[str, Dict]): A dictionary keyed by
the dataset ID with values being a dictionary containing keys
'df' containing the dataframe of facility data and 'columns'
containing a FacilityColumns object.
authoritative_dataset: The dataset that contains the facilities all
other datasets will match to.
manual_matches_df: Dataframe containing manually matched facilities. Should contain
columns for each of the ID columns of the datasets with matching IDs in each row.
max_distance (int, optional): The maximum distance (in meters) that two matches can be apart.
Defaults to 150 meters.
nearest_n (int, optional): The number of neighbors to consider as potential options.
Defaults to 10.
meters_crs: The EPSG code for the projection to use for meters distance computations.
Defaults to EPSG:5070 (NAD83 / Conus Albers) for the U.S.
reducer_fn: Function to reduce potentially matched facilities. Defaults to
reduce_matched_facility_records. See that function's signature for required
parameters. Pass in alternate implementations to implement other matching approaches.
Result:
(FacilityMatchResult): The result of the match.
Note:
The resulting dataframes will convert the id columns of any dataset into a str type.
"""
MATCH_ID_SEP = '_-_'
if reducer_fn is None:
reducer_fn = reduce_matched_facility_records
def get_id_column(dataset_key):
return facility_datasets[dataset_key]['columns'].facility_id
def get_matched_set(subcomponent):
"""Method for collecting the data for the reducer_fn based on a
connected subcomponent. Returns the records of the matched set and a dictionary
that records the distances between the facilities.
"""
records = []
distances = {}
manual_matches = set([])
        for n in subcomponent:
ds, facility_id = deconstruct_match_id(n)
df = facility_datasets[ds]['df']
id_column = facility_datasets[ds]['columns'].facility_id
            record = df[df[id_column].astype(str) == facility_id].to_dict(orient='records')[0]
record['dataset'] = ds
record['match_id'] = n
records.append(record)
for u, v in G.edges(n):
edge_data = G.get_edge_data(u, v)
distances[(u, v)] = edge_data['weight']
if ds == authoritative_dataset and edge_data.get('manual_override', False):
connected_ds, _ = deconstruct_match_id(v)
manual_matches.add((u, connected_ds, v))
return records, distances, manual_matches
def construct_match_id(dataset_key, facility_id):
id_column = get_id_column(dataset_key)
return '{}{}{}'.format(
dataset_key,
MATCH_ID_SEP,
facility_id
)
def deconstruct_match_id(match_id):
return match_id.split(MATCH_ID_SEP)
assert authoritative_dataset in facility_datasets
# check that dataset ID columns are unique
dataset_id_columns = [
get_id_column(dataset_key)
for dataset_key in facility_datasets
]
if len(set(dataset_id_columns)) != len(dataset_id_columns):
raise Exception('Dataset ID column names must be unique.')
# Setup a distinct order of datasets
dataset_order = [authoritative_dataset] + sorted([x for x in facility_datasets
if x != authoritative_dataset])
# Set of match_ids
ids = []
# Set of (x,y) points aligned with ids, in meters_crs
pts = []
# Mapping from match_id -> point
ids_to_pts = {}
# Construct a reprojected geodataframe per dataset, and
# record the match ids and points for usage in the KNN
# computation below.
for dataset_key in dataset_order:
df = facility_datasets[dataset_key]['df']
meters_df = df.to_crs(meters_crs)
id_column = get_id_column(dataset_key)
meters_df['match_id'] = '{}{}'.format(dataset_key, MATCH_ID_SEP) + \
meters_df[id_column].astype(str)
facility_datasets[dataset_key]['meters_df'] = meters_df
for _, row in meters_df.iterrows():
match_id = row['match_id']
pt = (row['geometry'].x, row['geometry'].y)
ids_to_pts[match_id] = pt
ids.append(match_id)
pts.append(pt)
# Compute the K Nearest Neighbors for all points in the dataset.
kd_tree = libpysal.cg.KDTree(np.array(pts))
nearest_neighbors = libpysal.weights.KNN(kd_tree, k=nearest_n, ids=ids).neighbors
# For every match, make an edge in a graph. Don't add an edge between
# points that are further than the max distance. The weight of the edge
# is the distance between them in meters.
G = nx.Graph()
for match_id in nearest_neighbors:
source_pt = ids_to_pts[match_id]
G.add_node(match_id)
for neighbor_id in nearest_neighbors[match_id]:
neighbor_pt = ids_to_pts[neighbor_id]
dist = euclidean(source_pt, neighbor_pt)
if dist <= max_distance and not G.has_edge(match_id, neighbor_id):
G.add_edge(match_id, neighbor_id, weight=dist)
# Create edges for manual matches and mark them as such.
if manual_matches_df is not None:
auth_id_column = facility_datasets[authoritative_dataset]['columns'].facility_id
for _, row in manual_matches_df.iterrows():
# Get the authoritative dataset ID (required for each row)
auth_id = construct_match_id(authoritative_dataset, row[auth_id_column])
source_pt = ids_to_pts[auth_id]
for dataset_key in facility_datasets:
if dataset_key != authoritative_dataset:
id_column = facility_datasets[dataset_key]['columns'].facility_id
if id_column in row:
if row[id_column]:
neighbor_id = construct_match_id(dataset_key, row[id_column])
neighbor_pt = ids_to_pts[neighbor_id]
dist = euclidean(source_pt, neighbor_pt)
G.add_edge(auth_id, neighbor_id, weight=dist, manual_override=True)
# Set up a dict to be turned into the matches dataframe,
# and a dict that tracks what non-authoritative datasets
# have been matched.
matches = {}
matched_ids = {}
for dataset_key in dataset_order:
matches[get_id_column(dataset_key)] = []
if dataset_key != authoritative_dataset:
matched_ids[dataset_key] = set([])
dataset_columns = dict([(k, facility_datasets[k]['columns']) for k in facility_datasets])
# Iterate over connected components, which gives us the subgraphs that are
# matched, and pass this into the reduce_matches method to
# reduce down each match to a single matched set.
for s in nx.connected_components(G):
# Ignore components that don't have a point from the authoritative dataset.
if authoritative_dataset in [deconstruct_match_id(m)[0] for m in s]:
records, distances, manual_matches = get_matched_set(s)
if len(records) == 1:
reduced_components = [[records[0]['match_id']]]
else:
authoritative_records = [r for r in records if r['dataset'] == authoritative_dataset]
records_to_match = [r for r in records if r['dataset'] != authoritative_dataset]
reduced_components = reducer_fn(authoritative_records,
records_to_match,
distances,
manual_matches,
dataset_columns)
for match_set in reduced_components:
                # Ensure that the set has a facility from the authoritative dataset
assert authoritative_dataset in [deconstruct_match_id(match_id)[0]
for match_id in match_set]
ds_ids = {}
for m in match_set:
dataset_key, facility_id = deconstruct_match_id(m)
ds_ids[dataset_key] = facility_id
if dataset_key != authoritative_dataset:
matched_ids[dataset_key].add(facility_id)
for dataset_key in dataset_order:
col = get_id_column(dataset_key)
if not dataset_key in ds_ids:
matches[col].append(None)
else:
matches[col].append(ds_ids[dataset_key])
# Construct the FacilityMatchResult and return
matches_df = pd.DataFrame.from_dict(matches)
unmatched_per_dataset = {}
for dataset_key in matched_ids:
ids = set(facility_datasets[dataset_key]['df'][get_id_column(dataset_key)].astype(str).values)
unmatched_per_dataset[dataset_key] = ids - matched_ids[dataset_key]
# Merge the dataframes, using the geometry from the authoritative dataset and
# prefixing all but the ID columns by the dataset ID.
merged_df = matches_df
for dataset_key in dataset_order:
df = facility_datasets[dataset_key]['df']
id_column = get_id_column(dataset_key)
if dataset_key != authoritative_dataset:
df_prefixed = df.copy().add_prefix('{}_'.format(dataset_key))
df_prefixed = df_prefixed.rename(columns={'{}_{}'.format(dataset_key, id_column): id_column})
df_prefixed = df_prefixed.drop(columns=['{}_geometry'.format(dataset_key)])
else:
df_prefixed = df.copy()
df_prefixed[id_column] = df_prefixed[id_column].astype(str)
merged_df = merged_df.merge(df_prefixed, on=id_column, how='left')
merged_df = gpd.GeoDataFrame(merged_df, crs='epsg:4326') \
.sort_values([facility_datasets[dataset_key]['columns'].facility_id
for dataset_key in dataset_order])
return FacilityMatchResult(merged_df, matches_df, unmatched_per_dataset) | 5,354,914 |
def has_field_warning(meta, field_id):
"""Warn if dataset has existing field with same id."""
if meta.has_field(field_id):
print(
"WARN: Field '%s' is already present in dataset, not overwriting."
% field_id
)
print("WARN: Use '--replace' flag to overwrite existing field.")
return 1
return 0 | 5,354,915 |
def volume(surface):
"""Compute volume of a closed triangulated surface mesh."""
properties = vtk.vtkMassProperties()
    properties.SetInput(surface)  # VTK 5 API; VTK >= 6 uses SetInputData()
properties.Update()
return properties.GetVolume() | 5,354,916 |
import json


def parse_json_to_csv(json_file_in, csv_file_out):
"""
This method will take in a JSON file parse it and will generate a CSV file with the
same content.
Parameters:
json_file_in -- Input file containing list of JSON objects
csv_file_out -- Output file for CSV content
"""
with open(json_file_in, encoding='utf-8-sig') as jsonfile:
dict_list = json.load(jsonfile)
dict_list_to_csv(dict_list, csv_file_out) | 5,354,917 |
def simulate():
"""
Simulate one thing
"""
doors = getRandomDoorArray()
pickedDoor = chooseDoor()
goatDoor, switchDoor = openGoatDoor(pickedDoor, doors)
return doors[pickedDoor], doors[switchDoor] | 5,354,918 |
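# Illustrative driver (assumes getRandomDoorArray/chooseDoor/openGoatDoor exist and that the
# door array marks the winning door with the string "car"): switching should win ~2/3 of runs.
stay_wins = switch_wins = 0
runs = 10000
for _ in range(runs):
    stay_prize, switch_prize = simulate()
    stay_wins += stay_prize == "car"
    switch_wins += switch_prize == "car"
print(f"stay: {stay_wins / runs:.3f}, switch: {switch_wins / runs:.3f}")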
def depart(visitor: DocxTranslator, node: Node):
"""Finish processing image node"""
assert isinstance(visitor, DocxTranslator)
assert isinstance(node, Node) | 5,354,919 |
def fc_layer(x):
"""Basic Fully Connected (FC) layer with an activation function."""
return x | 5,354,920 |
def get_slot_counts(cls: type) -> Dict[str, int]:
"""
Collects all of the given class's ``__slots__``, returning a
dict of the form ``{slot_name: count}``.
:param cls: The class whose slots to collect
:return: A :class:`collections.Counter` counting the number of occurrences of each slot
"""
slot_names = (name for name, _ in iter_slots(cls))
return collections.Counter(slot_names) | 5,354,921 |
def sequence_pipeline(message):
"""Sequence pipeline.
Send to test backend twice.
"""
yield send(NullOutput(test_arg=2))
yield send(NullOutput(test_arg=1)) | 5,354,922 |
def rot6d_to_axisAngle(x):
""""Convert 6d rotation representation to axis angle
Input:
(B,6) Batch of 6-D rotation representations
Output:
(B,3) Batch of corresponding axis angle
"""
rotMat = rot6d_to_rotmat(x)
return rotationMatrix_to_axisAngle(rotMat) | 5,354,923 |
def mark_boundaries(simulation):
"""
Mark the boundaries of the mesh with different numbers to be able to
apply different boundary conditions to different regions
"""
simulation.log.info('Creating boundary regions')
# Create a function to mark the external facets
mesh = simulation.data['mesh']
marker = dolfin.MeshFunction("size_t", mesh, mesh.topology().dim() - 1)
mesh_facet_regions = simulation.data['mesh_facet_regions']
# Create boundary regions and let them mark the part of the
# boundary that they belong to. They also create boundary
# condition objects that are later used in the eq. solvers
boundary = []
for index, _ in enumerate(simulation.input.get_value('boundary_conditions', [], 'list(dict)')):
part = BoundaryRegion(simulation, marker, index, mesh_facet_regions)
boundary.append(part)
simulation.data['boundary'] = boundary
simulation.data['boundary_marker'] = marker
simulation.data['boundary_by_name'] = {b.name: b for b in boundary}
# Create a boundary measure that is aware of the marked regions
mesh = simulation.data['mesh']
ds = dolfin.Measure('ds', domain=mesh, subdomain_data=marker)
simulation.data['ds'] = ds
# Show region sizes
one = dolfin.Constant(1)
for region in boundary:
length = dolfin.assemble(one * ds(region.mark_id, domain=mesh))
pf = simulation.log.info if length > 0.0 else simulation.log.warning
pf(' Boundary region %s has size %f' % (region.name, length))
length0 = dolfin.assemble(one * ds(0, domain=mesh))
pf = simulation.log.info if length0 == 0.0 else simulation.log.warning
pf(' Boundary region UNMARKED has size %f' % length0)
# Optionally plot boundary regions to file
if simulation.input.get_value('output/plot_bcs', False, 'bool'):
prefix = simulation.input.get_value('output/prefix', '', 'string')
pfile = prefix + '_boundary_regions.xdmf'
simulation.log.info(' Plotting boundary regions to ' 'XDMF file %r' % pfile)
with dolfin.XDMFFile(mesh.mpi_comm(), pfile) as xdmf:
xdmf.write(marker) | 5,354,924 |
def within_tolerance(x, y, tolerance):
"""
Check that |x-y| <= tolerance with appropriate norm.
Args:
x: number or array (np array_like)
y: number or array (np array_like)
tolerance: Number or PercentageString
NOTE: Calculates x - y; may raise an error for incompatible shapes.
Usage
=====
The tolerance can be a number:
>>> within_tolerance(10, 9.01, 1)
True
>>> within_tolerance(10, 9.01, 0.5)
False
If tolerance is a percentage, it is a percent of (the norm of) x:
>>> within_tolerance(10, 9.01, '10%')
True
>>> within_tolerance(9.01, 10, '10%')
False
Works for vectors and matrices:
>>> A = np.array([[1,2],[-3,1]])
>>> B = np.array([[1.1, 2], [-2.8, 1]])
>>> diff = round(np.linalg.norm(A-B), 6)
>>> diff
0.223607
>>> within_tolerance(A, B, 0.25)
True
"""
# When used within graders, tolerance has already been
# validated as a Number or PercentageString
if isinstance(tolerance, six.text_type):
tolerance = np.linalg.norm(x) * percentage_as_number(tolerance)
difference = x - y
return np.linalg.norm(difference) <= tolerance | 5,354,925 |
def hermeint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
"""
Integrate a Hermite_e series.
Returns the Hermite_e series coefficients `c` integrated `m` times from
`lbnd` along `axis`. At each iteration the resulting series is
**multiplied** by `scl` and an integration constant, `k`, is added.
The scaling factor is for use in a linear change of variable. ("Buyer
beware": note that, depending on what one is doing, one may want `scl`
to be the reciprocal of what one might expect; for more information,
see the Notes section below.) The argument `c` is an array of
coefficients from low to high degree along each axis, e.g., [1,2,3]
represents the series ``H_0 + 2*H_1 + 3*H_2`` while [[1,2],[1,2]]
represents ``1*H_0(x)*H_0(y) + 1*H_1(x)*H_0(y) + 2*H_0(x)*H_1(y) +
2*H_1(x)*H_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``.
Parameters
----------
c : array_like
Array of Hermite_e series coefficients. If c is multidimensional
the different axis correspond to different variables with the
degree in each axis given by the corresponding index.
m : int, optional
Order of integration, must be positive. (Default: 1)
k : {[], list, scalar}, optional
Integration constant(s). The value of the first integral at
``lbnd`` is the first value in the list, the value of the second
integral at ``lbnd`` is the second value, etc. If ``k == []`` (the
default), all constants are set to zero. If ``m == 1``, a single
scalar can be given instead of a list.
lbnd : scalar, optional
The lower bound of the integral. (Default: 0)
scl : scalar, optional
Following each integration the result is *multiplied* by `scl`
before the integration constant is added. (Default: 1)
axis : int, optional
Axis over which the integral is taken. (Default: 0).
.. versionadded:: 1.7.0
Returns
-------
S : ndarray
Hermite_e series coefficients of the integral.
Raises
------
ValueError
If ``m < 0``, ``len(k) > m``, ``np.isscalar(lbnd) == False``, or
``np.isscalar(scl) == False``.
See Also
--------
hermeder
Notes
-----
Note that the result of each integration is *multiplied* by `scl`.
Why is this important to note? Say one is making a linear change of
variable :math:`u = ax + b` in an integral relative to `x`. Then
.. math::`dx = du/a`, so one will need to set `scl` equal to
:math:`1/a` - perhaps not what one would have first thought.
Also note that, in general, the result of integrating a C-series needs
to be "reprojected" onto the C-series basis set. Thus, typically,
the result of this function is "unintuitive," albeit correct; see
Examples section below.
Examples
--------
>>> from numpy.polynomial.hermite_e import hermeint
>>> hermeint([1, 2, 3]) # integrate once, value 0 at 0.
array([ 1., 1., 1., 1.])
>>> hermeint([1, 2, 3], m=2) # integrate twice, value & deriv 0 at 0
array([-0.25 , 1. , 0.5 , 0.33333333, 0.25 ])
>>> hermeint([1, 2, 3], k=1) # integrate once, value 1 at 0.
array([ 2., 1., 1., 1.])
>>> hermeint([1, 2, 3], lbnd=-1) # integrate once, value 0 at -1
array([-1., 1., 1., 1.])
>>> hermeint([1, 2, 3], m=2, k=[1, 2], lbnd=-1)
array([ 1.83333333, 0. , 0.5 , 0.33333333, 0.25 ])
"""
c = np.array(c, ndmin=1, copy=1)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
if not np.iterable(k):
k = [k]
cnt, iaxis = [int(t) for t in [m, axis]]
if cnt != m:
raise ValueError("The order of integration must be integer")
if cnt < 0:
raise ValueError("The order of integration must be non-negative")
if len(k) > cnt:
raise ValueError("Too many integration constants")
if iaxis != axis:
raise ValueError("The axis must be integer")
if not -c.ndim <= iaxis < c.ndim:
raise ValueError("The axis is out of range")
if iaxis < 0:
iaxis += c.ndim
if cnt == 0:
return c
c = np.rollaxis(c, iaxis)
k = list(k) + [0]*(cnt - len(k))
for i in range(cnt):
n = len(c)
c *= scl
if n == 1 and np.all(c[0] == 0):
c[0] += k[i]
else:
tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype)
tmp[0] = c[0]*0
tmp[1] = c[0]
for j in range(1, n):
tmp[j + 1] = c[j]/(j + 1)
tmp[0] += k[i] - hermeval(lbnd, tmp)
c = tmp
c = np.rollaxis(c, 0, iaxis + 1)
return c | 5,354,926 |
def test_log() -> None:
"""Test log and log filtering."""
logs = simplejson.loads(execute_cli_command(["--json", "log"]))
assert len(logs) == 0
execute_cli_command(["branch", "dev_test_log"])
table = _new_table("test_log_dev")
make_commit("log.foo.dev", table, "dev_test_log", author="nessie_user1")
table = _new_table("test_log")
make_commit("log.foo.bar", table, "main", author="nessie_user1", message="commit to main")
tables = ContentSchema().loads(execute_cli_command(["--json", "content", "view", "log.foo.bar"]), many=True)
assert len(tables) == 1
assert tables[0] == table
ext_logs: List[LogEntry] = LogEntrySchema().loads(execute_cli_command(["--json", "log", "-x"]), many=True)
assert (
len(ext_logs) == 1
and ext_logs[0].commit_meta.message == "commit to main"
and ext_logs[0].commit_meta.author == "nessie_user1"
and ext_logs[0].parent_commit_hash is not None
and len(ext_logs[0].operations) == 1
and ext_logs[0].operations[0].key == ContentKey.from_path_string("log.foo.bar")
)
simple_logs: List[CommitMeta] = CommitMetaSchema().loads(execute_cli_command(["--json", "log"]), many=True)
assert len(simple_logs) == 1 and simple_logs[0].message == "commit to main" and simple_logs[0].author == "nessie_user1"
logs = simplejson.loads(execute_cli_command(["--json", "log"]))
assert len(logs) == 1
logs = simplejson.loads(execute_cli_command(["--json", "log", "--revision-range", logs[0]["hash"]]))
assert len(logs) == 1
entries = EntrySchema().loads(execute_cli_command(["--json", "content", "list"]), many=True)
assert len(entries) == 1
execute_cli_command(
[
"--json",
"content",
"commit",
"log.foo.bar",
"-R",
"--ref",
"main",
"-m",
"delete_message",
"-c",
logs[0]["hash"],
"--author",
"nessie_user2",
],
)
logs = simplejson.loads(execute_cli_command(["--json", "log", "-n", 1]))
assert len(logs) == 1
logs = simplejson.loads(execute_cli_command(["--json", "log", "dev_test_log"]))
assert len(logs) == 1
logs = simplejson.loads(execute_cli_command(["--json", "log"]))
assert len(logs) == 2
logs = simplejson.loads(execute_cli_command(["--json", "log", "--revision-range", "{}..{}".format(logs[0]["hash"], logs[1]["hash"])]))
assert len(logs) == 1
logs = simplejson.loads(execute_cli_command(["--json", "log"]))
assert len(logs) == 2
logs = simplejson.loads(execute_cli_command(["--json", "log", "--author", "nessie_user1"]))
assert len(logs) == 1
assert_that(logs[0]["author"]).is_equal_to("nessie_user1")
logs = simplejson.loads(execute_cli_command(["--json", "log", "--author", "nessie_user2"]))
assert len(logs) == 1
assert_that(logs[0]["author"]).is_equal_to("nessie_user2")
logs = simplejson.loads(execute_cli_command(["--json", "log", "--author", "nessie_user2", "--author", "nessie_user1"]))
assert len(logs) == 2
# the committer is set on the server-side and is empty if we're not logged
# in when performing a commit
logs = simplejson.loads(execute_cli_command(["--json", "log", "--committer", ""]))
assert len(logs) == 2
logs = simplejson.loads(
execute_cli_command(["--json", "log", "--query", "commit.author == 'nessie_user2' || commit.author == 'non_existing'"])
)
assert len(logs) == 1
logs = simplejson.loads(
execute_cli_command(["--json", "log", "--after", "2001-01-01T00:00:00+00:00", "--before", "2999-12-30T23:00:00+00:00"])
)
assert len(logs) == 2 | 5,354,927 |
from math import sqrt


def model_netradiation(minTair = 0.7,
maxTair = 7.2,
albedoCoefficient = 0.23,
stefanBoltzman = 4.903e-09,
elevation = 0.0,
solarRadiation = 3.0,
vaporPressure = 6.1,
extraSolarRadiation = 11.7):
"""
- Description:
* Title: NetRadiation Model
* Author: Pierre Martre
* Reference: Modelling energy balance in the wheat crop model SiriusQuality2:
Evapotranspiration and canopy and soil temperature calculations
* Institution: INRA Montpellier
    * Abstract: It is calculated at the surface of the canopy and is given by the difference between incoming and outgoing radiation of both short
    and long wavelength radiation
- inputs:
* name: minTair
** min : -30
** default : 0.7
** max : 45
** uri : http://www1.clermont.inra.fr/siriusquality/?page_id=547
** variablecategory : auxiliary
** datatype : DOUBLE
** inputtype : variable
** unit : °C
** description : minimum air temperature
* name: maxTair
** min : -30
** default : 7.2
** max : 45
** uri : http://www1.clermont.inra.fr/siriusquality/?page_id=547
** variablecategory : auxiliary
** datatype : DOUBLE
** inputtype : variable
** unit : °C
** description : maximum air Temperature
* name: albedoCoefficient
** parametercategory : constant
** min : 0
** datatype : DOUBLE
** max : 1
** uri : http://www1.clermont.inra.fr/siriusquality/?page_id=547
** default : 0.23
** inputtype : parameter
** unit :
** description : albedo Coefficient
* name: stefanBoltzman
** parametercategory : constant
** min : 0
** datatype : DOUBLE
** max : 1
** uri : http://www1.clermont.inra.fr/siriusquality/?page_id=547
** default : 4.903E-09
** inputtype : parameter
** unit :
** description : stefan Boltzman constant
* name: elevation
** parametercategory : constant
** min : -500
** datatype : DOUBLE
** max : 10000
** uri : http://www1.clermont.inra.fr/siriusquality/?page_id=547
** default : 0
** inputtype : parameter
** unit : m
** description : elevation
* name: solarRadiation
** min : 0
** default : 3
** max : 1000
** uri : http://www1.clermont.inra.fr/siriusquality/?page_id=547
** variablecategory : auxiliary
** datatype : DOUBLE
** inputtype : variable
** unit : MJ m-2 d-1
** description : solar Radiation
* name: vaporPressure
** min : 0
** default : 6.1
** max : 1000
** uri : http://www1.clermont.inra.fr/siriusquality/?page_id=547
** variablecategory : auxiliary
** datatype : DOUBLE
** inputtype : variable
** unit : hPa
** description : vapor Pressure
* name: extraSolarRadiation
** min : 0
** default : 11.7
** max : 1000
** uri : http://www1.clermont.inra.fr/siriusquality/?page_id=547
** variablecategory : auxiliary
** datatype : DOUBLE
** inputtype : variable
** unit : MJ m2 d-1
** description : extra Solar Radiation
- outputs:
* name: netRadiation
** min : 0
** variablecategory : auxiliary
** max : 5000
** uri : http://www1.clermont.inra.fr/siriusquality/?page_id=547
** datatype : DOUBLE
** unit : MJ m-2 d-1
** description : net radiation
* name: netOutGoingLongWaveRadiation
** min : 0
** variablecategory : auxiliary
** max : 5000
** uri : http://www1.clermont.inra.fr/siriusquality/?page_id=547
** datatype : DOUBLE
** unit : g m-2 d-1
** description : net OutGoing Long Wave Radiation
"""
Nsr = (1.0 - albedoCoefficient) * solarRadiation
clearSkySolarRadiation = (0.75 + (2 * pow(10.0, -5) * elevation)) * extraSolarRadiation
averageT = (pow(maxTair + 273.16, 4) + pow(minTair + 273.16, 4)) / 2.0
surfaceEmissivity = 0.34 - (0.14 * sqrt(vaporPressure / 10.0))
cloudCoverFactor = 1.35 * (solarRadiation / clearSkySolarRadiation) - 0.35
Nolr = stefanBoltzman * averageT * surfaceEmissivity * cloudCoverFactor
netRadiation = Nsr - Nolr
netOutGoingLongWaveRadiation = Nolr
return (netRadiation, netOutGoingLongWaveRadiation) | 5,354,928 |
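# Example call (illustrative only): evaluate the model with its documented default inputs.
net_rad, net_outgoing_lw = model_netradiation()
print(f"net radiation = {net_rad:.3f} MJ m-2 d-1, "
      f"net outgoing longwave = {net_outgoing_lw:.3f} MJ m-2 d-1")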
def main():
"""
Testing function for DFA brzozowski algebraic method Operation
"""
argv = sys.argv
if len(argv) < 2:
targetfile = 'target.y'
else:
targetfile = argv[1]
    print('Parsing ruleset: ' + targetfile, end=' ')
flex_a = Flexparser()
mma = flex_a.yyparse(targetfile)
    print('OK')
    print('Perform minimization on initial automaton:', end=' ')
    print('OK')
    print('Perform Brzozowski on minimal automaton:', end=' ')
brzozowski_a = Regex(mma)
mma_regex = brzozowski_a.get_regex()
    print(mma_regex) | 5,354,929 |
def get_tagset(sentences, with_prefix):
""" Returns the set of entity types appearing in the list of sentences.
If with_prefix is True, it returns both the B- and I- versions for each
entity found. If False, it merges them (i.e., removes the prefix and only
returns the entity type).
"""
iobs = [iob for sent in sentences for (x,iob) in sent]
tagset = set(iobs)
if not with_prefix:
tagset = set([t[2:] for t in list(tagset) if t != 'O'])
return tagset | 5,354,930 |
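# Example (illustrative): sentences are lists of (token, IOB tag) pairs.
sents = [[("John", "B-PER"), ("Smith", "I-PER"), ("visited", "O"), ("Paris", "B-LOC")]]
print(get_tagset(sents, with_prefix=True))   # {'B-PER', 'I-PER', 'O', 'B-LOC'}
print(get_tagset(sents, with_prefix=False))  # {'PER', 'LOC'}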
def pdb_to_psi4(starting_geom, mol_name, method, basis_set, charge=0, multiplicity=1, symmetry='C1', geom_opt=True,
sp_energy=False, fixed_dih=None, mem=None, constrain='dihedral', dynamic_level=3,
consecutive_backsteps=None, geom_maxiter=250, xyz_traj=True):
"""
    :param starting_geom: str
        geometry block (atom coordinates) inserted into the psi4 molecule section
:param method: list of str
QM method (see psi4 website for options)
If length 2, first one will be used for geom opt and second for spe.
:param basis_set: str
specification of basis set
:param symmetry: str
symmetry of molecule. Default is None.
:param geom_opt: bool
if True, will generate input file for geometry optimization
:param sp_energy: bool
if True, will run a single point energy calculation (if geom_opt also true, SPE calculation will occur after
geom opt
:param fixed_dih: str
string of dihedral that should be fixed at specified angle. Format: "4 7 10 14 90.00"
default: None - will not fix dihedral
Beware:
------
Because of a bug in psi4, dihedral angle can't be exactly 0 (same would apply for 180) so use 0.001 instead
    :param constrain: str. Either 'dihedral' or 'cartesian'
        The kind of constraint to use
:param mem: int
memory allocation for calculation
:param outfile: str
if specified, will save file there
:return:
psi4 input string. If outfile, save file to specified path
"""
input_string = ""
if mem is not None:
input_string += "\nmemory {}\n".format(mem)
input_string += "\nmolecule {}".format(mol_name)
input_string += " {\n"
input_string += " symmetry {}\n".format(symmetry)
input_string += " {} {} \n".format(charge, multiplicity)
input_string += starting_geom
input_string += " units Angstrom\n"
input_string += "}\n"
if fixed_dih is not None:
if constrain == 'dihedral':
input_string += '\ndih_string = "{}"'.format(fixed_dih)
# ToDo add string because that's the only thing that seems to work
input_string += '\nset optking { fixed_dihedral = $dih_string\n'
elif constrain == 'cartesian':
input_string += '\n frozen_string = """ \n {} xyz \n {} xyz \n {} xyz \n {} xyz \n"""'.format(fixed_dih[0],
fixed_dih[2],
fixed_dih[4],
fixed_dih[6])
input_string += '\nset optking { opt_coordinates = cartesian\n frozen_cartesian = $frozen_string\n'
else:
raise NameError('Only dihedral or cartesian constraints are valid')
if dynamic_level:
input_string += ' dynamic_level = {}\n'.format(dynamic_level)
if consecutive_backsteps:
input_string += ' consecutive_backsteps = {}\n'.format(consecutive_backsteps)
if geom_maxiter:
input_string += ' geom_maxiter = {}\n'.format(geom_maxiter)
if xyz_traj:
input_string += ' print_trajectory_xyz_file = True '
input_string += '}\n'
if geom_opt:
input_string += "\noptimize('{}/{}')\n".format(method[0], basis_set[0])
if sp_energy:
input_string += "\nenergy('{}/{}')\n".format(method[-1], basis_set[-1])
return input_string | 5,354,931 |
def normpath(s: str) -> str:
"""Normalize path. Just for compatibility with normal python3."""
return s | 5,354,932 |
def threshold_num_spikes(
sorting,
threshold,
threshold_sign,
sampling_frequency=None,
**kwargs
):
"""
Computes and thresholds the num spikes in the sorted dataset with the given sign and value.
Parameters
----------
sorting: SortingExtractor
The sorting result to be evaluated
threshold: int or float
The threshold for the given metric
threshold_sign: str
If 'less', will threshold any metric less than the given threshold
If 'less_or_equal', will threshold any metric less than or equal to the given threshold
If 'greater', will threshold any metric greater than the given threshold
If 'greater_or_equal', will threshold any metric greater than or equal to the given threshold
sampling_frequency: float
The sampling frequency of the result. If None, will check to see if sampling frequency is in sorting extractor
**kwargs: keyword arguments
Keyword arguments among the following:
save_property_or_features: bool
If True, the metric is saved as sorting property
verbose: bool
If True, will be verbose in metric computation
Returns
----------
threshold sorting extractor
"""
params_dict = update_all_param_dicts_with_kwargs(kwargs)
md = MetricData(sorting=sorting, sampling_frequency=sampling_frequency, recording=None, apply_filter=False,
duration_in_frames=None, freq_min=300.0, freq_max=6000.0, unit_ids=None,
verbose=params_dict['verbose'], raise_if_empty=False)
ns = NumSpikes(metric_data=md)
threshold_sorting = ns.threshold_metric(threshold, threshold_sign, **kwargs)
return threshold_sorting | 5,354,933 |
import numpy as np


def mad(stack, axis=0, scale=1.4826):
"""Median absolute deviation,
default is scaled such that +/-MAD covers 50% (between 1/4 and 3/4)
of the standard normal cumulative distribution
"""
stack_abs = np.abs(stack)
med = np.nanmedian(stack_abs, axis=axis)
return scale * np.nanmedian(np.abs(stack_abs - med), axis=axis) | 5,354,934 |
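# Example (illustrative): MAD stays close to the spread of the inliers, while the standard
# deviation is inflated by the single outlier.
data = np.array([1.0, 1.1, 0.9, 1.05, 0.95, 10.0])
print(mad(data, axis=0))  # ~0.11
print(np.nanstd(data))    # ~3.3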
def set_up_logging(
*,
log_filename: str = "log",
verbosity: int = 0,
use_date_logging: bool = False,
) ->logging.Logger:
"""Set up proper logging."""
# log everything verbosely
LOG.setLevel(logging.DEBUG)
logging.Formatter.converter = time.gmtime
handler: Any
if use_date_logging:
handler = TimedRotatingFileHandler(
filename=log_filename,
when="D",
utc=True,
)
else:
handler = RotatingFileHandler(
filename=log_filename,
maxBytes=1024000000,
backupCount=10,
)
formatter = logging.Formatter(
fmt="%(asctime)s.%(msecs)03dZ - %(levelname)s - %(module)s - %(message)s",
datefmt="%Y-%m-%dT%H:%M:%S",
)
handler.setFormatter(formatter)
handler.setLevel(logging.DEBUG)
LOG.addHandler(handler)
# Provide a stdout handler logging at INFO.
stream_handler = logging.StreamHandler(sys.stdout)
simple_form = logging.Formatter(fmt="%(message)s")
stream_handler.setFormatter(simple_form)
if verbosity > 0:
stream_handler.setLevel(logging.DEBUG)
else:
stream_handler.setLevel(logging.INFO)
LOG.addHandler(stream_handler)
return LOG | 5,354,935 |
def dir_tree(root, dir_exclude=None, ext_exclude=None):
""" Return all files at or under root directory """
ext_exclude = [] if ext_exclude is None else ext_exclude
ext_exclude = ['.'+item for item in ext_exclude]
dir_exclude = [] if dir_exclude is None else dir_exclude
dir_exclude = [os.path.join(root, item) for item in dir_exclude]
for dname, _, fnames in os.walk(root):
for fname in fnames:
_, ext = os.path.splitext(fname)
if any([dname.startswith(item) for item in dir_exclude]):
continue
if ext not in ext_exclude:
yield os.path.join(dname, fname) | 5,354,936 |
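# Example (illustrative paths): walk everything under "src", skipping the build
# directory and any .pyc files. dir_tree itself relies on the os module being imported.
import os
for path in dir_tree("src", dir_exclude=["build"], ext_exclude=["pyc"]):
    print(path)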
def _get_window(append, size=(1000, 600)):
"""
Return a handle to a plot window to use for this plot.
If append is False, create a new plot window, otherwise return
a handle to the given window, or the last created window.
Args:
append (Union[bool, PlotWindow]): If true, return the last
created plot window, if PlotWindow, return that window, otherwise
a new window will be created.
size (Tuple[int, int]): The size in px of the new plot window. If append
is not false, this parameter has no effect.
"""
# Set up a plotting window
if append is None or append is False:
win = PlotWindow()
win.win_title = 'ID: '
win.resize(*size)
elif isinstance(append, PlotWindow):
# Append to the given window
win = append
elif isinstance(append, bool):
# Append to the last trace if true
win = PlotWindow.getWindows()[-1]
else:
raise ValueError("Unknown argument to append. Either give a plot window"
" or true to append to the last plot")
return win | 5,354,937 |
def cufftExecC2C(plan, idata, odata, direction=CUFFT_FORWARD):
""" Execute the planned complex->complex FFT.
Parameters
----------
`plan` : cufftHandle
The plan handle.
`idata` : pointer to cufftComplex array
`odata` : pointer to cufftComplex array
The input and output arrays. They may be the same for an in-place FFT.
`direction` : int, optional
Either CUFFT_FORWARD or CUFFT_INVERSE.
Raises
------
AssertionError if the argument is bad.
CufftError if there is an error calling the CUFFT library.
"""
assert isinstance(plan, cufftHandle)
# TODO: check pointer validity.
# TODO: accept contiguous numpy arrays.
assert direction in (CUFFT_FORWARD, CUFFT_INVERSE)
result = _cufftExecC2C(plan, idata, odata, direction)
checkCufftResult(result) | 5,354,938 |
def _proxies_dict(proxy):
"""Makes a proxy dict appropriate to pass to requests."""
if not proxy:
return None
return {'http': proxy, 'https': proxy} | 5,354,939 |
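# Example (illustrative proxy address): the returned dict plugs straight into requests.
import requests
resp = requests.get("https://example.com",
                    proxies=_proxies_dict("http://127.0.0.1:8080"), timeout=10)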
def distribute(data_schema, columns, entity_ids=None, codes=None, histnorm="percent", nbinsx=20, filters=None):
"""
distribute indicators(columns) of entities
:param data_schema:
:param columns:
:param entity_ids:
:param codes:
:param histnorm: "percent", "probability", default "percent"
:param nbinsx:
:param filters:
"""
columns = ["entity_id", "timestamp"] + columns
df = data_schema.query_data(entity_ids=entity_ids, codes=codes, columns=columns, filters=filters)
if not entity_ids or codes:
df["entity_id"] = "entity_x_distribute"
distribute_df(df=df, histnorm=histnorm, nbinsx=nbinsx) | 5,354,940 |
def _get_schedule_times(name, date):
"""
Fetch all `from_time` from [Healthcare Schedule Time Slot]
:param name: [Practitioner Schedule]
:param date: [datetime.date]
:return:
"""
mapped_day = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
time_slots = frappe.get_all(
'Healthcare Schedule Time Slot',
filters={'parent': name, 'day': mapped_day[date.weekday()]},
fields=['from_time']
)
return list(map(lambda x: x.get('from_time'), time_slots)) | 5,354,941 |
def setConsoleName(name):
"""
This sets the name of the python logger that represents the console.
"""
    global _console_name
    _console_name = name | 5,354,942 |
def delete(client, data, force=False):
"""
"""
param = {'logical-router-port-id': get_id(client, data)}
if force:
param['force'] = True
request = client.__getattr__(MODULE).DeleteLogicalRouterPort(**param)
response, _ = request.result()
return response | 5,354,943 |
def doc():
"""
Static methods are methods that are related to a class in some way,
but don’t need to access any class-specific data. You don’t have to use self,
and you don’t even need to instantiate an instance, you can simply call your method.
The @staticmethod decorator is used to tell Python that this method is a static method.
""" | 5,354,944 |
def z_standardization(
spark,
idf,
list_of_cols="all",
drop_cols=[],
pre_existing_model=False,
model_path="NA",
output_mode="replace",
print_impact=False,
):
"""
Standardization is commonly used in data pre-processing process. z_standardization standardizes the selected
attributes of an input dataframe by normalizing each attribute to have standard deviation of 1 and mean of 0. For
each attribute, the standard deviation (s) and mean (u) are calculated and a sample x will be standardized into (
x-u)/s. If the standard deviation of an attribute is 0, it will be excluded in standardization and a warning will
be shown. None values will be kept as None in the output dataframe.
Parameters
----------
spark
Spark Session
idf
Input Dataframe
list_of_cols
List of numerical columns to transform e.g., ["col1","col2"].
Alternatively, columns can be specified in a string format,
where different column names are separated by pipe delimiter “|” e.g., "col1|col2".
"all" can be passed to include all numerical columns for analysis. This is super useful instead of specifying all column names manually.
Please note that this argument is used in conjunction with drop_cols i.e. a column mentioned in
drop_cols argument is not considered for analysis even if it is mentioned in list_of_cols. (Default value = "all")
drop_cols
List of columns to be dropped e.g., ["col1","col2"].
Alternatively, columns can be specified in a string format,
where different column names are separated by pipe delimiter “|” e.g., "col1|col2".
It is most useful when coupled with the “all” value of list_of_cols, when we need to consider all columns except
a few handful of them. (Default value = [])
pre_existing_model
Boolean argument – True or False. True if model files (Mean/stddev for each feature) exists already, False Otherwise (Default value = False)
model_path
If pre_existing_model is True, this argument is path for referring the pre-saved model.
If pre_existing_model is False, this argument can be used for saving the model.
Default "NA" means there is neither pre-existing model nor there is a need to save one.
output_mode
"replace", "append".
“replace” option replaces original columns with transformed column. “append” option append transformed
column to the input dataset with a postfix "_scaled" e.g. column X is appended as X_scaled. (Default value = "replace")
print_impact
True, False (Default value = False)
This argument is to print out the before and after descriptive statistics of rescaled columns.
Returns
-------
DataFrame
Rescaled Dataframe
"""
num_cols = attributeType_segregation(idf)[0]
if list_of_cols == "all":
list_of_cols = num_cols
if isinstance(list_of_cols, str):
list_of_cols = [x.strip() for x in list_of_cols.split("|")]
if isinstance(drop_cols, str):
drop_cols = [x.strip() for x in drop_cols.split("|")]
list_of_cols = list(set([e for e in list_of_cols if e not in drop_cols]))
if any(x not in num_cols for x in list_of_cols):
raise TypeError("Invalid input for Column(s)")
if len(list_of_cols) == 0:
warnings.warn(
"No Standardization Performed - No numerical column(s) to transform"
)
return idf
if output_mode not in ("replace", "append"):
raise TypeError("Invalid input for output_mode")
parameters = []
excluded_cols = []
if pre_existing_model:
df_model = spark.read.parquet(model_path + "/z_standardization")
for i in list_of_cols:
mapped_value = (
df_model.where(F.col("feature") == i)
.select("parameters")
.rdd.flatMap(lambda x: x)
.collect()[0]
)
parameters.append(mapped_value)
else:
for i in list_of_cols:
mean, stddev = idf.select(F.mean(i), F.stddev(i)).first()
parameters.append(
[float(mean) if mean else None, float(stddev) if stddev else None]
)
if stddev:
if round(stddev, 5) == 0.0:
excluded_cols.append(i)
else:
excluded_cols.append(i)
if len(excluded_cols) > 0:
warnings.warn(
"The following column(s) are excluded from standardization because the standard deviation is zero:"
+ str(excluded_cols)
)
odf = idf
for index, i in enumerate(list_of_cols):
if i not in excluded_cols:
modify_col = (i + "_scaled") if (output_mode == "append") else i
odf = odf.withColumn(
modify_col, (F.col(i) - parameters[index][0]) / parameters[index][1]
)
if (not pre_existing_model) & (model_path != "NA"):
df_model = spark.createDataFrame(
zip(list_of_cols, parameters), schema=["feature", "parameters"]
)
df_model.coalesce(1).write.parquet(
model_path + "/z_standardization", mode="overwrite"
)
if print_impact:
if output_mode == "replace":
output_cols = list_of_cols
else:
output_cols = [
(i + "_scaled") for i in list_of_cols if i not in excluded_cols
]
print("Before: ")
idf.select(list_of_cols).describe().show(5, False)
print("After: ")
odf.select(output_cols).describe().show(5, False)
return odf | 5,354,945 |
def event_stats(wit_df, wit_im, wit_area, pkey='SYSID'):
"""
Compute inundation event stats with given wit wetness, events defined by (start_time, end_time)
and polygon areas
input:
wit_df: wetness computed from wit data
wit_im: inundation event
wit_area: polygon areas indexed by the key
output:
dataframe of event stats
"""
grouped_im = wit_im[['start_time', 'end_time']].groupby(pkey)
return wit_df.groupby(pkey).apply(get_im_stats, im_time=grouped_im, wit_area=wit_area).droplevel(0) | 5,354,946 |
def upvote_book(book_id):
"""
Allows a user to upvote a book.
The upvotes field on the book document is updated,
as well as the booksUpvoted array on the user document
and the upvotedBy array on the book document.
"""
user_to_update = mongo.db.users.find_one({"username": session["user"]})
username = user_to_update.get("username")
mongo.db.books.update_one({"_id": ObjectId(book_id)}, {
'$inc': {'upvotes': +1}})
mongo.db.books.update_one({"_id": ObjectId(book_id)}, {
'$push': {'upvotedBy': username}})
mongo.db.users.update_one(
user_to_update, {'$push': {'booksUpvoted': ObjectId(book_id)}})
flash("Book has been upvoted!")
return redirect(url_for("get_book", book_id=book_id)) | 5,354,947 |
def bottlegrowth_split_mig(params, ns):
"""
params = (nuB, nuF, m, T, Ts)
ns = [n1, n2]
    Instantaneous size change followed by exponential growth then split with
    migration.
    nuB: Ratio of population size after instantaneous change to ancient
    population size
    nuF: Ratio of contemporary to ancient population size
m: Migration rate between the two populations (2*Na*m).
T: Time in the past at which instantaneous change happened and growth began
(in units of 2*Na generations)
Ts: Time in the past at which the two populations split.
n1, n2: Sample sizes of resulting Spectrum.
"""
nuB, nuF, m, T, Ts = params
nu_func = lambda t: [nuB * numpy.exp(numpy.log(nuF/nuB) * t / T)]
sts = moments.LinearSystem_1D.steady_state_1D(ns[0] + ns[1])
fs = moments.Spectrum(sts)
fs.integrate(nu_func, T - Ts, dt_fac=0.01)
# we split the population
fs = moments.Manips.split_1D_to_2D(fs, ns[0], ns[1])
nu0 = nu_func(T - Ts)[0]
nu_func = lambda t: 2 * [nu0 * numpy.exp(numpy.log(nuF/nu0) * t / Ts)]
fs.integrate(nu_func, Ts, m = numpy.array([[0, m], [m, 0]]))
return fs | 5,354,948 |
def arch_to_macho(arch):
"""Converts an arch string into a macho arch tuple."""
try:
arch = rustcall(lib.symbolic_arch_to_macho, encode_str(arch))
return (arch.cputype, arch.cpusubtype)
except ignore_arch_exc:
pass | 5,354,949 |
def visualize_3d(img, proc_param, joints, verts, cam, joints3d):
"""
Renders the result in original image coordinate frame.
"""
import matplotlib.pyplot as plt
cam_for_render, vert_shifted, joints_orig = vis_util.get_original(
proc_param, verts, cam, joints, img_size=img.shape[:2])
# Render results
fig = plt.figure()
ax = fig.add_subplot(121, projection='3d')
print("Joints shape 3d:" + str(joints3d.shape))
ax = vis_util.draw_skeleton_3d(joints3d, ax)
#plt = vis_util.draw_skeleton_3d(img, joints_orig, plt)
ax1 = fig.add_subplot(122)
skel_img = vis_util.draw_skeleton(img, joints_orig)
ax1.imshow(skel_img)
# plt.ion()
plt.title('diff vp')
plt.axis('off')
plt.draw()
plt.show() | 5,354,950 |
def limit_ops_skeleton(**kwargs):
"""This function provides a skeleton for limit ops calculations"""
group_phase = kwargs['group_phase']
tail = kwargs['tail']
loading_phase = kwargs['loading_phase']
final_phase = kwargs['final_phase']
grouped_df = limit_ops_general_groups(
**group_phase
)
grouped_df = grouped_df.tail(tail)
loaded_table = load_and_rename(**loading_phase)
final_phase['first_df'] = grouped_df
final_phase['second_df'] = loaded_table
final_values = limit_ops_formatter(**final_phase)
return final_values | 5,354,951 |
def xy2traceset(xpos, ypos, **kwargs):
"""Convert from x,y positions to a trace set.
Parameters
----------
xpos, ypos : array-like
X,Y positions corresponding as [nx,Ntrace] arrays.
invvar : array-like, optional
Inverse variances for fitting.
func : :class:`str`, optional
Function type for fitting; defaults to 'legendre'.
ncoeff : :class:`int`, optional
Number of coefficients to fit. Defaults to 3.
xmin, xmax : :class:`float`, optional
Explicitly set minimum and maximum values, instead of computing
them from `xpos`.
maxiter : :class:`int`, optional
Maximum number of rejection iterations; set to 0 for no rejection;
default to 10.
inmask : array-like, optional
Mask set to 1 for good points and 0 for rejected points;
same dimensions as `xpos`, `ypos`. Points rejected by `inmask`
are always rejected from the fits (the rejection is "sticky"),
and will also be marked as rejected in the outmask attribute.
ia, inputans, inputfunc : array-like, optional
These arguments will be passed to :func:`func_fit`.
xjumplo : :class:`float`, optional
x position locating start of an x discontinuity
xjumphi : :class:`float`, optional
x position locating end of that x discontinuity
xjumpval : :class:`float`, optional
magnitude of the discontinuity "jump" between those bounds
(previous 3 keywords motivated by BOSS 2-phase readout)
Returns
-------
:class:`TraceSet`
A :class:`TraceSet` object.
"""
return TraceSet(xpos, ypos, **kwargs) | 5,354,952 |
def Fn(name, f, n_out=1): # pylint: disable=invalid-name
"""Returns a layer with no weights that applies the function `f`.
`f` can take and return any number of arguments, and takes only positional
arguments -- no default or keyword arguments. It often uses JAX-numpy (`jnp`).
The following, for example, would create a layer that takes two inputs and
returns two outputs -- element-wise sums and maxima:
`Fn('SumAndMax', lambda x0, x1: (x0 + x1, jnp.maximum(x0, x1)), n_out=2)`
The layer's number of inputs (`n_in`) is automatically set to number of
positional arguments in `f`, but you must explicitly set the number of
outputs (`n_out`) whenever it's not the default value 1.
Args:
name: Class-like name for the resulting layer; for use in debugging.
f: Pure function from input tensors to output tensors, where each input
tensor is a separate positional arg, e.g., `f(x0, x1) --> x0 + x1`.
Output tensors must be packaged as specified in the `Layer` class
docstring.
n_out: Number of outputs promised by the layer; default value 1.
Returns:
Layer executing the function `f`.
"""
argspec = inspect.getfullargspec(f)
if argspec.defaults is not None:
raise ValueError('Function has default arguments (not allowed).')
if argspec.varkw is not None:
raise ValueError('Function has keyword arguments (not allowed).')
if argspec.varargs is not None:
raise ValueError('Function has variable args (not allowed).')
def _forward(xs): # pylint: disable=invalid-name
if not isinstance(xs, (tuple, list)):
xs = (xs,)
return f(*xs)
n_in = len(argspec.args)
name = name or 'Fn'
return PureLayer(_forward, n_in=n_in, n_out=n_out, name=name) | 5,354,953 |
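
# Illustrative sketch (not part of the original snippet): the positional-arg count
# that `Fn` uses for `n_in` can be derived with inspect.getfullargspec, as above.
import inspect

def _count_positional_args(f):
    """Return the number of plain positional arguments of `f`."""
    spec = inspect.getfullargspec(f)
    # Fn rejects defaults, *args and **kwargs; only `args` is counted here.
    return len(spec.args)

assert _count_positional_args(lambda x0, x1: x0 + x1) == 2  # would give n_in=2
assert _count_positional_args(lambda x: -x) == 1            # would give n_in=1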
def leer_pdf_slate(ubicacion_archivo, password=None):
"""
Utiliza la librería slate3k para cargar un archivo PDF y extraer el texto de sus páginas.
:param ubicacion_archivo: (str). Ubicación del archivo PDF que se desea leer.
:param password: (str). Valor por defecto: None. Parámetro opcional para leer archivos \
PDF que están protegidos por contraseña.
:return: (list). Lista de strings, que contienen el texto extraído de cada página del PDF.
"""
import slate3k as slate
    # Avoid showing slate warnings
import logging
logging.getLogger('pdfminer').setLevel(logging.ERROR)
    # Open the file and extract the text from its pages
with open(ubicacion_archivo, 'rb') as f:
if password is not None:
paginas = slate.PDF(f, password)
else:
paginas = slate.PDF(f)
    # Return the extracted text
return paginas | 5,354,954 |
def gen_run_entry_str(query_id, doc_id, rank, score, run_id):
"""A simple function to generate one run entry.
:param query_id: query id
:param doc_id: document id
:param rank: entry rank
:param score: entry score
:param run_id: run id
"""
return f'{query_id} Q0 {doc_id} {rank} {score} {run_id}' | 5,354,955 |
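
# Usage sketch (assumes gen_run_entry_str above is in scope): the helper emits one
# line in the standard TREC run format "<qid> Q0 <docid> <rank> <score> <run_id>".
line = gen_run_entry_str('q1', 'doc42', 1, 12.5, 'my_run')
assert line == 'q1 Q0 doc42 1 12.5 my_run'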
async def mesh_auto_send(args):
"""Asynchronously sends messages from the queue via mesh link
"""
send_method, mesh_queue, gid = args
while True:
async for data in mesh_queue:
send_method(gid=gid, message=data, binary=True) | 5,354,956 |
def remove_template(args, output_file_name):
"""
remove the arg to use template; called when you make the template
:param args:
:param output_file_name:
:return:
"""
template_name = ''
dir_name = ''
template_found = False
for i in args:
if i.startswith('--template'):
# print_fun('FOUND')
args.remove(i)
# i ='--template=/s/telos/common/sjm-doc-template.tex'
# eq_loc = i.find("=") + 1
# dir_end = len(i) - i[::-1].find('/') - 1
# dir_name = i[eq_loc:dir_end]
# template_name = i[dir_end + 1:-4]
# template_found = True
# new
p = Path(i.split('=')[1])
dir_name = str(p.parent)
template_name = str(p.name)
template_found = True
# print_fun(template_name)
break
if not template_found:
raise ValueError('\n\n\nERROR: making template, need cla: --template=/template name... command line option!\n'
f'Args are {args}\nAborting.\n\n')
return
args, trash = adjust_output_file(args, dir_name, output_file_name)
return args, template_name | 5,354,957 |
def replaceext(filepath, new_ext, *considered_exts):
"""replace extension of filepath with new_ext
filepath: a file path
new_ext: extension the returned filepath should have (e.g ".ext")
considered_exts: Each is a case insensitive extension that should be considered a
single extension and replaced accordingly. e.g. if you pass .tar.gz, file.tar.gz
becomes file.new_ext instead of file.tar.new_ext
returns: filepath with its extension replaced
"""
root = splitext(filepath, *considered_exts)[0]
return root + new_ext | 5,354,958 |
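
# Minimal sketch of the multi-part-extension idea described above (this is an
# illustrative stand-in, not the module's actual `splitext` helper): extensions
# listed in `considered_exts`, such as ".tar.gz", are treated as one extension.
import os

def _replaceext_sketch(filepath, new_ext, *considered_exts):
    lower = filepath.lower()
    for ext in considered_exts:
        if lower.endswith(ext.lower()):
            return filepath[:-len(ext)] + new_ext
    return os.path.splitext(filepath)[0] + new_ext

assert _replaceext_sketch('photo.jpeg', '.png') == 'photo.png'
assert _replaceext_sketch('file.tar.gz', '.zip', '.tar.gz') == 'file.zip'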
def do_rename(cs, args):
"""Rename a vsm."""
kwargs = {}
if args.display_name is not None:
kwargs['display_name'] = args.display_name
if args.display_description is not None:
kwargs['display_description'] = args.display_description
_find_vsm(cs, args.vsm).update(**kwargs) | 5,354,959 |
def textarea(name, content="", id=NotGiven, **attrs):
"""Create a text input area.
"""
attrs["name"] = name
_set_id_attr(attrs, id, name)
return HTML.tag("textarea", content, **attrs) | 5,354,960 |
def graphics_renderer(player, walls):
"""
"""
# Background
screen.fill(PURE_LIME_GREEN)
# Build maze
build_maze(screen, walls)
# Build player
game.draw.rect(screen, player.get_color(), player.get_body())
# Flip display
game.display.flip() | 5,354,961 |
def get_score_checkpoint(loss_score):
"""Retrieves the path to a checkpoint file."""
name = "{}{:4f}.pyth".format(_SCORE_NAME_PREFIX, loss_score)
return os.path.join(get_checkpoint_dir(), name) | 5,354,962 |
def start_automated_run(path, automated_run_id):
"""Starts automated run. This will automatically create
base learners until the run finishes or errors out.
Args:
path (str): Path to Xcessiv notebook
automated_run_id (str): Automated Run ID
"""
with functions.DBContextManager(path) as session:
automated_run = session.query(models.AutomatedRun).filter_by(id=automated_run_id).first()
if not automated_run:
raise exceptions.UserError('Automated run {} '
'does not exist'.format(automated_run_id))
automated_run.job_id = get_current_job().id
automated_run.job_status = 'started'
session.add(automated_run)
session.commit()
try:
if automated_run.category == 'bayes':
automatedruns.start_naive_bayes(automated_run, session, path)
elif automated_run.category == 'tpot':
automatedruns.start_tpot(automated_run, session, path)
elif automated_run.category == 'greedy_ensemble_search':
automatedruns.start_greedy_ensemble_search(automated_run, session, path)
else:
raise Exception('Something went wrong. Invalid category for automated run')
automated_run.job_status = 'finished'
session.add(automated_run)
session.commit()
except:
session.rollback()
automated_run.job_status = 'errored'
automated_run.description['error_type'] = repr(sys.exc_info()[0])
automated_run.description['error_value'] = repr(sys.exc_info()[1])
automated_run.description['error_traceback'] = \
traceback.format_exception(*sys.exc_info())
session.add(automated_run)
session.commit()
raise | 5,354,963 |
def StretchContrast(pixlist, minmin=0, maxmax=0xff):
""" Stretch the current image row to the maximum dynamic range with
minmin mapped to black(0x00) and maxmax mapped to white(0xff) and
all other pixel values stretched accordingly."""
if minmin < 0: minmin = 0 # pixel minimum is 0
if maxmax > 0xff: maxmax = 0xff # pixel maximum is 255
if maxmax < minmin: maxmax = minmin # range sanity
min, max = maxmax, minmin
for pix in pixlist:
if pix < min and pix >= minmin:
min = pix
if pix > max and pix <= maxmax:
max = pix
if min > max: min = max
if min == max:
f = 1.0
else:
f = 255.0 / (max - min)
n = 0
newpixlist= []
for pix in pixlist:
if pix < minmin: pix = minmin
if pix > maxmax: pix = maxmax
pix = int((pix - min) * f)
newpixlist.append (pix)
return newpixlist | 5,354,964 |
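
# Worked example (assumes StretchContrast above is in scope): with the defaults,
# min=10 and max=30 are found, so f = 255 / (30 - 10) = 12.75 and the row is
# stretched linearly onto the full 0..255 range.
assert StretchContrast([10, 20, 30]) == [0, 127, 255]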
def detection(array, psf, bkg_sigma=1, mode='lpeaks', matched_filter=False,
mask=True, snr_thresh=5, plot=True, debug=False,
full_output=False, verbose=True, save_plot=None, plot_title=None,
angscale=False, pxscale=0.01):
""" Finds blobs in a 2d array. The algorithm is designed for automatically
finding planets in post-processed high contrast final frames. Blob can be
defined as a region of an image in which some properties are constant or
vary within a prescribed range of values. See <Notes> below to read about
the algorithm details.
Parameters
----------
array : array_like, 2d
Input frame.
psf : array_like
Input psf, normalized with ``vip_hci.phot.normalize_psf``.
bkg_sigma : float, optional
The number standard deviations above the clipped median for setting the
background level.
mode : {'lpeaks','log','dog'}, optional
        Sets which algorithm to use. Each algorithm yields different results.
matched_filter : bool, optional
        Whether to correlate with the psf or not.
mask : bool, optional
Whether to mask the central region (circular aperture of 2*fwhm radius).
snr_thresh : float, optional
SNR threshold for deciding whether the blob is a detection or not.
plot : bool, optional
If True plots the frame showing the detected blobs on top.
debug : bool, optional
Whether to print and plot additional/intermediate results.
full_output : bool, optional
Whether to output just the coordinates of blobs that fulfill the SNR
constraint or a table with all the blobs and the peak pixels and SNR.
verbose : bool, optional
Whether to print to stdout information about found blobs.
save_plot: string
If provided, the plot is saved to the path.
plot_title : str, optional
Title of the plot.
angscale: bool, optional
If True the plot axes are converted to angular scale.
pxscale : float, optional
Pixel scale in arcseconds/px. Default 0.01 for Keck/NIRC2.
Returns
-------
yy, xx : array_like
Two vectors with the y and x coordinates of the centers of the sources
(potential planets).
If full_output is True then a table with all the candidates that passed the
    2d Gaussian fit constraints and their S/N is returned.
Notes
-----
The FWHM of the PSF is measured directly on the provided array. If the
parameter matched_filter is True then the PSF is used to run a matched
filter (correlation) which is equivalent to a convolution filter. Filtering
the image will smooth the noise and maximize detectability of objects with a
shape similar to the kernel.
The background level or threshold is found with sigma clipped statistics
    (5 sigma over the median) on the image/correlated image. Then three different
    strategies can be used to detect the blobs (potential planets):
Local maxima + 2d Gaussian fit. The local peaks above the background on the
(correlated) frame are detected. A maximum filter is used for finding local
maxima. This operation dilates the original image and merges neighboring
local maxima closer than the size of the dilation. Locations where the
original image is equal to the dilated image are returned as local maxima.
The minimum separation between the peaks is 1*FWHM. A 2d Gaussian fit is
done on each of the maxima constraining the position on the subimage and the
    sigma of the fit. Finally the blobs are filtered based on their SNR.
    Laplacian of Gaussian + 2d Gaussian fit. It computes the Laplacian of
    Gaussian images with successively increasing standard deviation and stacks
    them up in a cube. Blobs are local maxima in this cube. LoG assumes that
    the blobs are bright on a dark background. A 2d Gaussian fit is done
    on each of the candidates, constraining the position on the subimage and the
    sigma of the fit. Finally the blobs are filtered based on their SNR.
    Difference of Gaussians. This is a faster approximation of the LoG approach. In
    this case the image is blurred with increasing standard deviations and the
    differences between two successively blurred images are stacked up in a cube.
    DoG assumes that the blobs are bright on a dark background. A 2d
    Gaussian fit is done on each of the candidates, constraining the position on
    the subimage and the sigma of the fit. Finally the blobs are filtered based
    on their SNR.
"""
def check_blobs(array_padded, coords_temp, fwhm, debug):
y_temp = coords_temp[:,0]
x_temp = coords_temp[:,1]
coords = []
# Fitting a 2d gaussian to each local maxima position
for y, x in zip(y_temp, x_temp):
subsi = 2 * int(np.ceil(fwhm))
if subsi %2 == 0:
subsi += 1
subim, suby, subx = get_square(array_padded, subsi, y+pad, x+pad,
position=True, force=True)
cy, cx = frame_center(subim)
gauss = models.Gaussian2D(amplitude=subim.max(), x_mean=cx,
y_mean=cy, theta=0,
x_stddev=fwhm*gaussian_fwhm_to_sigma,
y_stddev=fwhm*gaussian_fwhm_to_sigma)
sy, sx = np.indices(subim.shape)
fitter = fitting.LevMarLSQFitter()
fit = fitter(gauss, sx, sy, subim)
# checking that the amplitude is positive > 0
# checking whether the x and y centroids of the 2d gaussian fit
# coincide with the center of the subimage (within 2px error)
# checking whether the mean of the fwhm in y and x of the fit
# are close to the FWHM_PSF with a margin of 3px
fwhm_y = fit.y_stddev.value*gaussian_sigma_to_fwhm
fwhm_x = fit.x_stddev.value*gaussian_sigma_to_fwhm
mean_fwhm_fit = np.mean([np.abs(fwhm_x), np.abs(fwhm_y)])
if fit.amplitude.value > 0 \
and np.allclose(fit.y_mean.value, cy, atol=2) \
and np.allclose(fit.x_mean.value, cx, atol=2) \
and np.allclose(mean_fwhm_fit, fwhm, atol=3):
coords.append((suby + fit.y_mean.value,
subx + fit.x_mean.value))
if debug:
print('Coordinates (Y,X): {:.3f},{:.3f}'.format(y, x))
print('fit peak = {:.3f}'.format(fit.amplitude.value))
msg = 'fwhm_y in px = {:.3f}, fwhm_x in px = {:.3f}'
print(msg.format(fwhm_y, fwhm_x))
print('mean fit fwhm = {:.3f}'.format(mean_fwhm_fit))
pp_subplots(subim, colorb=True, axis=False, dpi=60)
return coords
def print_coords(coords):
print('Blobs found:', len(coords))
print(' ycen xcen')
print('------ ------')
for i in range(len(coords[:, 0])):
print('{:.3f} \t {:.3f}'.format(coords[i,0], coords[i,1]))
def print_abort():
if verbose:
print(sep)
print('No potential sources found')
print(sep)
# --------------------------------------------------------------------------
if array.ndim != 2:
raise TypeError('Input array is not a frame or 2d array')
if psf.ndim != 2 and psf.shape[0] < array.shape[0]:
raise TypeError('Input psf is not a 2d array or has wrong size')
# Getting the FWHM from the PSF array
cenpsf = frame_center(psf)
outdf = fit_2dgaussian(psf, cent=(cenpsf), debug=debug, full_output=True)
fwhm_x, fwhm_y = outdf['fwhm_x'], outdf['fwhm_y']
fwhm = np.mean([fwhm_x, fwhm_y])
if verbose:
print('FWHM = {:.2f} pxs\n'.format(fwhm))
if debug:
print('FWHM_y', fwhm_y)
print('FWHM_x', fwhm_x)
# Masking the center, 2*lambda/D is the expected IWA
if mask:
array = mask_circle(array, radius=fwhm)
# Matched filter
if matched_filter:
frame_det = correlate(array, psf)
else:
frame_det = array
# Estimation of background level
_, median, stddev = sigma_clipped_stats(frame_det, sigma=5, iters=None)
bkg_level = median + (stddev * bkg_sigma)
if debug:
print('Sigma clipped median = {:.3f}'.format(median))
print('Sigma clipped stddev = {:.3f}'.format(stddev))
print('Background threshold = {:.3f}'.format(bkg_level))
print()
if mode == 'lpeaks' or mode == 'log' or mode == 'dog':
# Padding the image with zeros to avoid errors at the edges
pad = 10
array_padded = np.lib.pad(array, pad, 'constant', constant_values=0)
if debug and plot and matched_filter:
print('Input frame after matched filtering:')
pp_subplots(frame_det, rows=2, colorb=True)
if mode == 'lpeaks':
# Finding local peaks (can be done in the correlated frame)
coords_temp = peak_local_max(frame_det, threshold_abs=bkg_level,
min_distance=int(np.ceil(fwhm)),
num_peaks=20)
coords = check_blobs(array_padded, coords_temp, fwhm, debug)
coords = np.array(coords)
if verbose and coords.shape[0] > 0:
print_coords(coords)
elif mode == 'log':
sigma = fwhm*gaussian_fwhm_to_sigma
coords = feature.blob_log(frame_det.astype('float'),
threshold=bkg_level,
min_sigma=sigma-.5, max_sigma=sigma+.5)
if len(coords) == 0:
print_abort()
return 0, 0
coords = coords[:,:2]
coords = check_blobs(array_padded, coords, fwhm, debug)
coords = np.array(coords)
if coords.shape[0] > 0 and verbose:
print_coords(coords)
elif mode == 'dog':
sigma = fwhm*gaussian_fwhm_to_sigma
coords = feature.blob_dog(frame_det.astype('float'),
threshold=bkg_level, min_sigma=sigma-.5,
max_sigma=sigma+.5)
if len(coords) == 0:
print_abort()
return 0, 0
coords = coords[:, :2]
coords = check_blobs(array_padded, coords, fwhm, debug)
coords = np.array(coords)
if coords.shape[0] > 0 and verbose:
print_coords(coords)
else:
msg = 'Wrong mode. Available modes: lpeaks, log, dog.'
raise TypeError(msg)
if coords.shape[0] == 0:
print_abort()
return 0, 0
yy = coords[:, 0]
xx = coords[:, 1]
yy_final = []
xx_final = []
yy_out = []
xx_out = []
snr_list = []
xx -= pad
yy -= pad
# Checking S/N for potential sources
for i in range(yy.shape[0]):
y = yy[i]
x = xx[i]
if verbose:
print(sep)
print('X,Y = ({:.1f},{:.1f})'.format(x,y))
snr = snr_ss(array, (x,y), fwhm, False, verbose=False)
snr_list.append(snr)
if snr >= snr_thresh:
if verbose:
_ = frame_quick_report(array, fwhm, (x,y), verbose=verbose)
yy_final.append(y)
xx_final.append(x)
else:
yy_out.append(y)
xx_out.append(x)
if verbose:
print('S/N constraint NOT fulfilled (S/N = {:.3f})'.format(snr))
if debug:
_ = frame_quick_report(array, fwhm, (x,y), verbose=verbose)
if debug or full_output:
table = Table([yy.tolist(), xx.tolist(), snr_list],
names=('y', 'x', 'px_snr'))
table.sort('px_snr')
yy_final = np.array(yy_final)
xx_final = np.array(xx_final)
yy_out = np.array(yy_out)
xx_out = np.array(xx_out)
if plot:
coords = list(zip(xx_out.tolist() + xx_final.tolist(),
yy_out.tolist() + yy_final.tolist()))
circlealpha = [0.3] * len(xx_out)
circlealpha += [1] * len(xx_final)
pp_subplots(array, circle=coords, circlealpha=circlealpha,
circlelabel=True, circlerad=fwhm, save=save_plot, dpi=120,
angscale=angscale, pxscale=pxscale, title=plot_title)
if debug:
print(table)
if full_output:
return table
else:
return yy_final, xx_final | 5,354,965 |
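
# Minimal, self-contained sketch of the 'lpeaks' strategy described in the Notes
# above (not the full pipeline): inject two Gaussian blobs into noise, estimate a
# background threshold from the median/std, and find local maxima separated by at
# least one FWHM. All names and values below are illustrative assumptions.
import numpy as np
from skimage.feature import peak_local_max

rng = np.random.default_rng(0)
frame = rng.normal(0.0, 1.0, (100, 100))
fwhm = 4.0
yy, xx = np.mgrid[0:100, 0:100]
for (yc, xc) in [(30, 40), (70, 60)]:  # two synthetic "planets"
    frame += 20.0 * np.exp(-((yy - yc) ** 2 + (xx - xc) ** 2) / (2 * (fwhm / 2.355) ** 2))

bkg_level = np.median(frame) + 5 * frame.std()  # crude background threshold
coords = peak_local_max(frame, threshold_abs=bkg_level,
                        min_distance=int(np.ceil(fwhm)), num_peaks=20)
print(coords)  # expected to recover peaks near (30, 40) and (70, 60)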
def get_database_uri(application):
""" Returns database URI. Prefer SQLALCHEMY_DATABASE_URI over components."""
if application.config.get('SQLALCHEMY_DATABASE_URI'):
return application.config['SQLALCHEMY_DATABASE_URI']
return '{driver}://{username}:{password}@{host}:{port}/{name}'\
.format(driver=application.config['EQ_SERVER_SIDE_STORAGE_DATABASE_DRIVER'],
username=application.eq['secret_store'].get_secret_by_name('EQ_SERVER_SIDE_STORAGE_DATABASE_USERNAME'),
password=application.eq['secret_store'].get_secret_by_name('EQ_SERVER_SIDE_STORAGE_DATABASE_PASSWORD'),
host=application.config['EQ_SERVER_SIDE_STORAGE_DATABASE_HOST'],
port=application.config['EQ_SERVER_SIDE_STORAGE_DATABASE_PORT'],
name=application.config['EQ_SERVER_SIDE_STORAGE_DATABASE_NAME']) | 5,354,966 |
def get_s_vol_single_sma(c: CZSC, di: int = 1, t_seq=(5, 10, 20, 60)) -> OrderedDict:
"""获取倒数第i根K线的成交量单均线信号"""
freq: Freq = c.freq
s = OrderedDict()
k1 = str(freq.value)
k2 = f"倒{di}K成交量"
for t in t_seq:
x1 = Signal(k1=k1, k2=k2, k3=f"SMA{t}多空", v1="其他", v2='其他', v3='其他')
x2 = Signal(k1=k1, k2=k2, k3=f"SMA{t}方向", v1="其他", v2='其他', v3='其他')
s[x1.key] = x1.value
s[x2.key] = x2.value
min_k_nums = max(t_seq) + 10
if len(c.bars_raw) < min_k_nums:
return s
if di == 1:
vol = np.array([x.vol for x in c.bars_raw[-min_k_nums:]], dtype=np.float)
else:
vol = np.array([x.vol for x in c.bars_raw[-min_k_nums-di+1:-di+1]], dtype=np.float)
for t in t_seq:
sma = SMA(vol[-t-10:], timeperiod=t)
if vol[-1] >= sma[-1]:
v1 = Signal(k1=k1, k2=k2, k3=f"SMA{t}多空", v1="多头")
else:
v1 = Signal(k1=k1, k2=k2, k3=f"SMA{t}多空", v1="空头")
s[v1.key] = v1.value
if sma[-1] >= sma[-2]:
v2 = Signal(k1=k1, k2=k2, k3=f"SMA{t}方向", v1="向上")
else:
v2 = Signal(k1=k1, k2=k2, k3=f"SMA{t}方向", v1="向下")
s[v2.key] = v2.value
return s | 5,354,967 |
def sampler(value, percentile):
"""Score based on sampling task model output distribution
Args:
value: The output of the task model
percentile: the (sorted) index of the sample we use
Returns:
The percentile largest distance from the mean of the samples.
"""
softmaxed = nn.functional.softmax(value[0], dim=1)
samples = torch.tensor(
np.array(
list(
torch.utils.data.WeightedRandomSampler(
softmaxed, 10000)))).float()
mean_value = samples.mean(dim=1)
dist_from_mean = torch.abs(((
samples-mean_value.unsqueeze(1).repeat(
1, samples.shape[1]))+180)%360 - 180)
sorted_val = torch.sort(dist_from_mean).values
if percentile == 10000:
percentile = percentile-1
return sorted_val[:, percentile] | 5,354,968 |
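
# Side note on the distance used above: ((a - b + 180) % 360) - 180 gives the
# signed angular difference wrapped to (-180, 180], so e.g. 350 deg and 10 deg
# are 20 deg apart rather than 340. Small self-contained check with plain numpy:
import numpy as np

def ang_diff(a, b):
    return np.abs(((a - b + 180) % 360) - 180)

assert ang_diff(350, 10) == 20
assert ang_diff(10, 350) == 20
assert ang_diff(90, 270) == 180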
def pretty_print(node, indent=' '*2, show_offsets=False):
"""
Pretty print node
:param node: Instance of `ast.Node`
:param indent: Number of spaces to indent
:param show_offsets: Show offsets. Boolean
:return:
"""
astpretty.pprint(node, indent=indent, show_offsets=show_offsets) | 5,354,969 |
def gists_by(username, number=-1, etag=None):
"""Iterate over gists created by the provided username.
.. deprecated:: 1.2.0
Use :meth:`github3.github.GitHub.gists_by` instead.
:param str username: (required), if provided, get the gists for this user
instead of the authenticated user.
:param int number: (optional), number of gists to return. Default: -1,
return all of them
:param str etag: (optional), ETag from a previous request to the same
endpoint
:returns: generator of :class:`Gist <github3.gists.Gist>`
"""
if username:
return gh.gists_by(username, number, etag)
return iter([]) | 5,354,970 |
def needs_spark(test_item):
"""
Use as a decorator before test classes or methods to only run them if Spark is usable.
"""
test_item = _mark_test('spark', test_item)
try:
# noinspection PyUnresolvedReferences
import pyspark
except ImportError:
return unittest.skip("Skipping test. Install PySpark to include this test.")(test_item)
except:
raise
else:
return test_item | 5,354,971 |
def get_school_years_from_db() -> Generator:
"""Get all school years from the database.
    :return: iterable with all available school years
"""
session: db.orm.session.Session = Session()
return (e[0] for e in set(session.query(Holiday.school_year).all())) | 5,354,972 |
def user_delete(handle, name):
"""
deletes user
Args:
handle (UcscHandle)
name (string): name
Returns:
None
Raises:
UcscOperationError: If AaaUser is not present
Example:
user_delete(handle, name="test")
"""
mo = user_get(handle, name)
if not mo:
raise UcscOperationError("user_delete",
"User does not exist.")
handle.remove_mo(mo)
handle.commit() | 5,354,973 |
def get_api(context=None):
"""
This function tries to detect if the app is running on a K8S cluster or locally
and returns the corresponding API object to be used to query the API server.
"""
if app.config.get("MODE") == "KUBECONFIG":
return client.CustomObjectsApi(config.new_client_from_config(context=context))
elif app.config.get("MODE") == "CLUSTER":
return client.CustomObjectsApi() | 5,354,974 |
def is_variant(title) -> bool:
"""
Check if an issue is variant cover.
"""
return "variant" in title.lower() | 5,354,975 |
def _decode_common(hparams):
"""Common graph for decoding."""
features = get_input(hparams, FLAGS.data_files)
decode_features = {}
for key in features:
if key.endswith("_refs"):
continue
decode_features[key] = features[key]
_, _, _, references = seq2act_model.compute_logits(
features, hparams, mode=tf.estimator.ModeKeys.EVAL)
decode_utils.decode_n_step(seq2act_model.compute_logits,
decode_features, references["areas"],
hparams, n=20,
beam_size=FLAGS.beam_size)
decode_mask = generate_action_mask(decode_features)
return decode_features, decode_mask, features | 5,354,976 |
def _lex_single_line_comment(header: str) -> Tuple[str, str]:
"""
>>> _lex_single_line_comment("a=10")
('', 'a=10')
>>> _lex_single_line_comment("//comment\\nb=20")
('', 'b=20')
"""
if header[:2] != "//":
return "", header
line_end_pos = header.find("\n")
return "", header[line_end_pos + 1 :] | 5,354,977 |
def create_loaders(config, save_dir=None):
"""Prepares the task and data loaders for a model trainer based on a provided data configuration.
This function will parse a configuration dictionary and extract all the information required to
instantiate the requested dataset parsers. Then, combining the task metadata of all these parsers, it
will evenly split the available samples into three sets (training, validation, test) to be handled by
different data loaders. These will finally be returned along with the (global) task object.
The configuration dictionary is expected to contain two fields: ``loaders``, which specifies all
parameters required for establishing the dataset split, shuffling seeds, and batch size (these are
listed and detailed below); and ``datasets``, which lists the dataset parser interfaces to instantiate
as well as their parameters. For more information on the ``datasets`` field, refer to
:func:`thelper.data.utils.create_parsers`.
The parameters expected in the 'loaders' configuration field are the following:
- ``<train_/valid_/test_>batch_size`` (mandatory): specifies the (mini)batch size to use in data
loaders. If you get an 'out of memory' error at runtime, try reducing it.
- ``<train_/valid_/test_>collate_fn`` (optional): specifies the collate function to use in data
loaders. The default one is typically fine, but some datasets might require a custom function.
- ``shuffle`` (optional, default=True): specifies whether the data loaders should shuffle
their samples or not.
- ``test_seed`` (optional): specifies the RNG seed to use when splitting test data. If no seed
is specified, the RNG will be initialized with a device-specific or time-related seed.
- ``valid_seed`` (optional): specifies the RNG seed to use when splitting validation data. If no
seed is specified, the RNG will be initialized with a device-specific or time-related seed.
- ``torch_seed`` (optional): specifies the RNG seed to use for torch-related stochastic operations
(e.g. for data augmentation). If no seed is specified, the RNG will be initialized with a
device-specific or time-related seed.
- ``numpy_seed`` (optional): specifies the RNG seed to use for numpy-related stochastic operations
(e.g. for data augmentation). If no seed is specified, the RNG will be initialized with a
device-specific or time-related seed.
- ``random_seed`` (optional): specifies the RNG seed to use for stochastic operations with python's
'random' package. If no seed is specified, the RNG will be initialized with a device-specific or
time-related seed.
- ``workers`` (optional, default=1): specifies the number of threads to use to preload batches in
parallel; can be 0 (loading will be on main thread), or an integer >= 1.
- ``pin_memory`` (optional, default=False): specifies whether the data loaders will copy tensors
into CUDA-pinned memory before returning them.
- ``drop_last`` (optional, default=False): specifies whether to drop the last incomplete batch
or not if the dataset size is not a multiple of the batch size.
- ``sampler`` (optional): specifies a type of sampler and its constructor parameters to be used
in the data loaders. This can be used for example to help rebalance a dataset based on its
class distribution. See :mod:`thelper.data.samplers` for more information.
- ``augments`` (optional): provides a list of transformation operations used to augment all samples
of a dataset. See :func:`thelper.transforms.utils.load_augments` for more info.
- ``train_augments`` (optional): provides a list of transformation operations used to augment the
training samples of a dataset. See :func:`thelper.transforms.utils.load_augments` for more info.
- ``valid_augments`` (optional): provides a list of transformation operations used to augment the
validation samples of a dataset. See :func:`thelper.transforms.utils.load_augments` for more info.
- ``test_augments`` (optional): provides a list of transformation operations used to augment the
test samples of a dataset. See :func:`thelper.transforms.utils.load_augments` for more info.
- ``eval_augments`` (optional): provides a list of transformation operations used to augment the
validation and test samples of a dataset. See :func:`thelper.transforms.utils.load_augments` for more info.
- ``base_transforms`` (optional): provides a list of transformation operations to apply to all
loaded samples. This list will be passed to the constructor of all instantiated dataset parsers.
See :func:`thelper.transforms.utils.load_transforms` for more info.
- ``train_split`` (optional): provides the proportion of samples of each dataset to hand off to the
training data loader. These proportions are given in a dictionary format (``name: ratio``).
- ``valid_split`` (optional): provides the proportion of samples of each dataset to hand off to the
validation data loader. These proportions are given in a dictionary format (``name: ratio``).
- ``test_split`` (optional): provides the proportion of samples of each dataset to hand off to the
test data loader. These proportions are given in a dictionary format (``name: ratio``).
- ``skip_verif`` (optional, default=True): specifies whether the dataset split should be verified
if resuming a session by parsing the log files generated earlier.
- ``skip_split_norm`` (optional, default=False): specifies whether the question about normalizing
the split ratios should be skipped or not.
- ``skip_class_balancing`` (optional, default=False): specifies whether the balancing of class
labels should be skipped in case the task is classification-related.
Example configuration file::
# ...
"loaders": {
"batch_size": 128, # batch size to use in data loaders
"shuffle": true, # specifies that the data should be shuffled
"workers": 4, # number of threads to pre-fetch data batches with
"train_sampler": { # we can use a data sampler to rebalance classes (optional)
# see e.g. 'thelper.data.samplers.WeightedSubsetRandomSampler'
# ...
},
"train_augments": { # training data augmentation operations
# see 'thelper.transforms.utils.load_augments'
# ...
},
"eval_augments": { # evaluation (valid/test) data augmentation operations
# see 'thelper.transforms.utils.load_augments'
# ...
},
"base_transforms": { # global sample transformation operations
# see 'thelper.transforms.utils.load_transforms'
# ...
},
# optionally indicate how to resolve dataset loader task vs model task incompatibility if any
# leave blank to get more details about each case during runtime if this situation happens
"task_compat_mode": "old|new|compat",
# finally, we define a 80%-10%-10% split for our data
# (we could instead use one dataset for training and one for testing)
"train_split": {
"dataset_A": 0.8
"dataset_B": 0.8
},
"valid_split": {
"dataset_A": 0.1
"dataset_B": 0.1
},
"test_split": {
"dataset_A": 0.1
"dataset_B": 0.1
}
# (note that the dataset names above are defined in the field below)
},
"datasets": {
"dataset_A": {
# type of dataset interface to instantiate
"type": "...",
"params": {
# ...
}
},
"dataset_B": {
# type of dataset interface to instantiate
"type": "...",
"params": {
# ...
},
# if it does not derive from 'thelper.data.parsers.Dataset', a task is needed:
"task": {
# this type must derive from 'thelper.tasks.Task'
"type": "...",
"params": {
# ...
}
}
},
# ...
},
# ...
Args:
config: a dictionary that provides all required data configuration information under two fields,
namely 'datasets' and 'loaders'.
save_dir: the path to the root directory where the session directory should be saved. Note that
this is not the path to the session directory itself, but its parent, which may also contain
other session directories.
Returns:
A 4-element tuple that contains: 1) the global task object to specialize models and trainers with;
2) the training data loader; 3) the validation data loader; and 4) the test data loader.
.. seealso::
| :func:`thelper.data.utils.create_parsers`
| :func:`thelper.transforms.utils.load_augments`
| :func:`thelper.transforms.utils.load_transforms`
"""
logstamp = thelper.utils.get_log_stamp()
repover = thelper.__version__ + ":" + thelper.utils.get_git_stamp()
session_name = config["name"] if "name" in config else "session"
data_logger_dir = None
if save_dir is not None:
thelper.utils.init_logger() # make sure all logging is initialized before attaching this part
data_logger_dir = os.path.join(save_dir, "logs")
os.makedirs(data_logger_dir, exist_ok=True)
data_logger_path = os.path.join(data_logger_dir, "data.log")
data_logger_format = logging.Formatter("[%(asctime)s - %(process)s] %(levelname)s : %(message)s")
data_logger_fh = logging.FileHandler(data_logger_path)
data_logger_fh.setLevel(logging.NOTSET)
data_logger_fh.setFormatter(data_logger_format)
thelper.data.logger.addHandler(data_logger_fh)
thelper.data.logger.info(f"created data log for session '{session_name}'")
logger.debug("loading data usage config")
# todo: 'data_config' field is deprecated, might be removed later
if "data_config" in config:
logger.warning("using 'data_config' field in configuration dictionary is deprecated; switch it to 'loaders'")
loaders_config = thelper.utils.get_key(["data_config", "loaders"], config)
# noinspection PyProtectedMember
from thelper.data.loaders import LoaderFactory as LoaderFactory
loader_factory = LoaderFactory(loaders_config)
datasets, task = create_parsers(config, loader_factory.get_base_transforms())
assert datasets and task is not None, "invalid dataset configuration (got empty list)"
for dataset_name, dataset in datasets.items():
logger.info(f"parsed dataset: {str(dataset)}")
logger.info(f"task info: {str(task)}")
logger.debug("splitting datasets and creating loaders...")
train_idxs, valid_idxs, test_idxs = loader_factory.get_split(datasets, task)
if save_dir is not None:
with open(os.path.join(data_logger_dir, "task.log"), "a+") as fd:
fd.write(f"session: {session_name}-{logstamp}\n")
fd.write(f"version: {repover}\n")
fd.write(str(task) + "\n")
for dataset_name, dataset in datasets.items():
dataset_log_file = os.path.join(data_logger_dir, dataset_name + ".log")
if not loader_factory.skip_verif and os.path.isfile(dataset_log_file):
logger.info(f"verifying sample list for dataset '{dataset_name}'...")
log_content = thelper.utils.load_config(dataset_log_file, as_json=True, add_name_if_missing=False)
assert isinstance(log_content, dict), "old split data logs no longer supported for verification"
samples_old, samples_new = None, None
if "samples" in log_content:
assert isinstance(log_content["samples"], list), \
"unexpected dataset log content (bad 'samples' field, should be list)"
samples_old = log_content["samples"]
samples_new = dataset.samples if hasattr(dataset, "samples") and dataset.samples is not None \
and len(dataset.samples) == len(dataset) else []
if len(samples_old) != len(samples_new):
query_msg = f"old sample list for dataset '{dataset_name}' mismatch with current list; proceed?"
answer = thelper.utils.query_yes_no(query_msg, bypass="n")
if not answer:
logger.error("sample list mismatch with previous run; user aborted")
sys.exit(1)
break
for set_name, idxs in zip(["train_idxs", "valid_idxs", "test_idxs"],
[train_idxs[dataset_name], valid_idxs[dataset_name], test_idxs[dataset_name]]):
# index values were paired in tuples earlier, 0=idx, 1=label --- we unpack in the miniloop below
if not np.array_equal(np.sort(log_content[set_name]), np.sort([idx for idx, _ in idxs])):
query_msg = f"Old indices list for dataset '{dataset_name}' mismatch with current indices" \
f"list ('{set_name}'); proceed anyway?"
answer = thelper.utils.query_yes_no(query_msg, bypass="n")
if not answer:
logger.error("indices list mismatch with previous run; user aborted")
sys.exit(1)
break
printer = pprint.PrettyPrinter(indent=2)
log_sample_metadata = thelper.utils.get_key_def(["log_samples", "log_samples_metadata"], config, default=False)
for dataset_name, dataset in datasets.items():
dataset_log_file = os.path.join(data_logger_dir, dataset_name + ".log")
samples = dataset.samples if hasattr(dataset, "samples") and dataset.samples is not None \
and len(dataset.samples) == len(dataset) else []
log_content = {
"metadata": {
"session_name": session_name,
"logstamp": logstamp,
"version": repover,
"dataset": str(dataset),
},
# index values were paired in tuples earlier, 0=idx, 1=label
"train_idxs": [int(idx) for idx, _ in train_idxs[dataset_name]],
"valid_idxs": [int(idx) for idx, _ in valid_idxs[dataset_name]],
"test_idxs": [int(idx) for idx, _ in test_idxs[dataset_name]]
}
if log_sample_metadata:
log_content["samples"] = [printer.pformat(sample) for sample in samples]
# now, always overwrite, as it can get too big otherwise
with open(dataset_log_file, "w") as fd:
json.dump(log_content, fd, indent=4, sort_keys=False)
train_loader, valid_loader, test_loader = loader_factory.create_loaders(datasets, train_idxs, valid_idxs, test_idxs)
return task, train_loader, valid_loader, test_loader | 5,354,978 |
def in_box(X, box):
"""Get a boolean array indicating whether points X are within a given box
:param X: n_pts x n_dims array of points
:param box: 2 x n_dims box specs (box[0, :] is the min point and box[1, :] is the max point)
:return: n_pts boolean array r where r[idx] is True iff X[idx, :] is within the box
>>> import numpy as np
>>> X = np.arange(12).reshape((4, 3))
>>> print(X)
[[ 0 1 2]
[ 3 4 5]
[ 6 7 8]
[ 9 10 11]]
>>> in_box(X, [[1, 2, 3], [6, 7, 8]])
array([False, True, True, False])
>>> in_box(X, box=[[2] * 3, [7] * 3])
array([False, True, False, False])
"""
MINS_ROW_IDX = 0
MAXS_ROW_IDX = 1
X, box = map(np.array, (X, box))
n_rows_in_box_matrix, ndims = box.shape
assert (
n_rows_in_box_matrix == 2
), 'box must have 2 rows only: [0] the min point and [1] the max point of the box'
assert (
X.shape[1] == ndims
), f"ndims of X should be aligned with box's ({ndims}): Was {X.shape[1]}"
return np.all((box[MINS_ROW_IDX, :] <= X) & (X <= box[MAXS_ROW_IDX, :]), axis=1) | 5,354,979 |
def test_partial_field_square():
"""Fields that do not extend over the whole wall"""
field = np.zeros((40, 40))
field[:10, 0] = 1
fields = {kw_field_map: field}
walls = "L"
assert func(fields, "s", walls=walls) == 0.25
field[:20, 0] = 1
assert func(fields, "s", walls=walls) == 0.5
field[:30, 0] = 1
assert func(fields, "s", walls=walls) == 0.75
print("test_partial_field() passed")
return True | 5,354,980 |
def get_countries():
"""
The function to generate a dictionary containing ISO_3166-1 country codes
to names.
Returns:
Dictionary: A dictionary with the country codes as the keys and the
country names as the values.
"""
#Initialize the countries dictionary.
countries = {}
#Set the data directory based on if the script is a frozen executable.
if sys.platform == 'win32' and getattr(sys, 'frozen', False):
data_dir = path.dirname(sys.executable)
else:
data_dir = path.dirname(__file__)
#Create the country codes file object.
f = open(str(data_dir) + '/data/iso_3166-1_list_en.xml', 'r')
#Read the file.
data = f.read()
#Check if there is data.
if not data:
return {}
#Parse the data to get the DOM.
dom = parseString(data)
#Retrieve the country entries.
entries = dom.getElementsByTagName('ISO_3166-1_Entry')
#Iterate through the entries and add to the countries dictionary.
for entry in entries:
#Retrieve the country code and name from the DOM.
code = entry.getElementsByTagName(
'ISO_3166-1_Alpha-2_Code_element')[0].firstChild.data
name = entry.getElementsByTagName(
'ISO_3166-1_Country_name')[0].firstChild.data
#Add to the countries dictionary.
countries[code] = name.title()
return countries | 5,354,981 |
def get_empty_faceid(current_groupid, uuid, embedding,
img_style, number_people, img_objid, forecast_result):
"""
    Fallback identification by scanning the database, used when softmax gives no result (no model / low prediction confidence).
:param current_groupid:
:param uuid:
:param embedding:
:param img_style:
:param number_people:
:param img_objid:
:return:
"""
json_data = {'detected': True, 'recognized': False}
face_id = img_objid + str(all_face_index).zfill(4)
json_data['recognized'] = False
json_data['face_id'] = face_id
json_data['accuracy'] = 0
json_data['style'] = img_style
forecast_result['face_id'] = face_id
forecast_result['face_accuracy'] = 0
embedding_string = ','.join(str(x) for x in embedding)
forecast_result['embedding_string'] = embedding_string
return json_data, forecast_result | 5,354,982 |
def dz_and_top_to_phis(
top_height: xr.DataArray, dz: xr.DataArray, dim: str = COORD_Z_CENTER
) -> xr.DataArray:
""" Compute surface geopotential from model top height and layer thicknesses"""
return _GRAVITY * (top_height + dz.sum(dim=dim)) | 5,354,983 |
def altPDF(peaks,mu,sigma=None,exc=None,method="RFT"):
"""
altPDF: Returns probability density using a truncated normal
distribution that we define as the distribution of local maxima in a
GRF under the alternative hypothesis of activation
parameters
----------
peaks: float or list of floats
        list of peak heights
mu:
sigma:
returns
-------
fa: float or list
        probability density of the peak heights under Ha
"""
#Returns probability density of the alternative peak distribution
peaks = np.asarray(peaks)
if method == "RFT":
# assert type(sigma) is in [float, int]
# assert sigma is not None
ksi = (peaks-mu)/sigma
alpha = (exc-mu)/sigma
num = 1/sigma * scipy.stats.norm.pdf(ksi)
den = 1. - scipy.stats.norm.cdf(alpha)
fa = num/den
elif method == "CS":
fa = [peakdistribution.peakdens3D(y-mu,1) for y in peaks]
return fa | 5,354,984 |
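
# Worked sketch of the 'RFT' branch above: the density is a normal N(mu, sigma)
# truncated below at the excursion threshold `exc`. The values here are
# illustrative; the final assert checks that the density integrates to ~1.
import numpy as np
import scipy.stats

mu, sigma, exc = 3.0, 1.0, 2.0
peaks = np.array([2.5, 3.0, 4.0])
ksi = (peaks - mu) / sigma
alpha = (exc - mu) / sigma
fa = (scipy.stats.norm.pdf(ksi) / sigma) / (1.0 - scipy.stats.norm.cdf(alpha))
grid = np.linspace(exc, mu + 10 * sigma, 20000)
dens = (scipy.stats.norm.pdf((grid - mu) / sigma) / sigma) / (1.0 - scipy.stats.norm.cdf(alpha))
assert abs(np.trapz(dens, grid) - 1.0) < 1e-3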
def implicit_quantile_network(num_actions, quantile_embedding_dim,
network_type, state, num_quantiles):
"""The Implicit Quantile ConvNet.
Args:
num_actions: int, number of actions.
quantile_embedding_dim: int, embedding dimension for the quantile input.
network_type: namedtuple, collection of expected values to return.
state: `tf.Tensor`, contains the agent's current state.
num_quantiles: int, number of quantile inputs.
Returns:
net: _network_type object containing the tensors output by the network.
"""
model = atari_lib.ImplicitQuantileNetwork(num_actions, quantile_embedding_dim)
net = model(state, num_quantiles)
return network_type(quantile_values=net.quantile_values,
quantiles=net.quantiles) | 5,354,985 |
def execute_command_line(cmd_list):
"""Executes the given command list on the command line
:param cmd_list: The list of commands
:type cmd_list: []
"""
logger.debug('Executing: %s', ' '.join(cmd_list))
try:
subprocess.check_output(cmd_list, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as ex:
raise CommandError('Exit code %i: %s' % (ex.returncode, ex.output), ex.returncode) | 5,354,986 |
def destroy_droplets(ctx):
"""Destroy the droplets - node-1, node-2, node-3"""
manager = Manager(token=DIGITAL_OCEAN_ACCESS_TOKEN)
for num in range(3):
node = f"node-{num + 1}"
droplets = manager.get_all_droplets(tag_name=node)
for droplet in droplets:
droplet.destroy()
print(f"{node} has been destroyed.") | 5,354,987 |
def test_series_index_name_change(spark_context) -> None:
"""
Test pontem_series attributes against pandas series attributes of same data
"""
import pontem as pt
sc = spark_context
pontem_series = pt.Series(sc=sc, data=DATA)
pontem_series.index.name = 'new_name'
# Ensure the name stayed.
assert pontem_series.index.name == 'new_name', \
'Assigned "new_name" to pontem.Series.index but did not persist; currently {}'.format(pontem_series.index.name)
# Ensure the name change is propagated into the actual schema of the underlying pyspark df
assert 'new_name' in pontem_series._pyspark_series.schema.names, \
'Unable to change pontem.Series.index.name to "new_name"' | 5,354,988 |
def get_waitlist(usercode):
"""
    Request /api/waitlists/{usercode} - returns the waitlist content for the given usercode
"""
user_by_usercode = (
AppUsers.query.filter(AppUsers.usercode == usercode).one_or_none()
)
if user_by_usercode is None:
abort(
409,
"Usercode {usercode} does not exists".format(
usercode=usercode
),
)
string_array_waitlist = user_by_usercode.waitlist
    # Query by id
try:
array_waitlist = [int(s) for s in string_array_waitlist.split(',')]
except ValueError:
abort(
404,
"Waitlist empty or wrong format. Format of waitlist string should be - 1,2,3,4,5 etc",
)
except AttributeError:
abort(
404,
"Waitlist empty or wrong format. Format of waitlist string should be - 1,2,3,4,5 etc",
)
content = Content.query.filter(Content.content_id.in_(array_waitlist)).all()
    # Check whether content exists for these IDs
if content is not None:
        # Serialization
content_schema = ContentSchema(many=True)
data = content_schema.dump(content).data
return data
    # Error if nothing was found
else:
abort(
404,
"Empty show list with this IDs",
) | 5,354,989 |
def not_empty(message=None) -> Filter_T:
"""
Validate any object to ensure it's not empty (is None or has no elements).
"""
def validate(value):
if value is None:
_raise_failure(message)
if hasattr(value, '__len__') and value.__len__() == 0:
_raise_failure(message)
return value
return validate | 5,354,990 |
def process_mark_time():
""" Determine if DOT or DASH timing has elapsed """
global mark_has_begun, mark_start_time
if not mark_has_begun: return
clear_extra_space()
mark_interval = running_time() - mark_start_time
if mark_interval > DOT_TIME_MIN and mark_interval < DOT_TIME_MAX:
message.append(DOT)
space_start_time = running_time()
elif mark_interval > DASH_TIME_MIN and mark_interval < DASH_TIME_MAX:
message.append(DASH)
space_start_time = running_time()
mark_has_begun = False | 5,354,991 |
def temporal_affine_forward(x, W, b):
"""
Run a forward pass for temporal affine layer. The dimensions are consistent with RNN/LSTM forward passes.
Arguments:
x: input data with shape (N, T, D)
W: weight matrix for input data with shape (D, M)
b: bias with shape (M,)
Outputs:
out: output data with shape (N, T, M)
cache: cache for back-prop
"""
N, T, D = x.shape
M = b.shape[0]
out = np.dot(x.reshape(N * T, D), W).reshape(N, T, M) + b
cache = x, W, b, out
return out, cache | 5,354,992 |
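
# Shape check (assumes temporal_affine_forward above is in scope): with N=2
# sequences of T=3 steps and D=4 features mapped to M=5 outputs, the result has
# shape (N, T, M).
import numpy as np

x = np.random.randn(2, 3, 4)
W = np.random.randn(4, 5)
b = np.random.randn(5)
out, cache = temporal_affine_forward(x, W, b)
assert out.shape == (2, 3, 5)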
def uniform_regular_knot_vector(n, p, t0=0.0, t1=1.0):
"""
Create a p+1-regular uniform knot vector for
a given number of control points
Throws if n is too small
"""
# The minimum length of a p+1-regular knot vector
# is 2*(p+1)
if n < p+1:
raise RuntimeError("Too small n for a uniform regular knot vector")
# p+1 copies of t0 left and p+1 copies of t1 right
# but one of each in linspace
return [t0]*p + list(np.linspace(t0, t1, n+1-p)) + [t1]*p | 5,354,993 |
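
# Example (assumes the function above is in scope): a p+1-regular uniform knot
# vector for n=5 control points of degree p=2 has n+p+1 = 8 knots, with each end
# knot repeated p+1 = 3 times.
knots = uniform_regular_knot_vector(5, 2)
print(knots)  # [0.0, 0.0, 0.0, 0.333..., 0.666..., 1.0, 1.0, 1.0]
assert len(knots) == 5 + 2 + 1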
def include_file(filename, global_vars=None, local_vars=None):
"""
.. deprecated 2.1::
Don't use this any more.
It's not pythonic.
include file like php include.
include is very useful when we need to split large config file
"""
if global_vars is None:
global_vars = sys._getframe(1).f_globals
if local_vars is None:
local_vars = sys._getframe(1).f_locals
with open(filename, 'r') as f:
code = compile(f.read(), os.path.basename(filename), 'exec')
exec(code, global_vars, local_vars)
pass | 5,354,994 |
def rm(path: PathType,
*, is_dir: bool = True, globs: Optional[str] = None,
quiet: bool = True, verbose: bool = True):
# pylint: disable=invalid-name,too-many-arguments
"""Remove a directory, a file, or glob-pattern-matched items from S3."""
s3_command: str = (f'aws s3 rm {path}' +
((' --recursive' +
((' --exclude "*" ' +
' '.join(f'--include "{glob}"'
for glob in to_iterable(globs)))
if globs
else ''))
if is_dir
else '') +
(' --quiet' if quiet else ''))
if verbose:
_LOGGER.info(msg=(msg := ('Deleting ' +
((f'Globs "{globs}" @ '
if globs
else 'Directory ')
if is_dir
else '') +
f'"{path}"...')))
_LOGGER.debug(msg=f'Running: {s3_command}...')
tic: float = time.time()
os.system(command=s3_command)
if verbose:
toc: float = time.time()
_LOGGER.info(msg=f'{msg} done! <{toc - tic:,.1f} s>') | 5,354,995 |
def generateCM(labelValue, predictValue):
"""Generates the confusion matrix and rteturn it.
Args:
labelValue (np.ndarray): true values.
predictValue (np.ndarray): predicted values.
"""
FPMtx = np.logical_and((labelValue <= 0), (predictValue > 0))
FPIndices = np.argwhere(FPMtx)
FPNum = np.sum(FPMtx)
FNMtx = np.logical_and((labelValue > 0), (predictValue <= 0))
FNIndices = np.argwhere(FNMtx)
FNNum = np.sum(FNMtx)
TPMtx = np.logical_and((labelValue > 0), (predictValue > 0))
TPIndices = np.argwhere(TPMtx)
TPNum = np.sum(TPMtx)
TNMtx = np.logical_and((labelValue <= 0), (predictValue <= 0))
TNIndices = np.argwhere(TNMtx)
TNNum = np.sum(TNMtx)
accuracy = (TPNum+TNNum) / (TPNum+TNNum+FPNum+FNNum)
FPrate = FPNum / (FPNum+TNNum)
FNrate = FNNum / (TPNum+FNNum)
TNrate = TNNum / (FPNum+TNNum)
TPrate = TPNum / (TPNum+FNNum)
print(
"TP: {:.0f}, FN: {:.0f}, FP: {:.0f}, TN: {:.0f}".format(
TPNum, FNNum, FPNum, TNNum
)
)
cm = np.array([[TPrate, FNrate], [FPrate, TNrate]])
return cm, accuracy, TPIndices, FNIndices, FPIndices, TNIndices | 5,354,996 |
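
# Tiny worked example (assumes generateCM above is in scope): labels and
# predictions are split by sign, so with one sample in each cell the accuracy is
# 0.5 and all four rates are 0.5.
import numpy as np

labels = np.array([1, 1, -1, -1])
preds = np.array([1, -1, 1, -1])
cm, accuracy, *_ = generateCM(labels, preds)
assert accuracy == 0.5
assert np.allclose(cm, [[0.5, 0.5], [0.5, 0.5]])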
def get_cosine_similarity(word2vec: Word2Vec) -> np.ndarray:
"""Get the cosine similarity matrix from the embedding.
Warning; might be very big!
"""
return cosine_similarity(word2vec.wv.vectors) | 5,354,997 |
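
# Usage sketch (assumption: gensim >= 4 and the function above in scope): train a
# toy Word2Vec model and compute the vocabulary-sized similarity matrix. With a
# real model this matrix is |vocab| x |vocab|, which can be very large.
from gensim.models import Word2Vec

sentences = [["hello", "world"], ["hello", "there"], ["world", "peace"]]
model = Word2Vec(sentences, vector_size=16, min_count=1, seed=1)
sim = get_cosine_similarity(model)
assert sim.shape == (len(model.wv), len(model.wv))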
def get_bot_id() -> str:
"""
Gets the app bot ID
Returns:
The app bot ID
"""
response = CLIENT.auth_test()
return response.get('user_id') | 5,354,998 |
def plot_dr_viability(data, y_label, path):
"""Plots response vs viability. The DataFrame should contain columns ['Compound', 'Dose','logDose', 'Viability', 'Response'] (at least)."""
import pandas as pd
import matplotlib as mpl
import matplotlib.colors as colors
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(context = 'notebook', style = 'white', palette = 'dark')
import logging
logging.basicConfig(level = logging.INFO)
import os
df = data[['Compound_id', 'Dose','logDose', 'Viability', 'Response']]
df = df[(df != 0).all(1)] # drop zero values
df_mean = df.groupby(['Compound_id','Dose'], as_index = False).mean() # calculate response mean values
df_mean['resp_std'] = list(df.groupby(['Compound_id','Dose']).std().Response.values) # calculate response std
df_mean['via_std'] = list(df.groupby(['Compound_id','Dose']).std().Viability.values) # calculate viability std
for name, group in df_mean.groupby('Compound_id'): # group data by compounds
group = group.sort_values('Dose')
error_resp, error_via = group.resp_std, group.via_std
fig, ax1 = plt.subplots(figsize = (6,6))
plt.title(name, fontsize = 16)
plot1 = ax1.plot(group.logDose, group.Response, 'b', label = 'Response')
ax1.set_xlim(max(group.logDose)*1.07, min(group.logDose)*0.9)
ax1.set_ylabel(y_label, fontsize = 16)
ax1.set_ylim(0, df_mean.Response.max()*1.2)
ax1.errorbar(group.logDose, group.Response,yerr = error_resp, fmt ='o', color ='b', ecolor = 'lightblue')
ax2 = ax1.twinx()
plot2 = ax2.plot(group.logDose, group.Viability, 'g', label = 'Viability')
ax2.set_xlim(max(group.logDose)*1.07, min(group.logDose)*0.9)
ax2.set_ylabel('Viability', fontsize = 16)
ax2.set_ylim(0, 120)
ax2.errorbar(group.logDose, group.Viability,yerr = error_via, fmt ='o', color = 'g', ecolor = 'lightgreen')
ax1.set_xlabel('Dose, um', fontsize = 16)
# create legend
lines = plot1 + plot2
ax1.legend(lines, [l.get_label() for l in lines])
locs, labels = plt.xticks()
new_labels =[]
for loc in locs:
inv_log = lambda x:((10**-x)/(1e-6)) # inverse log calculator to set xticks
inv_x = round(inv_log(loc), 1)
new_labels.append(inv_x)
ax1.set_xticklabels(new_labels)
if path:
plt.savefig(path +'//' + name + '_raw_viability.png', bbox_inches = 'tight', dpi = 600)
logging.info(f'plot_dr_viability: {name}_raw_viability.png saved to the output folder')
else:
plt.savefig(os.getcwd() +'//' + name +'_raw_viability.png', bbox_inches =' tight', dpi = 600)
logging.info(f'plot_dr_viability: {name}_raw_viability.png saved to the working directory')
plt.close() | 5,354,999 |