content (string, lengths 35 to 762k) | sha1 (string, length 40) | id (int64, 0 to 3.66M)
---|---|---|
def str2bytes(s):
"""
Returns byte string representation of product state.
Parameters
----------
s : str
Representation of a product state, in terms of a string.
"""
return bitarray2bytes(str2bitarray(s)) | defb9f471ba6108a0d667b6f4e9522c8b6f38649 | 3,654,100 |
import re
def find_links(text, image_only=False):
"""
Find Markdown links in text and return a list of match objects.
Markdown links are expected to have the form [some txt](A-url.ext)
or, for images, ![some txt](A-url.ext).
Parameters
----------
text : str
Text in which to search for links.
image_only : bool
If ``True``, find only markdown image links, i.e. those that
begin with an exclamation mark.
Returns
-------
list
List of ``re.Match`` objects, one for each link found. Each object
has two named groups: 'link_text', which contains the part between
the square brackets, and 'link_url', which is the URL (or file name
for an image).
"""
if image_only:
markdown_link = \
re.compile(r"!\[(?P<link_text>.+?\n*?.*?)\]\((?P<link_url>.+?)\)",
flags=re.MULTILINE)
else:
markdown_link = \
re.compile(r"!?\[(?P<link_text>.+?\n*?.*?)\]\((?P<link_url>.+?)\)",
flags=re.MULTILINE)
groups = list(markdown_link.finditer(text))
return groups | 5f96672b48d3d911faf2e398c86f622676263d73 | 3,654,101 |
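A brief usage sketch for find_links, with made-up input text; the match objects expose the 'link_text' and 'link_url' named groups defined by the regex above:

text = "Read the [docs](https://example.com/docs) and see ![logo](logo.png)."

# All Markdown links, regular and image
for m in find_links(text):
    print(m.group("link_text"), "->", m.group("link_url"))

# Image links only (those starting with an exclamation mark)
image_links = find_links(text, image_only=True)
print([m.group("link_url") for m in image_links])  # ['logo.png']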
def least_one_row(data_frame):
"""
Check that the dataframe contains at least one row.
Input: pandas dataframe (or None)
Output: True or False
"""
if data_frame is not None and not data_frame.empty:
return True
return False | a72cbd3d504140547233481ec8340a8510e35f52 | 3,654,102 |
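A quick check of the behaviour described above (assumes pandas is installed):

import pandas as pd

print(least_one_row(pd.DataFrame({"a": [1, 2]})))  # True
print(least_one_row(pd.DataFrame()))               # False (no rows)
print(least_one_row(None))                         # False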
import inspect
from textwrap import dedent
import dash_slicer
def get_reference_docs():
"""Create reference documentation from the source code.
A bit like Sphinx autodoc, but using Markdown, and more basic.
Returns a str in Markdown format.
Note that this function is used to build the Dash Slicer chapter
in the Dash docs.
"""
methods = []
props = []
sig = str(inspect.signature(dash_slicer.VolumeSlicer.__init__)).replace(
"self, ", ""
)
doc = f"**class `VolumeSlicer{sig}`**"
doc += "\n\n" + dedent(dash_slicer.VolumeSlicer.__doc__).rstrip()
methods.append(doc)
for name in dir(dash_slicer.VolumeSlicer):
val = getattr(dash_slicer.VolumeSlicer, name)
if name.startswith("_") or not hasattr(val, "__doc__"):
pass
elif callable(val):
# Method
sig = str(inspect.signature(val)).replace("self, ", "")
doc = f"**method `VolumeSlicer.{name}{sig}`**"
doc += "\n\n" + dedent(val.__doc__).rstrip()
methods.append(doc)
else:
# Property
doc = f"**property `VolumeSlicer.{name}`**"
try:
typ = val.fget.__annotations__["return"].__name__
doc += f" (`{typ}`)"
except (AttributeError, KeyError):
pass
doc += ": " + dedent(val.__doc__).rstrip()
props.append(doc)
parts = []
parts.append("### The VolumeSlicer class")
parts += methods
parts += props
parts.append(dash_slicer.slicer.__doc__)
return "\n\n".join(parts) | e7727704026d6b0013f15e1be966ee92dbc19ef5 | 3,654,103 |
import math
from collections import defaultdict
def generate_label_colors(labels: list, colors: list, palette='Set2'):
"""Matches labels with colors
If there are more labels than colors, repeat and cycle through colors
"""
label_colors = defaultdict(dict)
num_repeats = math.ceil(len(labels) / len(colors))
for i, label in enumerate(labels):
label_colors[label] = (colors * num_repeats)[i]
return {**label_colors} | 8b4b35498d4478604e81987b127ab099ebb0e70b | 3,654,104 |
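Example usage with made-up labels and colors; with three labels and two colors the color list is cycled:

labels = ["cat", "dog", "bird"]
colors = ["#66c2a5", "#fc8d62"]
print(generate_label_colors(labels, colors))
# {'cat': '#66c2a5', 'dog': '#fc8d62', 'bird': '#66c2a5'}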
import os
import glob2
def get_xml_file_path_from_image_name(image_name, xml_dir_or_txt):
"""Retrieve xml filepath from xml dir
Args:
image_name: name (or path) of the image whose annotation is wanted
xml_dir_or_txt: directory containing xml files, or a txt file listing their paths
Returns:
xml_path: path of the matching xml file, or None if no match is found
"""
if os.path.isfile(xml_dir_or_txt):
filepaths = fileio.read_list_from_txt(xml_dir_or_txt, field=-1)
elif os.path.isdir(xml_dir_or_txt):
filepaths = list(glob2.glob(os.path.join(xml_dir_or_txt, '**', '*xml')))
else:
raise ValueError('xml_dir_or_txt is neither a directory nor file')
image_name_no_ext = os.path.splitext(os.path.basename(image_name))[0]
xml_path_list = []
for filepath in filepaths:
if image_name_no_ext in filepath:
xml_path_list.append(filepath)
# print(filepath)
assert len(xml_path_list) <= 1, 'xml_path_list expect 0 or 1 element but found {}!'.format(len(xml_path_list))
if len(xml_path_list) == 1:
xml_path = xml_path_list[0]
else:
xml_path = None
return xml_path | 1b9e62bf77e1055a230ea47ea628677bfb8cde86 | 3,654,105 |
def index():
"""
View root page function that returns the index page and its data
"""
# getting top headlines in sources
topheadlines_sources = get_sources('top-headlines')
business_sources = get_sources('business')
entertainment_sources = get_sources('entertainment')
title = 'Home - Welcome to your online News room'
# print(topheadlines_sources.articles)
# search_source = request.args.get(source_query)
# if search_source:
# return redirect(url_for('search',source_name=search_source))
# else:
return render_template('index.html', title = title , topheadlines_sources = topheadlines_sources, business_sources = business_sources, entertainment_sources = entertainment_sources) | df3c5d0471cde998f6ea5a0de2b41ab16ef775c6 | 3,654,106 |
import numpy as np
from sklearn import linear_model as lm  # assumed aliases; the body below uses np and lm
def start_ltm(tup,
taus,
w=0.1,
add_coh=False,
use_cv=False,
add_const=False,
verbose=False,
**kwargs):
"""Calculate the lifetime density map for given data.
Parameters
----------
tup : datatuple
tuple with wl, t, data
taus : list of floats
Used to build the basis vectors.
w : float, optional
Sigma used for calculating the basis, by default 0.1.
add_coh : bool, optional
If true, coherent contributions are added to the basis.
By default False.
use_cv : bool, optional
Whether to use cross-validation, by default False
add_const : bool, optional
Whether to add an explicit constant, by default False
verbose : bool, optional
Whether to be verbose, by default False
Returns
-------
tuple of (linear_model, coefs, fit, alphas)
The linear model is the sklearn model used. Coefs is the array
of the coefficients, fit contains the resulting fit and alphas
is an array of the applied alpha values when using cv.
"""
X = _make_base(tup, taus, w=w, add_const=add_const, add_coh=add_coh)
if not use_cv:
mod = lm.ElasticNet(**kwargs, l1_ratio=0.98)
else:
mod = lm.ElasticNetCV(**kwargs, l1_ratio=0.98)
mod.fit_intercept = not add_const
mod.warm_start = 1
coefs = np.empty((X.shape[1], tup.data.shape[1]))
fit = np.empty_like(tup.data)
alphas = np.empty(tup.data.shape[1])
for i in range(tup.data.shape[1]):
if verbose:
print(i, 'ha', end=';')
mod.fit(X, tup.data[:, i])
coefs[:, i] = mod.coef_.copy()
fit[:, i] = mod.predict(X)
if hasattr(mod, 'alpha_'):
alphas[i] = mod.alpha_
return mod, coefs, fit, alphas | d24d2fdc9740a12766b5424a20c98f4ab14222eb | 3,654,107 |
def manhattan_distance(origin, destination):
"""Return the Manhattan distance between the origin and the destination.
@type origin: Location
@type destination: Location
@rtype: int
>>> pt1 = Location(1,2)
>>> pt2 = Location(3,4)
>>> print(manhattan_distance(pt1, pt2))
4
"""
return (abs(origin.row - destination.row) +
abs(origin.column - destination.column)) | 0bcfd7767e44b0dcc47890dc4bcb2c054abb4bde | 3,654,108 |
import os
import json
def get_release():
"""Get the current release of the application.
By release, we mean the release from the version.json file à la Mozilla [1]
(if any). If this file has not been found, it defaults to "NA".
[1]
https://github.com/mozilla-services/Dockerflow/blob/master/docs/version_object.md
"""
# Try to get the current release from the version.json file generated by the
# CI during the Docker image build
try:
with open(os.path.join(BASE_DIR, "version.json"), encoding="utf8") as version:
return json.load(version)["version"]
except FileNotFoundError:
return "NA" | 5aece4ae245637243543e3bd5d81b4dabf0e968e | 3,654,109 |
from typing import Dict
from typing import List
import logging
# Assumed imports for the transformers, samplers and estimators used below.
import pandas as pd
from category_encoders import JamesSteinEncoder
from imblearn.over_sampling import SMOTENC, RandomOverSampler
from imblearn.pipeline import Pipeline as PipelineImb
from imblearn.under_sampling import EditedNearestNeighbours, RandomUnderSampler, TomekLinks
from sklearn.compose import ColumnTransformer
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import QuantileTransformer, StandardScaler
from timeit import default_timer as timer
def find_best_resampler(
features_train: pd.DataFrame, labels_train: pd.DataFrame, parameters: Dict
) -> List:
"""Compare several resamplers and find the best one to handle imbalanced labels.
Args:
features_train: Training data of independent features.
labels_train: Training data of next month payment default status.
parameters: Parameters defined in parameters.yml.
Returns:
A list containing the best resampler and the search CV results as DataFrame.
"""
col_dict = _get_column_dtype(features_train)
if labels_train.shape[0] == features_train.shape[0]:
labels_train.index = features_train.index
# Create transformers for each dtype
transformers = [
("num_n_trans", StandardScaler(), col_dict["num_normal"]),
(
"num_s_trans",
QuantileTransformer(random_state=parameters["random_state"]),
col_dict["num_skewed"],
),
("ordi_trans", "passthrough", col_dict["ordinal"]),
("bool_pass", "passthrough", col_dict["boolean"]),
(
"cat_trans",
JamesSteinEncoder(random_state=parameters["random_state"], return_df=False),
col_dict["category"],
),
]
transformers = _remove_unused_transformers(transformers)
# Combine the transformers as preprocessor
preprocessor = ColumnTransformer(transformers=transformers)
num_cols = col_dict["num_normal"] + col_dict["num_skewed"]
nomi_cols = col_dict["ordinal"] + col_dict["boolean"] + col_dict["category"]
# Extract target
target_train = labels_train["DEFAULT_PAY"]
# Initialize samplers
smotenc_smpl = SMOTENC(
categorical_features=[
x for x in range(len(num_cols), len(num_cols) + len(nomi_cols))
],
n_jobs=-1,
)
ro_smpl = RandomOverSampler()
enn_smpl = EditedNearestNeighbours(n_jobs=-1)
tl_smpl = TomekLinks(n_jobs=-1)
ru_smpl = RandomUnderSampler()
# Initialize classifier
clf = ExtraTreesClassifier(max_depth=10, n_jobs=-1)
# Create parameter grid
param_grid = {
"sampler": [None, smotenc_smpl, ro_smpl, enn_smpl, tl_smpl, ru_smpl],
"classifier": [clf],
}
# Create classifier pipeline
resampler = PipelineImb(
steps=[
("preprocessor", preprocessor),
("sampler", smotenc_smpl),
("classifier", clf),
]
)
# Start grid search
search_cv = GridSearchCV(
resampler,
param_grid=param_grid,
scoring=[
"precision",
"recall",
"f1",
"roc_auc",
],
refit="f1",
error_score=0,
verbose=2,
)
timer_start = timer()
search_cv.fit(features_train, target_train)
timer_end = timer()
# Log search duration
logger = logging.getLogger(__name__)
logger.info(
f"Best resampler search elapsed time : {_get_time_delta(timer_end - timer_start)}."
)
# Save search result as DataFrame
search_results = pd.DataFrame(search_cv.cv_results_).sort_values(
by=["rank_test_f1"]
)
# Remove unused steps from resampler
resampler = search_cv.best_estimator_
resampler.set_params(
steps=_remove_unused_steps(steps=resampler.steps, remove_clf=True)
)
return [resampler, search_results] | 29c14261e0c5131c8fad653bb286d03c73b8ddd7 | 3,654,110 |
def grid(dim, num):
"""Build a one-dim grid of num points"""
if dim.type == "categorical":
return categorical_grid(dim, num)
elif dim.type == "integer":
return discrete_grid(dim, num)
elif dim.type == "real":
return real_grid(dim, num)
elif dim.type == "fidelity":
return fidelity_grid(dim, num)
else:
raise TypeError(
"Grid Search only supports `real`, `integer`, `categorical` and `fidelity`: "
f"`{dim.type}`\n"
"For more information on dimension types, see "
"https://orion.readthedocs.io/en/stable/user/searchspace.html"
) | 1d59936882cd15372e0c13c02d80cbe739650134 | 3,654,111 |
def path(graph, source, target, excluded_edges=None, ooc_types=ooc_types):
""" Path of functions between two types """
if not isinstance(source, type):
source = type(source)
if not isinstance(target, type):
target = type(target)
for cls in concatv(source.mro(), _virtual_superclasses):
if cls in graph:
source = cls
break
# If both source and target are Out-Of-Core types then restrict ourselves
# to the graph of out-of-core types
if ooc_types:
oocs = tuple(ooc_types)
if issubclass(source, oocs) and issubclass(target, oocs):
graph = graph.subgraph([n for n in graph.nodes()
if issubclass(n, oocs)])
with without_edges(graph, excluded_edges) as g:
pth = nx.shortest_path(g, source=source, target=target, weight='cost')
edge = adjacency(graph)
def path_part(src, tgt):
node = edge[src][tgt]
return PathPart(src, tgt, node['func'], node['cost'])
return map(path_part, pth, pth[1:]) | 6bdf2adbfc754dc5350406570bc865ac17c088ce | 3,654,112 |
def is_resource_sufficient(order_ingredients):
"""Return True if there are enough resources to make the order, False otherwise."""
for item in order_ingredients:
if order_ingredients[item] > resources[item]:
print(f"Sorry not Enough {item} to Make Coffee.")
return False
return True | 758ab17760aac8f32b4d5fb93e42e01bc780507b | 3,654,113 |
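A small sketch of how this is typically called, assuming `resources` is the module-level dict the function reads:

resources = {"water": 300, "milk": 200, "coffee": 100}

print(is_resource_sufficient({"water": 250, "milk": 150, "coffee": 24}))  # True
print(is_resource_sufficient({"water": 400, "milk": 150, "coffee": 24}))  # prints a message, returns False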
import requests
def get_gh_releases_api(project, version=None):
"""
"""
# https://developer.github.com/v3/auth/
# self.headers = {'Authorization': 'token %s' % self.api_token}
# https://api.github.com/repos/pygame/stuntcat/releases/latest
repo = get_repo_from_url(project.github_repo)
if not repo:
return
url = f'https://api.github.com/repos/{repo}/releases'
if version is not None:
url += f'/{version}'
if Config.GITHUB_RELEASES_OAUTH is None:
headers = {}
else:
headers = {'Authorization': 'token %s' % Config.GITHUB_RELEASES_OAUTH}
resp = requests.get(
url,
headers = headers
)
if resp.status_code != 200:
raise ValueError('github api failed')
data = resp.json()
return data | 348857ab557277f7b26cb93866284ac899746524 | 3,654,114 |
import random
import logging
def weak_move(board):
"""Weak AI - makes a random valid move.
Args:
board: (Board) The game board.
Returns:
Array: Our chosen move.
"""
valid_moves = _get_moves(board, Square.black)
# Return a random valid move
our_move = valid_moves[random.randrange(0, len(valid_moves))]
logging.info('Weak AI chose r%sc%s', our_move[0], our_move[1])
return our_move | 6c978b58cca58baadaab5417b27adbf4444d59ff | 3,654,115 |
import numpy as np
def flow_to_image(flow):
"""
Input:
flow: optical flow array of shape [batch, height, width, 2]
Output:
Img array: RGB visualization of the flow as a float32 array
Description:
Transfer flow map to image.
Part of code forked from flownet.
"""
out = []
maxu = -999.
maxv = -999.
minu = 999.
minv = 999.
maxrad = -1
for i in range(flow.shape[0]):
u = flow[i, :, :, 0]
v = flow[i, :, :, 1]
idxunknow = (abs(u) > 1e7) | (abs(v) > 1e7)
u[idxunknow] = 0
v[idxunknow] = 0
maxu = max(maxu, np.max(u))
minu = min(minu, np.min(u))
maxv = max(maxv, np.max(v))
minv = min(minv, np.min(v))
rad = np.sqrt(u ** 2 + v ** 2)
maxrad = max(maxrad, np.max(rad))
u = u / (maxrad + np.finfo(float).eps)
v = v / (maxrad + np.finfo(float).eps)
img = compute_color(u, v)
out.append(img)
return np.float32(np.uint8(out)) | b7ed9cf684b4b818397f0329f3c7de1dbfa2ecd8 | 3,654,116 |
def parse_model_value(value, context):
"""
do interpolation first from context,
"x is {size}" with size = 5 will be interpolated to "x is 5"
then return interpolated string
:param value:
:param context:
:return:
"""
return value.format(**context) | 58cee6092bc03debe636ae8fa47878727457d334 | 3,654,117 |
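For example:

context = {"size": 5, "unit": "cm"}
print(parse_model_value("x is {size}{unit}", context))  # "x is 5cm"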
def apply_tropomi_operator(
filename,
n_elements,
gc_startdate,
gc_enddate,
xlim,
ylim,
gc_cache,
build_jacobian,
sensi_cache,
):
"""
Apply the tropomi operator to map GEOS-Chem methane data to TROPOMI observation space.
Arguments
filename [str] : TROPOMI netcdf data file to read
n_elements [int] : Number of state vector elements
gc_startdate [datetime64] : First day of inversion period, for GEOS-Chem and TROPOMI
gc_enddate [datetime64] : Last day of inversion period, for GEOS-Chem and TROPOMI
xlim [float] : Longitude bounds for simulation domain
ylim [float] : Latitude bounds for simulation domain
gc_cache [str] : Path to GEOS-Chem output data
build_jacobian [log] : Are we trying to map GEOS-Chem sensitivities to TROPOMI observation space?
sensi_cache [str] : If build_jacobian=True, this is the path to the GEOS-Chem sensitivity data
Returns
output [dict] : Dictionary with one or two fields:
- obs_GC : GEOS-Chem and TROPOMI methane data
- TROPOMI methane
- GEOS-Chem methane
- TROPOMI lat, lon
- TROPOMI lat index, lon index
If build_jacobian=True, also include:
- K : Jacobian matrix
"""
# Read TROPOMI data
TROPOMI = read_tropomi(filename)
if TROPOMI is None:
print(f"Skipping {filename} due to file processing issue.")
return TROPOMI
# We're only going to consider data within lat/lon/time bounds, with QA > 0.5, and with safe surface albedo values
sat_ind = filter_tropomi(TROPOMI, xlim, ylim, gc_startdate, gc_enddate)
# Number of TROPOMI observations
n_obs = len(sat_ind[0])
print("Found", n_obs, "TROPOMI observations.")
# If need to build Jacobian from GEOS-Chem perturbation simulation sensitivity data:
if build_jacobian:
# Initialize Jacobian K
jacobian_K = np.zeros([n_obs, n_elements], dtype=np.float32)
jacobian_K.fill(np.nan)
# Initialize a list to store the dates we want to look at
all_strdate = []
# For each TROPOMI observation
for k in range(n_obs):
# Get the date and hour
iSat = sat_ind[0][k] # lat index
jSat = sat_ind[1][k] # lon index
time = pd.to_datetime(str(TROPOMI["utctime"][iSat]))
strdate = time.round("60min").strftime("%Y%m%d_%H")
all_strdate.append(strdate)
all_strdate = list(set(all_strdate))
# Read GEOS_Chem data for the dates of interest
all_date_gc = read_all_geoschem(all_strdate, gc_cache, build_jacobian, sensi_cache)
# Initialize array with n_obs rows and 6 columns. Columns are TROPOMI CH4, GEOSChem CH4, longitude, latitude, II, JJ
obs_GC = np.zeros([n_obs, 6], dtype=np.float32)
obs_GC.fill(np.nan)
# For each TROPOMI observation:
for k in range(n_obs):
# Get GEOS-Chem data for the date of the observation:
iSat = sat_ind[0][k]
jSat = sat_ind[1][k]
p_sat = TROPOMI["pressures"][iSat, jSat, :]
dry_air_subcolumns = TROPOMI["dry_air_subcolumns"][iSat, jSat, :] # mol m-2
apriori = TROPOMI["methane_profile_apriori"][iSat, jSat, :] # mol m-2
avkern = TROPOMI["column_AK"][iSat, jSat, :]
time = pd.to_datetime(str(TROPOMI["utctime"][iSat]))
strdate = time.round("60min").strftime("%Y%m%d_%H")
GEOSCHEM = all_date_gc[strdate]
# Find GEOS-Chem lats & lons closest to the corners of the TROPOMI pixel
longitude_bounds = TROPOMI["longitude_bounds"][iSat, jSat, :]
latitude_bounds = TROPOMI["latitude_bounds"][iSat, jSat, :]
corners_lon_index = []
corners_lat_index = []
for l in range(4):
iGC = nearest_loc(longitude_bounds[l], GEOSCHEM["lon"])
jGC = nearest_loc(latitude_bounds[l], GEOSCHEM["lat"])
corners_lon_index.append(iGC)
corners_lat_index.append(jGC)
# If the tolerance in nearest_loc() is not satisfied, skip the observation
if np.nan in corners_lon_index + corners_lat_index:
continue
# Get lat/lon indexes and coordinates of GEOS-Chem grid cells closest to the TROPOMI corners
ij_GC = [(x, y) for x in set(corners_lon_index) for y in set(corners_lat_index)]
gc_coords = [(GEOSCHEM["lon"][i], GEOSCHEM["lat"][j]) for i, j in ij_GC]
# Compute the overlapping area between the TROPOMI pixel and GEOS-Chem grid cells it touches
overlap_area = np.zeros(len(gc_coords))
dlon = GEOSCHEM["lon"][1] - GEOSCHEM["lon"][0]
dlat = GEOSCHEM["lat"][1] - GEOSCHEM["lat"][0]
# Polygon representing TROPOMI pixel
polygon_tropomi = Polygon(np.column_stack((longitude_bounds, latitude_bounds)))
# For each GEOS-Chem grid cell that touches the TROPOMI pixel:
for gridcellIndex in range(len(gc_coords)):
# Define polygon representing the GEOS-Chem grid cell
coords = gc_coords[gridcellIndex]
geoschem_corners_lon = [
coords[0] - dlon / 2,
coords[0] + dlon / 2,
coords[0] + dlon / 2,
coords[0] - dlon / 2,
]
geoschem_corners_lat = [
coords[1] - dlat / 2,
coords[1] - dlat / 2,
coords[1] + dlat / 2,
coords[1] + dlat / 2,
]
polygon_geoschem = Polygon(
np.column_stack((geoschem_corners_lon, geoschem_corners_lat))
)
# Calculate overlapping area as the intersection of the two polygons
if polygon_geoschem.intersects(polygon_tropomi):
overlap_area[gridcellIndex] = polygon_tropomi.intersection(
polygon_geoschem
).area
# If there is no overlap between GEOS-Chem and TROPOMI, skip to next observation:
if sum(overlap_area) == 0:
continue
# =======================================================
# Map GEOS-Chem to TROPOMI observation space
# =======================================================
# Otherwise, initialize tropomi virtual xch4 and virtual sensitivity as zero
area_weighted_virtual_tropomi = 0 # virtual tropomi xch4
area_weighted_virtual_tropomi_sensitivity = 0 # virtual tropomi sensitivity
# For each GEOS-Chem grid cell that touches the TROPOMI pixel:
for gridcellIndex in range(len(gc_coords)):
# Get GEOS-Chem lat/lon indices for the cell
iGC, jGC = ij_GC[gridcellIndex]
# Get GEOS-Chem pressure edges for the cell
p_gc = GEOSCHEM["PEDGE"][iGC, jGC, :]
# Get GEOS-Chem methane for the cell
gc_CH4 = GEOSCHEM["CH4"][iGC, jGC, :]
# Get merged GEOS-Chem/TROPOMI pressure grid for the cell
merged = merge_pressure_grids(p_sat, p_gc)
# Remap GEOS-Chem methane to TROPOMI pressure levels
sat_CH4 = remap(
gc_CH4,
merged["data_type"],
merged["p_merge"],
merged["edge_index"],
merged["first_gc_edge"],
) # ppb
# Convert ppb to mol m-2
sat_CH4_molm2 = sat_CH4 * 1e-9 * dry_air_subcolumns # mol m-2
# Derive the column-averaged XCH4 that TROPOMI would see over this ground cell
# using eq. 46 from TROPOMI Methane ATBD, Hasekamp et al. 2019
virtual_tropomi_gridcellIndex = (
sum(apriori + avkern * (sat_CH4_molm2 - apriori))
/ sum(dry_air_subcolumns)
* 1e9
) # ppb
# Weight by overlapping area (to be divided out later) and add to sum
area_weighted_virtual_tropomi += (
overlap_area[gridcellIndex] * virtual_tropomi_gridcellIndex
) # ppb m2
# If building Jacobian matrix from GEOS-Chem perturbation simulation sensitivity data:
if build_jacobian:
# Get GEOS-Chem perturbation sensitivities at this lat/lon, for all vertical levels and state vector elements
sensi_lonlat = GEOSCHEM["Sensitivities"][iGC, jGC, :, :]
# Map the sensitivities to TROPOMI pressure levels
sat_deltaCH4 = remap_sensitivities(
sensi_lonlat,
merged["data_type"],
merged["p_merge"],
merged["edge_index"],
merged["first_gc_edge"],
) # mixing ratio, unitless
# Tile the TROPOMI averaging kernel
avkern_tiled = np.transpose(np.tile(avkern, (n_elements, 1)))
# Tile the TROPOMI dry air subcolumns
dry_air_subcolumns_tiled = np.transpose(
np.tile(dry_air_subcolumns, (n_elements, 1))
) # mol m-2
# Derive the change in column-averaged XCH4 that TROPOMI would see over this ground cell
tropomi_sensitivity_gridcellIndex = np.sum(
avkern_tiled * sat_deltaCH4 * dry_air_subcolumns_tiled, 0
) / sum(
dry_air_subcolumns
) # mixing ratio, unitless
# Weight by overlapping area (to be divided out later) and add to sum
area_weighted_virtual_tropomi_sensitivity += (
overlap_area[gridcellIndex] * tropomi_sensitivity_gridcellIndex
) # m2
# Compute virtual TROPOMI observation as weighted mean by overlapping area
# i.e., need to divide out area [m2] from the previous step
virtual_tropomi = area_weighted_virtual_tropomi / sum(overlap_area)
# Save actual and virtual TROPOMI data
obs_GC[k, 0] = TROPOMI["methane"][
iSat, jSat
] # Actual TROPOMI methane column observation
obs_GC[k, 1] = virtual_tropomi # Virtual TROPOMI methane column observation
obs_GC[k, 2] = TROPOMI["longitude"][iSat, jSat] # TROPOMI longitude
obs_GC[k, 3] = TROPOMI["latitude"][iSat, jSat] # TROPOMI latitude
obs_GC[k, 4] = iSat # TROPOMI index of longitude
obs_GC[k, 5] = jSat # TROPOMI index of latitude
if build_jacobian:
# Compute TROPOMI sensitivity as weighted mean by overlapping area
# i.e., need to divide out area [m2] from the previous step
jacobian_K[k, :] = area_weighted_virtual_tropomi_sensitivity / sum(
overlap_area
)
# Output
output = {}
# Always return the coincident TROPOMI and GEOS-Chem data
output["obs_GC"] = obs_GC
# Optionally return the Jacobian
if build_jacobian:
output["K"] = jacobian_K
return output | c449ddaf8113a3adfcd0e501cacc245bcf0af495 | 3,654,118 |
from typing import cast
import ruamel.yaml
def load_configuration(yaml: ruamel.yaml.YAML, filename: str) -> DictLike:
"""Load an analysis configuration from a file.
Args:
yaml: YAML object to use in loading the configuration.
filename: Filename of the YAML configuration file.
Returns:
dict-like object containing the loaded configuration
"""
with open(filename, "r") as f:
config = yaml.load(f)
return cast(DictLike, config) | 6c3b9b54b6e22b40c61c901b2bcb3b6af4847214 | 3,654,119 |
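A usage sketch, assuming ruamel.yaml is installed and a config.yaml file (hypothetical name) exists on disk:

import ruamel.yaml

y = ruamel.yaml.YAML(typ="safe")
config = load_configuration(y, "config.yaml")
print(config)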
def device_list(request):
"""
:param request: HTTP request object
:return: rendered device list page
"""
device_list = True
list = Device.objects.all()
return render(request, "back/device_list.html", locals()) | f4892f40831d25182b55414a666fbd62d6d978ef | 3,654,120 |
def L008_eval(segment, raw_stack, **kwargs):
""" This is a slightly odd one, because we'll almost always evaluate from a point a few places
after the problem site """
# We need at least two segments behind us for this to work
if len(raw_stack) < 2:
return True
else:
cm1 = raw_stack[-1]
cm2 = raw_stack[-2]
if cm2.name == 'comma':
if cm1.name not in ['whitespace', 'newline']:
# comma followed by something that isn't whitespace!
return cm2
elif cm1.raw not in ['\n', ' '] and not segment.is_comment:
return cm1
return True | 71c42999ffc76bd28a61b640cf85086b0b9e8d69 | 3,654,121 |
def overwrite_ruffus_args(args, config):
"""
:param args: parsed command-line arguments (argparse namespace)
:param config: configuration object, possibly containing a [Ruffus] section
:return: the updated args
"""
if config.has_section('Ruffus'):
cmdargs = dict()
cmdargs['draw_horizontally'] = bool
cmdargs['flowchart'] = str
cmdargs['flowchart_format'] = str
cmdargs['forced_tasks'] = lambda x: x.split()
cmdargs['history_file'] = str
cmdargs['jobs'] = int
cmdargs['just_print'] = bool
cmdargs['key_legend_in_graph'] = bool
cmdargs['log_file'] = str
cmdargs['recreate_database'] = bool
cmdargs['target_tasks'] = lambda x: x.split()
cmdargs['touch_files_only'] = bool
cmdargs['use_threads'] = bool
cmdargs['verbose'] = lambda x: x.split()
for k, v in config.items('Ruffus'):
try:
args.__setattr__(k, cmdargs[k](v))
except KeyError:
pass
return args | 6f947c362a37bfdc6df53c861783604999621a88 | 3,654,122 |
def read_sfr_df():
"""Reads and prepares the sfr_df
Parameters:
Returns:
sfr_df(pd.DataFrame): dataframe of the fits file mosdef_sfrs_latest.fits
"""
sfr_df = read_file(imd.loc_sfrs_latest)
sfr_df['FIELD_STR'] = [sfr_df.iloc[i]['FIELD'].decode(
"utf-8").rstrip() for i in range(len(sfr_df))]
return sfr_df | 9d0d16929ffd5043853096c01cafa00747104b9f | 3,654,123 |
import numpy as np
def redshift(x, vo=0., ve=0., def_wlog=False):
"""
x: The measured wavelength.
vo: Speed of the observer [km/s].
ve: Speed of the emitter [km/s].
Returns:
The emitted wavelength l'.
Notes:
l' = x * (1 + vo/c) / (1 + ve/c)   (cf. Wright & Eastman 2014)
"""
if np.isnan(vo):
vo = 0 # propagate nan as zero (@calibration in fib B)
a = (1.0+vo/c) / (1.0+ve/c)
if def_wlog:
return x + np.log(a) # logarithmic
#return x + a # logarithmic + approximation v << c
else:
return x * a
#return x / (1.0-v/c) | 0dee71d862d2dc4252033964a9adcb4428c5dfa9 | 3,654,124 |
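A numeric sketch, assuming `c` is the module-level speed of light in km/s used by redshift():

c = 299792.458  # speed of light [km/s]

lam_measured = 5000.0  # e.g. Angstrom
# Emitter receding at 30 km/s, observer at rest: the emitted wavelength comes out slightly bluer.
print(redshift(lam_measured, vo=0.0, ve=30.0))  # ~4999.5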
# Note: `mmap` below is assumed to be a module-level data object exposing
# `.wcs`, `.co` and `.v` (the standard-library `mmap` module has no such attributes).
def overlay_spectra_plot(array, nrow=5,ncol=5,**kwargs):
"""
Overlay spectra on a collapsed cube.
Parameters
----------
array : 3D numpy array
nrow : int
Number of rows in the figure.
ncol : int
Number of columns in the figure.
**kwargs : dict
Keyword arguments passed to `ax.plot` for the spectra
Returns
-------
fig : matplotlib.figure.Figure
The figure object.
"""
cube = np.nan_to_num(array)
fig,ax = plt.subplots(subplot_kw={'projection':mmap.wcs},figsize=(10,10))
fig.set_constrained_layout(False)
collapsed_cube = np.nanmean(cube,axis=2)
vmin,vmax = np.percentile(collapsed_cube[collapsed_cube>0], [0.1,99.9])
ax.imshow(collapsed_cube, cmap='Greys', norm=mpl.colors.LogNorm(vmin=vmin, vmax=vmax))
w = 1/ncol # in figure coords
h = 1/nrow # in figure coords
dr,dc = collapsed_cube.shape
# create grid of inset_axes on figure
for i in range(nrow):
for j in range(ncol):
b,l = i*h, j*w
#print(f'left:{l:0.1f} col: {j} bottom:{b:0.1f} row:{i}')
bl = [b,l]
ax2 = ax.inset_axes([l,b,w,h])
ax2.set_xticks([])
ax2.set_yticks([])
ax2.set_facecolor('none')
#ax.add_patch(mpl.patches.Rectangle([l,b],w,h,transform=ax.transAxes,color='r',alpha=0.5))
#ju.annotate(f'row:{i} col:{j}',l,b,ha='left',va='bottom',ax=ax,transform='axes')
#print(f'{int(b*dr)}:{int((b+h)*dr)},{int(l*dc)}:{int((l+w)*dc)}')
line = np.nanmean(mmap.co[sl][int(b*dr):int((b+h)*dr),int(l*dc):int((l+w)*dc),vsl],axis=(0,1))
ax2.plot(mmap.v[vsl],ju.scale_ptp(line),'r',lw=1,**kwargs)
ax2.set_ylim(ax2.get_ylim()[0],max(ax2.get_ylim()[1],.3))
#ax2.set_axis_off()
#ax.add_patch(mpl.patches.Rectangle([bl[0],bl[1]],w*dc,h*dr,transform=ax.transData,alpha=0.25))
return fig | 8abbbbe7667c57bea50575a58bf11c3b080c8608 | 3,654,125 |
def digest_from_rsa_scheme(scheme, hash_library=DEFAULT_HASH_LIBRARY):
"""
<Purpose>
Get digest object from RSA scheme.
<Arguments>
scheme:
A string that indicates the signature scheme used to generate
'signature'. Currently supported RSA schemes are defined in
`securesystemslib.keys.RSA_SIGNATURE_SCHEMES`
hash_library:
The crypto library to use for the given hash algorithm (e.g., 'hashlib').
<Exceptions>
securesystemslib.exceptions.FormatError, if the arguments are
improperly formatted.
securesystemslib.exceptions.UnsupportedAlgorithmError, if an unsupported
hashing algorithm is specified, or digest could not be generated with given
the algorithm.
securesystemslib.exceptions.UnsupportedLibraryError, if an unsupported
library was requested via 'hash_library'.
<Side Effects>
None.
<Returns>
Digest object
e.g.
hashlib.new(algorithm) or
PycaDiggestWrapper object
"""
# Are the arguments properly formatted? If not, raise
# 'securesystemslib.exceptions.FormatError'.
securesystemslib.formats.RSA_SCHEME_SCHEMA.check_match(scheme)
# Get hash algorithm from rsa scheme (hash algorithm id is specified after
# the last dash; e.g. rsassa-pss-sha256 -> sha256)
hash_algorithm = scheme.split('-')[-1]
return digest(hash_algorithm, hash_library) | 6eaf10657a0e80f2ddfa5eacbcc1bac72437ca51 | 3,654,126 |
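Sketch of typical usage, assuming securesystemslib is available and digest() returns a hashlib-compatible object:

digest_obj = digest_from_rsa_scheme('rsassa-pss-sha256', 'hashlib')
digest_obj.update(b'data to be signed')
print(digest_obj.hexdigest())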
import os
def getconfig(filename):
"""
1. Checks if the config file exists.
2. If not, creates it with the content in default_config.
3. Reads the config file and returns it.
Returns False in case of errors.
"""
global default_config
if os.path.exists(filename):
configfile = open(filename, "r")
else:
try:
f = open(filename, 'w')
f.write(default_config)
f.close()
configfile = open(filename, "r")
except IOError:
return False
ret = []
for line in configfile:
line = line.strip()
if not line or line[0] == '#':
continue
else:
ret.append(line)
configfile.close()
return ret | 8c9cf7110a7279638e2051454a1e26ca25f69e6b | 3,654,127 |
def table(content, accesskey:str ="", class_: str ="", contenteditable: str ="",
data_key: str="", data_value: str="", dir_: str="", draggable: str="",
hidden: str="", id_: str="", lang: str="", spellcheck: str="",
style: str="", tabindex: str="", title: str="", translate: str=""):
"""
Returns a table.\n
`content`: Contents of the table.\n
"""
g_args = global_args(accesskey, class_, contenteditable, data_key, data_value,
dir_, draggable, hidden, id_, lang, spellcheck, style,
tabindex, title, translate)
return f"<table {g_args}>{content}</table>\n" | b27cf1b1897bdbc764fff76edc2e53fa0aca7861 | 3,654,128 |
def _ev_match(
output_dir, last_acceptable_entry_index, certificate, entry_type,
extra_data, certificate_index):
"""Matcher function for the scanner. Returns the certificate's hash if
it is a valid, non-expired, EV certificate, None otherwise."""
# Only generate whitelist for non-precertificates. It is expected that if
# a precertificate was submitted then the issued SCT would be embedded
# in the final certificate.
if entry_type != client_pb2.X509_ENTRY:
return None
# No point in including expired certificates.
if certificate.is_expired():
return None
# Do not include entries beyond the last entry included in the whitelist
# generated on January 1st, 2015.
if certificate_index > last_acceptable_entry_index:
return None
# Only include certificates that have an EV OID.
matching_policies = find_matching_policies(certificate)
if not matching_policies:
return None
# Removed the requirement that the root of the chain matches the root that
# should be used for the EV policy OID.
# See https://code.google.com/p/chromium/issues/detail?id=524635 for
# details.
# Matching certificate
if output_dir:
_write_cert_and_chain(
output_dir, certificate, extra_data, certificate_index)
return calculate_certificate_hash(certificate) | acd8416546d5f687fd1bfc1f0edfc099cde4408d | 3,654,129 |
import numpy as np
def axis_ratio_disklike(scale=0.3, truncate=0.2):
"""Sample (one minus) the axis ratio of a disk-like galaxy from the Rayleigh distribution
Parameters
----------
scale : float
scale of the Rayleigh distribution; the bigger, the smaller the axis ratio
truncate : float
the minimum value of the axis ratio
Note
----
The default parameters are used in Lenspop ([1]_) and are expected for elliptical sources.
References
----------
.. [1] Collett, Thomas E.
"The population of galaxy–galaxy strong lenses in forthcoming optical imaging surveys."
The Astrophysical Journal 811.1 (2015): 20.
Returns
-------
float
the axis ratio
"""
q = 0.0
while q < truncate:
q = 1.0 - np.random.rayleigh(scale, size=None)
return q | d001ef0b2f5896f4e7f04f314cd4e71ffd97a277 | 3,654,130 |
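Example draws (requires numpy; seeded for reproducibility):

import numpy as np

np.random.seed(0)
samples = [axis_ratio_disklike() for _ in range(5)]
print(samples)  # five axis ratios, each at least 0.2 and below 1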
import numpy
def rk4(y0, t0, te, N, deriv, filename=None):
"""
General RK4 driver for
N coupled differential eq's,
fixed stepsize
Input:
- y0: Vector containing initial values for y
- t0: Initial time
- te: Ending time
- N: Number of steps
- deriv: See rk4_step
- filename: Optional, use if you want to write
data to file at each step.
Format used:
t y[0] y[1] ... (%10.15E)
Output:
If filename=None, return tuple containing:
- time: Array of times at which it has iterated over
- yout: N*len(y0) numpy array containing y for each timestep
If filename specified, None is returned.
"""
h = (te-t0)/float(N)
t = t0;
if filename is None:
#Setup arrays
time = numpy.zeros(N);
yout = []
#Inital values
yout.append(y0);
time[0] = t0;
t = t0;
#Loop over timesteps
for i in range(1, N):
yout.append(rk4_step(yout[i-1],t,h,deriv));
t = t0 + h*i;
time[i] = t;
return (time,yout)
else:
ofile = open(filename,'w')
#Format string used for output file
ostring = "%20.8E " + ("%20.8E "*len(y0)) + "\n"
#Initial values
y = y0
t = t0
foo = [t]; foo[1:] = y;
ofile.write(ostring % tuple(foo))
while (t < te):
y = rk4_step(y,t,h,deriv)
t +=h
foo = [t]; foo[1:] = y;
ofile.write(ostring % tuple(foo))
ofile.close()
return None | 93b7255fc95f06f765df12930efcf89338970ee6 | 3,654,131 |
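A usage sketch for a simple harmonic oscillator, assuming rk4_step(y, t, h, deriv) (not shown here) implements a standard RK4 step and deriv(y, t) returns dy/dt:

import numpy

def deriv(y, t):
    # y = [position, velocity]; dy/dt = [velocity, -position]
    return numpy.array([y[1], -y[0]])

time, yout = rk4(numpy.array([1.0, 0.0]), t0=0.0, te=10.0, N=1000, deriv=deriv)
print(time[-1], yout[-1])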
from typing import Dict
from typing import Tuple
def create_txt_substitute_record_rule_command(client: Client, args: Dict) -> Tuple[str, Dict, Dict]:
"""
Args:
client: Client object
args: Usually demisto.args()
Returns:
Outputs
"""
name = args.get('name')
rp_zone = args.get('rp_zone')
comment = args.get('comment')
text = args.get('text')
infoblox_object_type = 'record:rpz:txt'
raw_response = client.create_substitute_record_rule(infoblox_object_type, name=name, rp_zone=rp_zone,
comment=comment, text=text)
rule = raw_response.get('result', {})
fixed_keys_rule_res = {RESPONSE_TRANSLATION_DICTIONARY.get(key, string_to_context_key(key)): val for key, val in
rule.items()}
title = f'{INTEGRATION_NAME} - Response Policy Zone rule: {name} has been created:'
context = {
f'{INTEGRATION_CONTEXT_NAME}.ModifiedResponsePolicyZoneRules(val.Name && val.Name === obj.Name)': fixed_keys_rule_res}
human_readable = tableToMarkdown(title, fixed_keys_rule_res, headerTransform=pascalToSpace)
return human_readable, context, raw_response | ada3c412ec166eedd04edb2219396da6aef967ea | 3,654,132 |
def pot_rho_linear(SP, t, rho0=1025, a=2e-4, b=7e-4, SP0=35, t0=15):
"""
Potential density calculated using a linear equation of state:
rho = rho0 * (1 - a * (t - t0) + b * (SP - SP0))
Parameters
----------
SP : array-like
Salinity [g/kg]
t : array-like
Temperature [°C]
rho0 : float, optional
Constant density [kg/m^3]
a : float, optional
Thermal expansion coefficient [1/°C]
b : float, optional
saline expansion coefficient [kg/g]
SP0 : float, optional
Constant salinity [g/kg]
t0 : float, optional
Constant temperature [°C]
Returns
-------
pot_rho : ndarray
Potential density [kg/m^3]
"""
return rho0 * (1 - a * (t - t0) + b * (SP - SP0)) | 47dd8248239d2147ff50d1b179d3fc4392c173cb | 3,654,133 |
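For example, with made-up values:

import numpy as np

SP = np.array([34.5, 35.0, 35.5])  # salinity [g/kg]
t = np.array([10.0, 15.0, 20.0])   # temperature [°C]
print(pot_rho_linear(SP, t))       # approximately [1025.67, 1025.00, 1024.33] kg/m^3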
def oembed(url, params=""):
"""
Render an OEmbed-compatible link as an embedded item.
:param url: A URL of an OEmbed provider.
:return: The OEMbed ``<embed>`` code.
"""
# Note: this method isn't currently very efficient - the data isn't
# cached or stored.
kwargs = dict(urlparse.parse_qsl(params))
try:
return mark_safe(get_oembed_data(
url,
**kwargs
)['html'])
except (KeyError, ProviderException):
if settings.DEBUG:
return "No OEmbed data returned"
return "" | eece184ee8a17613607f190b2de002fb6026340c | 3,654,134 |
async def create_assc(conn : asyncpg.Connection, name : str, type : str,
base : str, leader : int) -> Association:
"""Create an association with the fields given.
type must be 'Brotherhood','College', or 'Guild'.
"""
psql = """
SELECT assc_id
FROM associations
WHERE assc_name = $1;
"""
if await conn.fetchval(psql, name) is not None:
raise Checks.NameTaken(name)
psql1 = """
WITH rows AS (
INSERT INTO associations
(assc_name, assc_type, leader_id, assc_icon, base)
VALUES ($1, $2, $3, $4, $5)
RETURNING assc_id
)
SELECT assc_id FROM rows;
"""
psql2 = """
UPDATE players
SET assc = $1, guild_rank = 'Leader'
WHERE user_id = $2;
"""
psql3 = """
INSERT INTO brotherhood_champions (assc_id)
VALUES ($1);
"""
assc_id = await conn.fetchval(
psql1, name, type, leader, Vars.DEFAULT_ICON, base)
await conn.execute(psql2, assc_id, leader)
if type == "Brotherhood":
await conn.execute(psql3, assc_id)
return await get_assc_by_id(conn, assc_id) | 3089b6033e31325d7b3942d9d887b89cec21ca1c | 3,654,135 |
import numpy as np
def filter_tof_to_csr(
tof_slices: np.ndarray,
push_indices: np.ndarray,
tof_indices: np.ndarray,
push_indptr: np.ndarray,
) -> tuple:
"""Get a CSR-matrix with raw indices satisfying push indices and tof slices.
Parameters
----------
tof_slices : np.int64[:, 3]
Each row of the array is assumed to be a (start, stop, step) tuple.
This array is assumed to be sorted, disjoint and strictly increasing
(i.e. np.all(np.diff(tof_slices[:, :2].ravel()) >= 0) = True).
push_indices : np.int64[:]
The push indices from where to retrieve the TOF slices.
tof_indices : np.uint32[:]
The self.tof_indices array of a TimsTOF object.
push_indptr : np.int64[:]
The self.push_indptr array of a TimsTOF object.
Returns
-------
(np.int64[:], np.int64[:], np.int64[:],)
An (indptr, values, columns) tuple, where indptr are push indices,
values raw indices, and columns the tof_slices.
"""
indptr = [0]
values = []
columns = []
for push_index in push_indices:
start = push_indptr[push_index]
end = push_indptr[push_index + 1]
idx = start
for i, (tof_start, tof_stop, tof_step) in enumerate(tof_slices):
idx += np.searchsorted(tof_indices[idx: end], tof_start)
tof_value = tof_indices[idx]
while (tof_value < tof_stop) and (idx < end):
if tof_value in range(tof_start, tof_stop, tof_step):
values.append(idx)
columns.append(i)
break # TODO what if multiple hits?
idx += 1
tof_value = tof_indices[idx]
indptr.append(len(values))
return np.array(indptr), np.array(values), np.array(columns) | 925fc93c6e275ead7d472469764fbbb1093aa6bd | 3,654,136 |
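A tiny synthetic sketch (not real TimsTOF data) showing the shape of the inputs and outputs:

import numpy as np

tof_slices = np.array([[10, 30, 1], [50, 70, 1]], dtype=np.int64)  # (start, stop, step) rows
push_indices = np.array([0, 1], dtype=np.int64)
tof_indices = np.array([10, 50, 20, 60], dtype=np.uint32)
push_indptr = np.array([0, 2, 4], dtype=np.int64)

indptr, values, columns = filter_tof_to_csr(tof_slices, push_indices, tof_indices, push_indptr)
print(indptr)   # [0 2 4]   push boundaries
print(values)   # [0 1 2 3] raw indices
print(columns)  # [0 1 0 1] matching tof slice per raw index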
def get_path_from_query_string(req):
"""Gets path from query string
Args:
req (flask.request): Request object from Flask
Returns:
path (str): Value of "path" parameter from query string
Raises:
exceptions.UserError: If "path" is not found in query string
"""
if req.args.get('path') is None:
raise exceptions.UserError('Path not found in query string')
return req.args.get('path') | 7e279e8e33dbbaa6ceb18d4b9a61723826522ec3 | 3,654,137 |
import numpy as np
import scipy.special
def entropy_grassberger(n, base=None):
"""
Estimate the entropy of a discrete distribution from counts per category
n: array of counts
base: base in which to measure the entropy (default: nats)
"""
N = np.sum(n)
entropy = np.log(N) - np.sum(n*scipy.special.digamma(n+1e-20))/N
if base:
entropy /= np.log(base)
return entropy | 1dc5ced1f5bb43bce30fa9501632825648b19cb8 | 3,654,138 |
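For example:

import numpy as np

counts = np.array([10, 5, 3, 1])
print(entropy_grassberger(counts))          # entropy estimate in nats
print(entropy_grassberger(counts, base=2))  # entropy estimate in bits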
def get_param_layout():
"""Get layout for causality finding parameters definition window
Parameters
----------
Returns
-------
`List[List[Element]]`
Layout for causality finding parameters window
"""
box = [
[
sg.Text('Parameters')
],
[
sg.Text(' Epochs: '),sg.Input(key=cte.EPOCHS,size=(10,1), default_text="1000"),
sg.Text(' Kernel Size: '),sg.Input(key=cte.KERNEL,size=(10,1), default_text="4")
],
[
sg.Text(' Depth: '),sg.Input(key=cte.LEVEL,size=(10,1), default_text="1"),
sg.Text(' Learning Rate: '),sg.Input(key=cte.RATE,size=(10,1), default_text="0.01")
],
[
sg.Text(' Dilation: '),sg.Input(key=cte.DILATION,size=(10,1), default_text="4"),
sg.Text(' Significance: '),sg.Input(key=cte.SIGNIFICANCE,size=(10,1), default_text="0.8")
],
[
sg.Text('Optimizer: '),sg.Input(key=cte.OPTIMIZER,size=(10,1), default_text="Adam"),
sg.Text(' Log Interval: '),sg.Input(key=cte.LOGINT,size=(10,1), default_text="500")
],
[sg.Button('Create Causal Graph', key=cte.CREATE)]
]
return box | e09db05b848e71449d7d17004793e8ce167dca1a | 3,654,139 |
def senti_histplot(senti_df):
"""histogram plot for sentiment"""
senti_hist = (
alt.Chart(senti_df)
.mark_bar()
.encode(alt.Y(cts.SENTI, bin=True), x="count()", color=cts.SENTI,)
.properties(height=300, width=100)
).interactive()
return senti_hist | 731fda9cf5af49fdbec7d1a16edbf65148e67d5a | 3,654,140 |
def pd_df_sampling(df, coltarget="y", n1max=10000, n2max=-1, isconcat=1):
"""
DownSampler
:param df: input dataframe
:param coltarget: name of the binary class column
:param n1max: number of rows to sample from class 0
:param n2max: number of rows to sample from class 1 (-1 means all of them)
:param isconcat: if 1, return a single shuffled dataframe; otherwise return both samples
:return: the concatenated shuffled sample, or the (class-1, class-0) samples
"""
df1 = df[df[coltarget] == 0].sample(n=n1max)
n2max = len(df[df[coltarget] == 1]) if n2max == -1 else n2max
df0 = df[df[coltarget] == 1].sample(n=n2max)
if isconcat:
df2 = pd.concat((df1, df0))
df2 = df2.sample(frac=1.0, replace=True)
return df2
else:
print("y=1", n2max, "y=0", len(df1))
return df0, df1 | 2cec90c189d00a8cd3ec19224fa7a2685c135bf2 | 3,654,141 |
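A usage sketch on a synthetic, imbalanced dataframe:

import numpy as np
import pandas as pd

df = pd.DataFrame({
    "x": np.random.randn(1000),
    "y": [0] * 900 + [1] * 100,
})
balanced = pd_df_sampling(df, coltarget="y", n1max=100)
print(balanced["y"].value_counts())  # roughly 100 rows per class (resampled with replacement)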
from functools import partial
from pathlib import Path
from argparse import (
ArgumentParser,
ArgumentDefaultsHelpFormatter,
)
from packaging.version import Version
from .version import check_latest, is_flagged
from niworkflows.utils.spaces import Reference, OutputReferencesAction
from json import loads
def _build_parser():
"""Build parser object."""
def _path_exists(path, parser):
"""Ensure a given path exists."""
if path is None or not Path(path).exists():
raise parser.error(f"Path does not exist: <{path}>.")
return Path(path).absolute()
def _min_one(value, parser):
"""Ensure an argument is not lower than 1."""
value = int(value)
if value < 1:
raise parser.error("Argument can't be less than one.")
return value
def _to_gb(value):
scale = {"G": 1, "T": 10 ** 3, "M": 1e-3, "K": 1e-6, "B": 1e-9}
digits = "".join([c for c in value if c.isdigit()])
units = value[len(digits):] or "M"
return int(digits) * scale[units[0]]
def _drop_sub(value):
value = str(value)
return value.lstrip("sub-")
def _bids_filter(value):
if value and Path(value).exists():
return loads(Path(value).read_text())
verstr = f"dMRIPrep v{config.environment.version}"
currentv = Version(config.environment.version)
is_release = not any(
(currentv.is_devrelease, currentv.is_prerelease, currentv.is_postrelease)
)
parser = ArgumentParser(
description="dMRIPrep: dMRI PREProcessing workflows v{}".format(
config.environment.version
),
formatter_class=ArgumentDefaultsHelpFormatter,
)
PathExists = partial(_path_exists, parser=parser)
PositiveInt = partial(_min_one, parser=parser)
# Arguments as specified by BIDS-Apps
# required, positional arguments
# IMPORTANT: they must go directly with the parser object
parser.add_argument(
"bids_dir",
action="store",
type=PathExists,
help="the root folder of a BIDS valid dataset (sub-XXXXX folders should "
"be found at the top level in this folder).",
)
parser.add_argument(
"output_dir",
action="store",
type=Path,
help="the output path for the outcomes of preprocessing and visual " "reports",
)
parser.add_argument(
"analysis_level",
choices=["participant"],
help='processing stage to be run, only "participant" in the case of '
"dMRIPrep (see BIDS-Apps specification).",
)
# optional arguments
parser.add_argument("--version", action="version", version=verstr)
g_bids = parser.add_argument_group("Options for filtering BIDS queries")
g_bids.add_argument(
"--skip-bids-validation",
action="store_true",
default=False,
help="assume the input dataset is BIDS compliant and skip the validation",
)
g_bids.add_argument(
"--participant-label",
"--participant_label",
action="store",
nargs="+",
type=_drop_sub,
help="a space delimited list of participant identifiers or a single "
"identifier (the sub- prefix can be removed)",
)
g_bids.add_argument(
"--bids-filter-file",
dest="bids_filters",
action="store",
type=_bids_filter,
metavar="PATH",
help="a JSON file describing custom BIDS input filter using pybids "
"{<suffix>:{<entity>:<filter>,...},...} "
"(https://github.com/bids-standard/pybids/blob/master/bids/layout/config/bids.json)",
)
g_bids.add_argument(
"--anat-derivatives", action='store', metavar="PATH", type=PathExists,
help="Reuse the anatomical derivatives from another fMRIPrep run or calculated "
"with an alternative processing tool (NOT RECOMMENDED)."
)
g_perfm = parser.add_argument_group("Options to handle performance")
g_perfm.add_argument(
"--nprocs",
"--nthreads",
"--n_cpus",
"-n-cpus",
action="store",
type=PositiveInt,
help="maximum number of threads across all processes",
)
g_perfm.add_argument(
"--omp-nthreads",
action="store",
type=PositiveInt,
help="maximum number of threads per-process",
)
g_perfm.add_argument(
"--mem",
"--mem_mb",
"--mem-mb",
dest="memory_gb",
action="store",
type=_to_gb,
help="upper bound memory limit for dMRIPrep processes",
)
g_perfm.add_argument(
"--low-mem",
action="store_true",
help="attempt to reduce memory usage (will increase disk usage "
"in working directory)",
)
g_perfm.add_argument(
"--use-plugin",
action="store",
default=None,
help="nipype plugin configuration file",
)
g_perfm.add_argument(
"--anat-only", action="store_true", help="run anatomical workflows only"
)
g_perfm.add_argument(
"--boilerplate_only",
action="store_true",
default=False,
help="generate boilerplate only",
)
g_perfm.add_argument(
"--md-only-boilerplate",
action="store_true",
default=False,
help="skip generation of HTML and LaTeX formatted citation with pandoc",
)
g_perfm.add_argument(
"-v",
"--verbose",
dest="verbose_count",
action="count",
default=0,
help="increases log verbosity for each occurrence, debug level is -vvv",
)
g_conf = parser.add_argument_group("Workflow configuration")
g_conf.add_argument(
"--ignore",
required=False,
action="store",
nargs="+",
default=[],
choices=["fieldmaps", "slicetiming", "sbref"],
help="ignore selected aspects of the input dataset to disable corresponding "
"parts of the workflow (a space delimited list)",
)
g_conf.add_argument(
"--longitudinal",
action="store_true",
help="treat dataset as longitudinal - may increase runtime",
)
g_conf.add_argument(
"--output-spaces",
nargs="*",
action=OutputReferencesAction,
help="""\
Standard and non-standard spaces to resample anatomical and functional images to. \
Standard spaces may be specified by the form \
``<SPACE>[:cohort-<label>][:res-<resolution>][...]``, where ``<SPACE>`` is \
a keyword designating a spatial reference, and may be followed by optional, \
colon-separated parameters. \
Non-standard spaces imply specific orientations and sampling grids. \
The default value of this flag (meaning, if the argument is not include in the command line) \
is ``--output-spaces run`` - the original space and sampling grid of the original DWI run. \
Important to note, the ``res-*`` modifier does not define the resolution used for \
the spatial normalization. To generate no DWI outputs (if that is intended for some reason), \
use this option without specifying any spatial references. For further details, please check out \
https://www.nipreps.org/dmriprep/en/%s/spaces.html"""
% (currentv.base_version if is_release else "latest"),
)
# ANTs options
g_ants = parser.add_argument_group("Specific options for ANTs registrations")
g_ants.add_argument(
"--skull-strip-template",
default="OASIS30ANTs",
type=Reference.from_string,
help="select a template for skull-stripping with antsBrainExtraction",
)
g_ants.add_argument(
"--skull-strip-fixed-seed",
action="store_true",
help="do not use a random seed for skull-stripping - will ensure "
"run-to-run replicability when used with --omp-nthreads 1",
)
# Fieldmap options
g_fmap = parser.add_argument_group("Specific options for handling fieldmaps")
g_fmap.add_argument(
"--fmap-bspline",
action="store_true",
default=False,
help="fit a B-Spline field using least-squares (experimental)",
)
g_fmap.add_argument(
"--fmap-no-demean",
action="store_false",
default=True,
help="do not remove median (within mask) from fieldmap",
)
# SyN-unwarp options
g_syn = parser.add_argument_group("Specific options for SyN distortion correction")
g_syn.add_argument(
"--use-syn-sdc",
action="store_true",
default=False,
help="EXPERIMENTAL: Use fieldmap-free distortion correction",
)
g_syn.add_argument(
"--force-syn",
action="store_true",
default=False,
help="EXPERIMENTAL/TEMPORARY: Use SyN correction in addition to "
"fieldmap correction, if available",
)
# FreeSurfer options
g_fs = parser.add_argument_group("Specific options for FreeSurfer preprocessing")
g_fs.add_argument(
"--fs-license-file",
metavar="PATH",
type=PathExists,
help="Path to FreeSurfer license key file. Get it (for free) by registering"
" at https://surfer.nmr.mgh.harvard.edu/registration.html",
)
g_fs.add_argument(
"--fs-subjects-dir",
metavar="PATH",
type=Path,
help="Path to existing FreeSurfer subjects directory to reuse. "
"(default: OUTPUT_DIR/freesurfer)",
)
# Surface generation xor
g_surfs = parser.add_argument_group("Surface preprocessing options")
g_surfs_xor = g_surfs.add_mutually_exclusive_group()
g_surfs_xor.add_argument(
"--no-submm-recon",
action="store_false",
dest="hires",
help="disable sub-millimeter (hires) reconstruction",
)
g_surfs_xor.add_argument(
"--fs-no-reconall",
action="store_false",
dest="run_reconall",
help="disable FreeSurfer surface preprocessing.",
)
g_other = parser.add_argument_group("Other options")
g_other.add_argument(
"-w",
"--work-dir",
action="store",
type=Path,
default=Path("work").absolute(),
help="path where intermediate results should be stored",
)
g_other.add_argument(
"--clean-workdir",
action="store_true",
default=False,
help="Clears working directory of contents. Use of this flag is not"
"recommended when running concurrent processes of dMRIPrep.",
)
g_other.add_argument(
"--resource-monitor",
action="store_true",
default=False,
help="enable Nipype's resource monitoring to keep track of memory and CPU usage",
)
g_other.add_argument(
"--reports-only",
action="store_true",
default=False,
help="only generate reports, don't run workflows. This will only rerun report "
"aggregation, not reportlet generation for specific nodes.",
)
g_other.add_argument(
"--run-uuid",
action="store",
default=None,
help="Specify UUID of previous run, to include error logs in report. "
"No effect without --reports-only.",
)
g_other.add_argument(
"--write-graph",
action="store_true",
default=False,
help="Write workflow graph.",
)
g_other.add_argument(
"--stop-on-first-crash",
action="store_true",
default=False,
help="Force stopping on first crash, even if a work directory"
" was specified.",
)
g_other.add_argument(
"--notrack",
action="store_true",
default=False,
help="Opt-out of sending tracking information of this run to "
"the dMRIPREP developers. This information helps to "
"improve dMRIPREP and provides an indicator of real "
"world usage crucial for obtaining funding.",
)
g_other.add_argument(
"--sloppy",
dest="debug",
action="store_true",
default=False,
help="Use low-quality tools for speed - TESTING ONLY",
)
latest = check_latest()
if latest is not None and currentv < latest:
print(
"""\
You are using dMRIPrep-%s, and a newer version of dMRIPrep is available: %s.
Please check out our documentation about how and when to upgrade:
https://dmriprep.readthedocs.io/en/latest/faq.html#upgrading"""
% (currentv, latest),
file=sys.stderr,
)
_blist = is_flagged()
if _blist[0]:
_reason = _blist[1] or "unknown"
print(
"""\
WARNING: Version %s of dMRIPrep (current) has been FLAGGED
(reason: %s).
That means some severe flaw was found in it and we strongly
discourage its usage."""
% (config.environment.version, _reason),
file=sys.stderr,
)
return parser | ac2a2b8b3f6cab89f0720cb666ae3c7b9d69d730 | 3,654,142 |
def get_deliverer(batch_size, max_staleness, session):
""" Helper function to returns the correct deliverer class for the
batch_size and max_stalennes parameters
"""
if batch_size < 1:
return SimpleDeliverer(session)
else:
return BatchDeliverer(session, batch_size, max_staleness) | 544740a5f38befc4d8123e7835ba758feac2d35b | 3,654,143 |
import copy
import numpy as np
def trace_fweight_deprecated(fimage, xinit, ltrace=None, rtraceinvvar=None, radius=3.):
""" Python port of trace_fweight.pro from IDLUTILS
Parameters:
-----------
fimage: 2D ndarray
Image for tracing
xinit: ndarray
Initial guesses for x-trace
invvar: ndarray, optional
Inverse variance array for the image
radius: float, optional
Radius for centroiding; default to 3.0
"""
# Init
nx = fimage.shape[1]
ny = fimage.shape[0]
ncen = len(xinit)
xnew = copy.deepcopy(xinit)
xerr = np.zeros(ncen) + 999.
ycen = np.arange(ny, dtype=int)
invvar = 0. * fimage + 1.
x1 = xinit - radius + 0.5
x2 = xinit + radius + 0.5
ix1 = np.floor(x1).astype(int)
ix2 = np.floor(x2).astype(int)
fullpix = int(np.maximum(np.min(ix2-ix1)-1, 0))
sumw = np.zeros(ny)
sumxw = np.zeros(ny)
sumwt = np.zeros(ny)
sumsx1 = np.zeros(ny)
sumsx2 = np.zeros(ny)
qbad = np.array([False]*ny)
if invvar is None:
invvar = np.zeros_like(fimage) + 1.
# Compute
for ii in range(0,fullpix+3):
spot = ix1 - 1 + ii
ih = np.clip(spot,0,nx-1)
xdiff = spot - xinit
#
wt = np.clip(radius - np.abs(xdiff) + 0.5,0,1) * ((spot >= 0) & (spot < nx))
sumw = sumw + fimage[ycen,ih] * wt
sumwt = sumwt + wt
sumxw = sumxw + fimage[ycen,ih] * xdiff * wt
var_term = wt**2 / (invvar[ycen,ih] + (invvar[ycen,ih] == 0))
sumsx2 = sumsx2 + var_term
sumsx1 = sumsx1 + xdiff**2 * var_term
#qbad = qbad or (invvar[ycen,ih] <= 0)
qbad = np.any([qbad, invvar[ycen,ih] <= 0], axis=0)
# Fill up
good = (sumw > 0) & (~qbad)
if np.sum(good) > 0:
delta_x = sumxw[good]/sumw[good]
xnew[good] = delta_x + xinit[good]
xerr[good] = np.sqrt(sumsx1[good] + sumsx2[good]*delta_x**2)/sumw[good]
bad = np.any([np.abs(xnew-xinit) > radius + 0.5, xinit < radius - 0.5, xinit > nx - 0.5 - radius], axis=0)
if np.sum(bad) > 0:
xnew[bad] = xinit[bad]
xerr[bad] = 999.0
# Return
return xnew, xerr | e927113477a277ceb9acc8ce6af8bd1689c2913c | 3,654,144 |
from datetime import datetime
def home():
"""Renders the card page."""
cardStack = model.CardStack()
return render_template(
'cards.html',
title ='POSTIN - Swipe',
cardSet = cardStack.cardList,
year=datetime.now().year,
) | 203021b1da4833418aafd3e3e20964e3b765a816 | 3,654,145 |
import os
import io
import sys
import tarfile
def uncompress(filename: str, path: str = os.getcwd()) -> None:
"""Uncompress a tar file
Args:
filename: a tar file (tar, tgz, ...)
path: where the filename will be uncompressed
Example:
>>> import robotathome as rh
>>> rh.uncompress('~/WORKSPACE/Robot@Home2_db.tgz')
"""
class ProgressFileObject(io.FileIO):
def __init__(self, path, *args, **kwargs):
self._total_size = os.path.getsize(path)
io.FileIO.__init__(self, path, *args, **kwargs)
def read(self, size):
sys.stdout.write("\rUncompressing %d of %d MB (%d%%)" % (self.tell() / 1048576, self._total_size / 1048576, self.tell()*100/self._total_size))
sys.stdout.flush()
return io.FileIO.read(self, size)
try:
rh.logger.info("Extracting files from {}: ", (os.path.basename(filename)))
file_obj=ProgressFileObject(os.path.expanduser(filename))
tf = tarfile.open(fileobj=file_obj)
tf.extractall(path=os.path.expanduser(path))
file_obj.close()
except Exception as error_code:
rh.logger.info("Error: {}", error_code)
else:
tf.close()
print()
rh.logger.info("Extraction success. Don't forget to remove {} if you are not plenty of space.",
(os.path.basename(filename))) | 797c8a0073fdc73d9a42f5089bc38ff51d71ceb7 | 3,654,146 |
def index(request):
"""
User profile page.
"""
user = request.user
profile = user.userprofile
context = collect_view_data(request, 'profile')
context['user'] = user
context['profile'] = profile
context['uform'] = UserForm(request, request.user, init=True)
context['upform'] = UserProfileForm(request, profile, init=True)
context['pform'] = ChangePasswordForm(request.user)
context['sform'] = SSHKeyForm(request, request.user)
context['ssh_keys'] = request.user.usersshkey_set.all().order_by('id')
context['email_aform'] = EmailActivationProfileForm(profile.email_token)
context['phone_aform'] = PhoneActivationProfileForm(profile.phone_token)
return render(request, 'gui/profile/profile.html', context) | 1a8cc98ba476e21986f79ec8e662bb222df79fae | 3,654,147 |
def user_confirm_email(token):
"""Confirm a user account using his email address and a token to approve.
Parameters
----------
token : str
The token associated with an email address.
"""
try:
email = ts.loads(token, max_age=86400)
except Exception as e:
logger.error(str(e))
abort(404)
user = User.query.filter_by(email=email).one_or_none()
if user is None:
flash(
'You did not sign-up yet to RAMP. Please sign-up first.',
category='error'
)
return redirect(url_for('auth.sign_up'))
elif user.access_level in ('user', 'admin'):
flash(
"Your account is already approved. You don't need to confirm your "
"email address", category='error'
)
return redirect(url_for('auth.login'))
elif user.access_level == 'asked':
flash(
"Your email address already has been confirmed. You need to wait "
"for an approval from a RAMP administrator", category='error'
)
return redirect(url_for('general.index'))
User.query.filter_by(email=email).update({'access_level': 'asked'})
db.session.commit()
admin_users = User.query.filter_by(access_level='admin')
for admin in admin_users:
subject = 'Approve registration of {}'.format(
user.name
)
body = body_formatter_user(user)
url_approve = ('http://{}/sign_up/{}'
.format(app.config['DOMAIN_NAME'], user.name))
body += 'Click on the link to approve the registration '
body += 'of this user: {}'.format(url_approve)
send_mail(admin.email, subject, body)
flash(
"An email has been sent to the RAMP administrator(s) who will "
"approve your account"
)
return redirect(url_for('auth.login')) | 3f26a4872af9759165d0592ac8d966f2e27a9bf6 | 3,654,148 |
def num_zeros_end(num):
"""
Counts the number of zeros at the end
of the number 'num'.
"""
iszero = True
num_zeros = 0
i = len(num)-1
while (iszero == True) and (i != 0):
if num[i] == "0":
num_zeros += 1
elif num[i] != "0":
iszero = False
i -= 1
return num_zeros | f227cce65e26a0684a10755031a4aeff2156015a | 3,654,149 |
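# Usage sketch (assumption: the num_zeros_end() function above is in scope).
# "1020300" ends with two zeros; the zero in the middle is not counted.
assert num_zeros_end("1020300") == 2
assert num_zeros_end("5") == 0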
from typing import List
def batch_summarize(texts: List[str]) -> List[str]:
"""Summarizes the texts (local mode).
:param texts: The texts to summarize.
:type texts: List[str]
:return: The summarized texts.
:rtype: List[str]
"""
if _summarizer is None:
load_summarizer()
assert _summarizer is not None
tokenizer = get_summarizer_tokenizer()
prompts = [summarizer_prompt.format(text=text) for text in texts]
information = {
"prompt_length": max(len(tokenizer.encode(prompt)) for prompt in prompts)
}
parameters = format_parameters_to_local(summarizer_parameters, information)
response = _summarizer(prompts, **parameters)
return [
cut_on_stop(choices[0]["generated_text"], summarizer_parameters["stop"])
for choices in response
] | 7c05b8f612faab808fbeb1ef7c21f8b3b2487be5 | 3,654,150 |
def Vp_estimation(z, T, x, g=param.g):
""" Estimation of the Vp profile from the results of solving the system.
"""
DT = T - T[-1] # temperature variation in the layer compared to T[ICB]
drhoP = -param.rhoH**2. * g * z / Seismic_observations.calcK(z)
drhoT = -param.rhoH * param.alpha * DT # *(Mp*h+X*Mx)
rhoL = (param.rhoD - (1 - x[0]) * param.rhoH - drhoT[0] - drhoP[0]) / x[0]
# print rhoL
# rhoL2=x[0]/(1/(rhoD-drhoT[0]-drhoP[1])-(1-x[0])/rhoH)
# print rhoL
rho_new = x * rhoL + (1 - x) * param.rhoH + drhoT + drhoP
Vp_new = np.sqrt(Seismic_observations.calcK(z) / rho_new)
return rho_new, Vp_new | 4f1e1936cc98cfd84d87a651c8deac5bb7aa39e0 | 3,654,151 |
def pp_date(dt):
"""
Human-readable (i.e. pretty-print) dates, e.g. for spreadsheets:
See http://docs.python.org/tutorial/stdlib.html
e.g. 31-Oct-2011
"""
d = date_to_datetime(dt)
return d.strftime('%d-%b-%Y') | a6c8cd97785212afebb2b8948117815f5553dc24 | 3,654,152 |
import copy
import math
def optimizer_p(cd, path, i, obs, path_penalty):
"""Optimizer of the current path. Reduce the piece-wise path length in the free space of the environment."""
p_tmp = copy.deepcopy(path)
p_tmp[i].x = p_tmp[i].x + cd[0]
p_tmp[i].y = p_tmp[i].y + cd[1]
r1 = math.sqrt((p_tmp[i-1].x - p_tmp[i].x)**2+(p_tmp[i-1].y - p_tmp[i].y)**2)
r2 = math.sqrt((p_tmp[i+1].x - p_tmp[i].x)**2+(p_tmp[i+1].y - p_tmp[i].y)**2)
penalty1 = 0
penalty2 = 0
    if obs:
        for o in obs:
            d1 = check_obst(p_tmp[i-1].x, p_tmp[i-1].y, p_tmp[i].x, p_tmp[i].y, o[0].x, o[0].y)
            if d1 < o[1]:
                penalty1 = max(penalty1, (o[1] - d1)*path_penalty)
            d2 = check_obst(p_tmp[i].x, p_tmp[i].y, p_tmp[i+1].x, p_tmp[i+1].y, o[0].x, o[0].y)
            if d2 < o[1]:
                penalty2 = max(penalty2, (o[1] - d2)*path_penalty)
return r1 + r2 + abs(r1-r2) + penalty1 + penalty2 | da126e3e7c0013748b1bc5b39c1b51aa2bf0d68b | 3,654,153 |
import base64
def create_message(sender_address, receiver_address , subject, email_content):
"""Create a message for an email.
Args:
sender: Email address of the sender.
to: Email address of the receiver.
subject: The subject of the email message.
message_text: The text of the email message.
Returns:
An object containing a base64url encoded email object.
"""
message = MIMEText(email_content, 'html')
message['to'] = receiver_address
message['from'] = sender_address
message['subject'] = subject
# return {'raw': base64.urlsafe_b64encode(message.as_string())}
b64_bytes = base64.urlsafe_b64encode(message.as_bytes())
b64_string = b64_bytes.decode()
return {'raw': b64_string} | 3970272fda9650b5b59de9a57b2579374088b5c4 | 3,654,154 |
import functools
def handle_view_errors(func):
"""
view error handler wrapper
# TODO - raise user related errors here
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
debug: bool = current_app.config.get('DEBUG')
try:
return func(*args, **kwargs)
except ValueError as e:
message: str = str(e)
if debug:
print(message)
raise InputError(status=error_codes.input_error_code, description=message)
except TypeError as e:
message: str = str(e)
if debug:
print(e)
raise InputError(
status=error_codes.input_error_code, description=message)
except BadRequestError as e:
if debug:
print(e)
message: str = '''Bad Request: while connecting to database'''
raise RequestError(status=error_codes.bad_request_error_code, description=message)
except BadQueryError as e:
if debug:
print(e)
message: str = '''Database Query Error: Error while querying database please inform admin'''
raise DataServiceError(
status=error_codes.data_service_error_code, description=message)
except ConnectionRefusedError as e:
if debug:
print(e)
message: str = '''Connection Refused: Unable to connect to database please try again later'''
raise RequestError(status=error_codes.remote_data_error, description=message)
except RetryError as e:
if debug:
print(e)
message: str = '''Retries Exceeded: Unable to connect to database please try again later
or inform the administrator'''
raise RequestError(status=error_codes.remote_data_error, description=message)
except Aborted as e:
if debug:
print(e)
message: str = '''Abort Error: connection refused by remote server'''
raise RequestError(status=error_codes.remote_data_error, description=message)
return wrapper | 8687214b9ad659a19699c3563a9a5890358e4c71 | 3,654,155 |
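# Usage sketch (assumptions: a Flask app/request context is active and the custom error
# classes used above are importable; the view name and body below are hypothetical).
@handle_view_errors
def create_user_view(user_id: str) -> dict:
    if not user_id:
        raise ValueError("user_id is required")  # surfaced as InputError by the decorator
    return {"status": "OK", "user_id": user_id}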
def get_latest_revision_number(request, package_id):
""" returns the latest revision number for given package """
package = get_object_or_404(Package, id_number=package_id)
return HttpResponse(simplejson.dumps({
'revision_number': package.latest.revision_number})) | 3f8053656cbd7e08336a4632f1deaf43e58bc3eb | 3,654,156 |
from typing import Mapping
def _make_mesh_tensors(inputs: Mapping[K, np.ndarray]) -> Mapping[K, tf.Tensor]:
"""
Computes tensors that are the Cartesian product of the inputs.
This is around 20x faster than constructing this in Python.
Args:
inputs: A mapping from keys to NumPy arrays.
Returns:
A mapping from keys to a Tensor, which takes on values from the corresponding
input NumPy array. Computing the Tensors should yield NumPy arrays equal to
`{k: itertools.product(inputs.values())[i] for i, k in enumerate(inputs.keys())}`.
"""
# SOMEDAY(adam): messy, this would be much nicer in TF2 API
# SOMEDAY(adam): v.dtype may not always match the dtype expected by the models.
# e.g. `stable_baselines.common.input` always maps `MultiDiscrete` to `int32` even though
# Gym reports it as `int64`. The dtypes match with `Box` though, which is the only thing we
# need so far, so ignoring this (change should possibly be made in Stable Baselines).
phs = {k: tf.placeholder(v.dtype, shape=v.shape) for k, v in inputs.items()}
# Increase dimensions for broadcasting
# So first tensor will be a[:, None, ..., None],
# second tensor b[None, :, None, ..., None], ...,
# final tensor z[None, ..., None, :].
tensors = {}
for i, (k, ph) in enumerate(phs.items()):
t = ph
for j in range(len(phs)):
if i != j:
t = tf.expand_dims(t, axis=j)
tensors[k] = t
target_shape = tuple((len(v) for v in inputs.values()))
tensors = {
k: tf.broadcast_to(t, target_shape + inputs[k].shape[1:]) for k, t in tensors.items()
}
target_len = np.product(target_shape)
tensors = {k: tf.reshape(t, (target_len,) + inputs[k].shape[1:]) for k, t in tensors.items()}
handles = {k: tf.get_session_handle(t) for k, t in tensors.items()}
feed_dict = {ph: inputs[k] for k, ph in phs.items()}
return tf.get_default_session().run(handles, feed_dict=feed_dict) | 65a97e7f7d85668acd2af50ba9ed745190181018 | 3,654,157 |
def make_counters():
"""Creates all of the VariantCounters we want to track."""
def _gt_selector(*gt_types):
return lambda v: variantutils.genotype_type(v) in gt_types
return VariantCounters([
('All', lambda v: True),
('SNPs', variantutils.is_snp),
('Indels', variantutils.is_indel),
('BiAllelic', variantutils.is_biallelic),
('MultiAllelic', variantutils.is_multiallelic),
('HomRef', _gt_selector(variantutils.GenotypeType.hom_ref)),
('Het', _gt_selector(variantutils.GenotypeType.het)),
('HomAlt', _gt_selector(variantutils.GenotypeType.hom_var)),
('NonRef',
_gt_selector(variantutils.GenotypeType.het,
variantutils.GenotypeType.hom_var)),
]) | b7a943f045018556a2a5d0dbf5e093906d10242a | 3,654,158 |
def connect_registry_client():
"""
connect the module client for the Registry implementation we're using return the client object
"""
client = adapters.RegistryClient()
client.connect(environment.service_connection_string)
return client | f2e2bccb4cfacd86af36e3924463541d9e3dcdcd | 3,654,159 |
def get_group_average_score(gid=None, name=None):
"""
Get the average score of teams in a group.
Args:
gid: The group id
name: The group name
Returns:
The total score of the group
"""
group_scores = get_group_scores(gid=gid, name=name)
total_score = sum([entry['score'] for entry in group_scores])
return int(total_score / len(group_scores)) if len(group_scores) > 0 else 0 | cdea61e388b47f399fbc8e228e313d6199164b2f | 3,654,160 |
import numpy as np
import cdd
def solve_with_cdd_for_II(A, verbose=False):
"""This method finds II's minmax strategy for zero-sum game A"""
m = A.shape[0] # number of rows
n = A.shape[1] # number of columns
A = np.column_stack([[0]*m,-A,[1]*m])
I = np.eye(n)
nn = np.column_stack([[0]*n,I,[0]*n])
# non-negativity constraints
n1 = [-1] * n
n1.insert(0,1)
n1.append(0) # n1 = 1,-1,-1,...,-1,0]
n2 = [1] * n
n2.insert(0,-1)
n2.append(0) # n1 = 1,-1,-1,...,-1,0]
d = np.vstack([A,nn,n1,n2])
mat = cdd.Matrix(d.tolist(), number_type='fraction')
mat.obj_type = cdd.LPObjType.MIN
d = [0] * (n+1)
d.append(1) # [0,0,...0,1]
mat.obj_func = d
lp = cdd.LinProg(mat)
lp.solve()
lp.status == cdd.LPStatusType.OPTIMAL
# lp.primal_solution uses fractions, and has value as last entry, so that
# is dropped
p = [float(val) for val in lp.primal_solution[:-1]]
u = float(lp.obj_value)
if verbose:
print("------ Solved with cdd -------------")
print("Optimal strategy:", p)
print("Optimal payoff:", -u)
print("------------------------------------")
return p, -u | 87ac90691fcbbe2f89bf9090c31f86c165c007ed | 3,654,161 |
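# Usage sketch (assumptions: pycddlib is installed and solve_with_cdd_for_II() above is in scope).
# For matching pennies the optimal mixed strategy for player II is (1/2, 1/2)
# and the value of the game is 0.
import numpy as np

A = np.array([[1, -1],
              [-1, 1]])
p, value = solve_with_cdd_for_II(A, verbose=True)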
def build_none() -> KeySetNone:
"""Returns NONE."""
return KeySetNone() | 8ba38204cd763597c66d51466f5d2ffa5c9a19bf | 3,654,162 |
import csv
import numpy
def load_csv(file, shape=None, normalize=False):
"""
Load CSV file.
:param file: CSV file.
:type file: file like object
:param shape : data array is reshape to this shape.
:type shape: tuple of int
:return: numpy array
"""
value_list = []
for row in csv.reader(file):
        value_list.append(list(map(float, row)))
if shape is None:
return numpy.array(value_list)
else:
return numpy.array(value_list).reshape(shape) | 07f3b61bbdb6c9937f3cc4b0ae98fdfb7d8de48a | 3,654,163 |
from typing import Tuple
def flip_around_axis(
coords: np.ndarray,
axis: Tuple[float, float, float] = (0.2, 0.2, 0.2)
) -> np.ndarray:
"""Flips coordinates randomly w.r.t. each axis with its associated probability."""
for col in range(3):
if np.random.binomial(1, axis[col]):
coords[:, col] = np.negative(coords[:, col])
return coords | 914834a8492998b4e1e0b93e5e9677ec9af2d736 | 3,654,164 |
import math
def get_tc(name):
"""Determine the amount of tile columns to use."""
args = ["ffprobe", "-hide_banner", "-select_streams", "v", "-show_streams", name]
proc = sp.run(args, text=True, stdout=sp.PIPE, stderr=sp.DEVNULL)
lines = proc.stdout.splitlines()
d = {}
for ln in lines[1:-1]:
key, value = ln.strip().split("=")
d[key] = value
width = d["width"]
return math.floor(math.log2(math.ceil(float(width) / 64.0))) | ee917cd8cebfe7dc4ae718d883c657cf23bff1cf | 3,654,165 |
import os
import socket
def get_my_nodes():
"""Get nodes assigned to this host for computation
"""
if not os.path.exists("/etc/cluster-hosts"):
raise Exception("No cluster hosts specified")
#grab list of hosts in cluster, in order
hosts = []
with open("/etc/cluster-hosts", "r") as fp:
for line in fp:
hosts.append(line.strip())
d = Differ()
diffnodes = list(d.get_nodes())
#compute node->host assignments (round-robin)
assigns = dict()
dx = 0
for item in diffnodes:
assigns[item] = hosts[dx % len(hosts)]
dx += 1
myitems = []
fqdn = socket.getfqdn()
for (item, host) in assigns.items():
if host == fqdn:
myitems.append(item)
return myitems | 3f3f57a3239d71850f1ee72ab108255d5f152830 | 3,654,166 |
def cmap_hex_color(cmap, i):
"""
Convert a Colormap to hex color.
Parameters
----------
cmap : matplotlib.colors.ListedColormap
Represents the Colormap.
i : int
List color index.
Returns
-------
String
Represents corresponding hex string.
"""
return matplotlib.colors.rgb2hex(cmap(i)) | 9ac7753cde9470e3dd9fbd4a66373b25126635ca | 3,654,167 |
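# Usage sketch (assumption: cmap_hex_color() above is in scope; matplotlib is installed).
import matplotlib.colors
import matplotlib.pyplot as plt

viridis = plt.get_cmap("viridis")
print(cmap_hex_color(viridis, 0))    # expected '#440154', the first colour of viridis
print(cmap_hex_color(viridis, 128))  # a colour from the middle of the map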
def train_folds(X, y, fold_count, batch_size, get_model_func):
""" K-Fold Cross-Validation for Keras Models
Inspired by PavelOstyakov
https://github.com/PavelOstyakov/toxic/blob/master/toxic/train_utils.py
"""
fold_size = len(X[0]) // fold_count
models = []
for fold_id in range(0, fold_count):
print('===== FOLD {} ====='.format(fold_id+1))
model = get_model_func()
model.compile()
RocAuc = RocAucEvaluation()
RocAuc.set_model(model)
model.fit(
X, y, validation_split=max(1/fold_count, 0.15),
batch_size=batch_size, epochs=20, shuffle=True,
add_callbacks=[RocAuc], verbose=1
)
models.append(model)
return models | 51a38243925c76ac6179a90be46be31fcb685054 | 3,654,168 |
import socket
import os, random, string
import json
def reset_password(request):
"""
View to handle password reset
"""
helper.log_entrace(logger,request)
postEmail = request.POST.get('email', '')
try:
user = User.objects.get(email = postEmail)
chars = string.ascii_letters + string.digits
        random.seed(os.urandom(1024))
new_password = ''.join(random.choice(chars) for i in range(EPA_DEFAULT_PASSWORD_LENGTH))
user.set_password(new_password)
try:
send_mail(EPA_FORGOT_PASSWORD_EMAIL_TITLE, EPA_FORGOT_PASSWORD_EMAIL_BODY_TEMPLATE.replace('%username%', user.username)
.replace('%new_password%', new_password), EPA_FORGOT_PASSWORD_EMAIL_SENDER, [user.email])
user.save()
return HttpResponse(json.dumps([{'status': 'ok'}]))
except socket.error:
return HttpResponseServerError(json.dumps({'messages': ['Fail while try connecting to mail server']}))
except (ObjectDoesNotExist, DatabaseError) as e:
res = {"messages": ['User Not found']}
logger.error('Error in method view_prediction_data: ' + str(e.__class__) + e.message)
return HttpResponseNotFound(json.dumps(res)) | 50c8b1d058a168176b528bb9168d4e0e179a08c5 | 3,654,169 |
async def cancel(command: HALCommandType, script: str):
"""Cancels the execution of a script."""
try:
await command.actor.helpers.scripts.cancel(script)
except Exception as err:
command.warning(text=f"Error found while trying to cancel {script}.")
return command.fail(error=err)
return command.finish(f"Script {script} has been scheduled for cancellation.") | 438297845f5ba4ffc49b95a798adf61294371694 | 3,654,170 |
def get_nodes_by_betweenness_centrality(query_id, node_number):
"""Get a list of nodes with the top betweenness-centrality.
---
tags:
- query
parameters:
- name: query_id
in: path
description: The database query identifier
required: true
type: integer
- name: node_number
in: path
description: The number of top between-nodes to return
required: true
type: integer
"""
graph = manager.cu_get_graph_from_query_id_or_404(query_id)
if node_number > graph.number_of_nodes():
node_number = graph.number_of_nodes()
bw_dict = nx.betweenness_centrality(graph)
return jsonify([
node.md5
for node, score in sorted(bw_dict.items(), key=itemgetter(1), reverse=True)[:node_number]
]) | 44d97e443c6bef4d7048496674a2382a6c4f2ade | 3,654,171 |
import sympy
def preprocess(function):
"""
Converts a given function from type str to a Sympy object.
Keyword arguments:
function -- a string type representation of the user's math function
"""
expr = function
while True:
if '^' in expr:
expr = expr[:expr.index('^')] + '**' + expr[expr.index('^')+1:]
else:
break
expr = sympy.sympify(expr)
return expr | 001bd04d27db2afa4debbe776e5fe3cf1af1476d | 3,654,172 |
import re
def tot_changes(changes: str) -> int:
"""Add deletions and insertions."""
insertions_pat = re.compile(r"(\d+) insertion")
deletions_pat = re.compile(r"(\d+) deletion")
insertions = insertions_pat.search(changes)
insertions = int(insertions.group(1)) if insertions else 0
deletions = deletions_pat.search(changes)
deletions = int(deletions.group(1)) if deletions else 0
return insertions + deletions | 74742baf63db51b5c59b332f0104008500f330b9 | 3,654,173 |
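# Usage sketch (assumption: tot_changes() above is in scope).
# Typical `git diff --shortstat` output: insertions and deletions are summed.
assert tot_changes(" 3 files changed, 42 insertions(+), 7 deletions(-)") == 49
assert tot_changes(" 1 file changed, 1 insertion(+)") == 1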
def update_from_mcd(full_table, update_table):
# type: (pd.DataFrame, pd.DataFrame) -> pd.DataFrame
"""
Update the full table (aka the PDG extended-style table) with the
up-to-date information from the PDG .mcd file.
Example
-------
>>> new_table = update_from_mcd('mass_width_2008.fwf', 'mass_width_2021.mcd') # doctest: +SKIP
"""
full_table = full_table.copy()
full_table.update(update_table)
update_table_neg = update_table.copy()
update_table_neg.index = -update_table_neg.index
full_table.update(update_table_neg)
return full_table | c60daea5719445fb696ef21bbc0f233fea4e48cd | 3,654,174 |
import socket
def resolve_hostname(host):
"""Get IP address of hostname or URL."""
try:
parsed = urlparse.urlparse(host)
except AttributeError as err:
error = "Hostname `%s`is unparseable. Error: %s" % (host, err)
LOG.exception(error)
raise errors.SatoriInvalidNetloc(error)
# Domain names are in netloc, IP addresses fall into path
hostname = parsed.netloc or parsed.path
# socket.gaierror is not trapped here
address = socket.gethostbyname(hostname)
return address | 1792d943e490661b4dd42608f2b025096810688f | 3,654,175 |
import pandas as pd
import numpy as np
from .data_utils import keep_common_genes
from .data_utils import df_normalization
def DeconRNASeq_main(rna_df, sig_df, patient_IDs='ALL', args={}):
"""
This function does the following:
- parses the dictionary 'args' for the arguments to pass on to the DeconRNASeq method.
- eliminates genes from rna_df and sig_df that are not present in both data sets
- Runs DeconRNASeq() for each patient specified in patient_IDs' argument
- Combines the resulting frequencies into a pandas dataframe (num_celltypes x num_patients)
Inputs:
- rna_df: pandas df of rna gene expression data.
Rows are genes (indexed by 'Hugo_Symbol') and columns are patients
- sig_df: pandas df of Signature gene expression values for given cell types.
Rows are genes (indexed by 'Hugo_Symbol') and columns are cell types
- patient_IDs: list of patient IDs to run DeconRNASeq for.
Alternatively, can use the string 'ALL' to run for all patients
- args: dictionary containing any of the following:
- check_sig: boolean, whether or not to check the condition number of the signature matrix
- scaling: string, must be either 'None', 'zscore', or 'minmax'. Determines how to scale the signature matrix and mixture data before solving for optimal x
- scaling_axis: 0 or 1. Whether to scale mixture data and signature matrix by normalizing each column (celltype/patient) separately (scaling_axis=0) or each row (gene) separately (scaling_axis=1).
- formulation: see DeconRNASeq()
- reg_constant: see DeconRNASeq()
- print_result: see DeconRNASeq()
Outputs:
- cell_freqs: pandas df. Contains cell type frequencies for each patient in 'patient_IDs' list.
Rows are indexed by cell type, columns are patient IDs
"""
# Read in optional arguments, or set them as default
# Assert values are of the right data type when passed to DeconRNASeq() function.
# formulation must be 'qp', 'ridge', or 'lasso'
if 'formulation' in args.keys():
formulation = args['formulation']
if formulation not in ['qp','ridge','lasso']:
raise ValueError("Formulation ({!r}) must be set to 'qp', 'ridge', or 'lasso'".format(formulation))
else:
formulation = 'qp'
# reg_constant must be a double
if 'reg_constant' in args.keys():
reg_constant = args['reg_constant']
else:
reg_constant = 1.0
if 'check_sig' in args.keys():
check_sig = args['check_sig']
if not isinstance(check_sig, bool):
raise ValueError("check_sig ({!r}) must be a boolean variable".format(check_sig))
else:
check_sig = False
if 'scaling' in args.keys():
scaling = args['scaling']
if scaling not in ['None', 'none', 'zscore', 'minmax', 'r-zscore']:
raise ValueError("scaling ({!r}) must be set to 'none', 'zscore' or 'minmax'".format(scaling))
else:
scaling = 'minmax'
if 'scaling_axis' in args.keys():
scaling_axis = args['scaling_axis']
if scaling_axis not in [0, 1]:
raise ValueError("scaling_axis ({!r}) must be 0 or 1".format(scaling_axis))
else:
scaling_axis = 0
if 'print_results' in args.keys():
print_results = args['print_results']
if not isinstance(print_results, bool):
raise ValueError("print_results ({!r}) must be a boolean variable".format(print_results))
else:
print_results = False
# eliminate genes not present in both rna and sig dfs, and ensure they are in the same order:
rna_df, sig_df = keep_common_genes(rna_df, sig_df)
# Scale Data:
if scaling in ['zscore', 'minmax', 'r-zscore']:
# R implementation uses zscore scaling.
sig_df = df_normalization(sig_df, scaling=scaling, axis=scaling_axis)
rna_df = df_normalization(rna_df, scaling=scaling, axis=scaling_axis)
# Convert signature to numpy array
Sig = np.array(sig_df)
# Check the condition number of the signature matrix:
if check_sig:
print("Condition number of signature matrix =", np.linalg.cond(Sig))
# Select a patient / list of patients to solve for their cell type frequencies:
# Patient_ID must be 'ALL' or an array of specific patient IDs.
if patient_IDs == 'ALL':
patient_list = rna_df.columns
elif not isinstance(patient_IDs, type([])):
raise ValueError("patient_IDs should be either 'ALL', or an array of IDs (not a single ID)")
else:
patient_list = patient_IDs
# For each patient, run DeconRNASeq to get cell type frequencies, and save results to pandas df:
print("Running DeconRNASeq...")
cell_freqs_df = pd.DataFrame()
cell_freqs_df['Patient_ID'] = sig_df.columns
cell_freqs_df = cell_freqs_df.set_index(['Patient_ID'])
for patient in patient_list:
if patient in rna_df.columns:
Mix = np.array(rna_df[patient])
cell_freqs_df[patient] = DeconRNASeq(Sig, Mix, formulation=formulation, reg_constant=reg_constant, print_results=print_results, label=patient)
else:
raise ValueError("patient_ID ({!r}) not present in rna dataframe".format(patient))
cell_freqs_df = cell_freqs_df.transpose()
return cell_freqs_df | 44bf01b0d53110610d3219e3002cca0ab35720b5 | 3,654,176 |
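# Usage sketch (assumptions: the .data_utils helpers and DeconRNASeq solver imported above are
# available and DeconRNASeq_main() is in scope; gene symbols, patient and cell-type names are made up).
import numpy as np
import pandas as pd

genes = ["GENE%d" % i for i in range(100)]
rna_df = pd.DataFrame(np.random.rand(100, 3),
                      index=pd.Index(genes, name="Hugo_Symbol"),
                      columns=["PT001", "PT002", "PT003"])
sig_df = pd.DataFrame(np.random.rand(100, 4),
                      index=pd.Index(genes, name="Hugo_Symbol"),
                      columns=["B_cells", "T_cells", "NK_cells", "Monocytes"])
cell_freqs = DeconRNASeq_main(rna_df, sig_df,
                              patient_IDs=["PT001", "PT002"],
                              args={"scaling": "minmax", "check_sig": True})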
import re
from typing import OrderedDict
def parse_c_interface(c_interface_file):
"""
@brief Parses a c-interface file and generates a dictionary of function names to parameter lists.
Exported functions are expected to be preceded by 'DLL_EXPORT'. Python keywords should not be used as variable
names for the function names in the cpp-interface file. If a Python wrapper function shall return the output buffer,
the corresponding parameter has to be preceded by the _OUT_BUFFER_KEYWORD in the C++ file. In this case, we assume
the parameter is a numpy array. The shape and the dtype will be taken from the first input parameter.
"""
_OUT_BUFFER_KEYWORD = "OUT"
with open(c_interface_file, "r") as f:
# read file and remove comments
content = "\n".join([c.split("//")[0] for c in re.sub("/\*.*?\*/", "", f.read(), flags=re.DOTALL).split("\n")])
function_signatures = [x for x in re.findall("DLL_EXPORT.+?\)", content, flags=re.DOTALL)]
function_dict = OrderedDict()
for sig in function_signatures:
params_regex = re.compile("\(.*?\)", flags=re.DOTALL)
# find function name
wo_params = re.sub(params_regex, "", sig)
tokens = re.split("\s", wo_params)
name = tokens[-1]
function_dict[name] = dict()
# find return type and initialize dict
function_dict[name] = {"restype": " ".join(tokens[1:-1]), "params": [], "out_buffers": []}
# find parameters, remove template specifiers, and split at commas
param_fields = re.sub("<.*?>", "", re.search(params_regex, sig).group(0)[1:-1]).split(",")
out_buffer_indices = [i for i, s in enumerate(param_fields)
if _OUT_BUFFER_KEYWORD in [x.strip() for x in s.split(" ")]]
name_position = -1 # last position in C++ should contain the name of the variable
try:
all_parameters = [re.search("[A-Za-z0-9_]+", x[name_position].strip()).group(0)
for x in (re.split("\s", s) for s in param_fields)]
for i, p in enumerate(all_parameters):
if i in out_buffer_indices:
function_dict[name]["out_buffers"].append(p)
else:
function_dict[name]["params"].append(p)
except AttributeError:
pass
return function_dict | 06a4edb40e12343cda688da82c9042d1342e6429 | 3,654,177 |
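# Usage sketch (assumption: parse_c_interface() above is in scope; the header content and
# file name are made up for illustration).
header = 'DLL_EXPORT int add_arrays(const double* a, const double* b, OUT double* result, int n);\n'
with open("example_interface.h", "w") as fh:
    fh.write(header)

funcs = parse_c_interface("example_interface.h")
# funcs == {'add_arrays': {'restype': 'int',
#                          'params': ['a', 'b', 'n'],
#                          'out_buffers': ['result']}}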
import numpy as np
def con_minimize(fun, bounds, constr=(), x0=None, args=(),
                 callback=None, options={}, workers=None):
    """Constrained minimization of `fun` using a Genetic Algorithm.
    This function is a wrapper over modestga.minimize().
The constraints are defined as a tuple of functions
(`fcon1(x, *args)`, `fcon2(x, *args)`, `...`).
The algorithm searches for a solution minimizing
`fun(x, *args)` and satisfying the conditions
(`fcon1(x, *args) >= 0`, `fcon2(x, *args) >= 0`, `...`).
`callback` arguments: `x`, `fx`, `ng`, `*args`.
`fx` is the function value at the generation `ng`.
Returns an optimization result object with the following attributes:
- x - numpy 1D array, optimized parameters,
- message - str, exit message,
- ng - int, number of generations,
- fx - float, final function value.
:param fun: function to be minimized
:param bounds: tuple, parameter bounds
:param constr: tuple, functions defining constraints
:param x0: numpy 1D array, initial parameters
:param args: tuple, positional arguments to be passed to `fun` and to `fcon`
:param callback: function, called after every generation
:param options: dict, GA options
:param workers: int, number of processes to use (will use all CPUs if None)
:return: OptRes, optimization result
"""
# Wrap cost function with constraints
def fun_soft_con(x, *augmented_args):
# Unpack constraints and arguments
fcore = augmented_args[0] # Function to be minimized
fcons = augmented_args[1] # Constraints
user_args = augmented_args[2:] # Arguments
# Evaluate core function
ycore = fcore(x, *user_args)
# Initialize penalty
penalty = 0.
# Update penalty
# (the more negative fcon() is, the higher penalty)
for f in fcons:
ycon = np.max([f(x, *user_args) * -1., 0.])
pscale = ycore / (ycon + 1e-6)
penalty += ycon * pscale
return ycore + penalty
# Run minimization
augmented_args = (fun, constr, *args)
res = minimize(
fun=fun_soft_con,
bounds=bounds,
x0=x0,
args=augmented_args,
callback=callback,
options=options,
workers=workers)
# Extend result with contraint violation info
res.constr = [fcon(res.x, *args) for fcon in constr]
return res | 46a7400953e54dfb9b2364832e6029a508acc9de | 3,654,178 |
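# Usage sketch (assumptions: modestga and con_minimize() above are available; the cost
# function, constraint, and bounds below are made up for illustration).
import numpy as np

def cost(x):
    return float(np.sum(x ** 2))          # minimize x0^2 + x1^2

def constraint(x):
    return x[0] + x[1] - 1.0              # feasible when x0 + x1 >= 1

bounds = ((-5.0, 5.0), (-5.0, 5.0))
res = con_minimize(cost, bounds, constr=(constraint,), workers=1)
print(res.x, res.fx, res.constr)          # expected solution is close to (0.5, 0.5)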
from collections import defaultdict
def unique_v2(lst):
    """
    Returns a list of all unique elements in the input list "lst".
    This algorithm runs in O(n), as it only passes through the list "lst" twice.
    """
    dd = defaultdict(int)  # avoids KeyError lookups on keys that have not been seen yet
unique_list = []
for val in lst:
dd[val] += 1
for val in lst:
if dd[val] == 1:
unique_list.append(val)
return unique_list | d7c5706908d569b3ee93ba1bebbd09bc6f335ad2 | 3,654,179 |
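# Usage sketch (assumption: unique_v2() above is in scope).
assert unique_v2([1, 2, 2, 3, 1, 4]) == [3, 4]   # only values that occur exactly once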
import ipaddress
def is_ip_network(network, strict=False):
"""Returns True/False if a string is a valid network."""
network = str(network)
try:
ipaddress.ip_network(network, strict)
return True
except ValueError:
return False | 84206586412b76816fa845a75fc6c121bfdf0989 | 3,654,180 |
def assign_point_of_contact(point_of_contact):
"""
Assign a user to be the point of contact in emails/letters
:param point_of_contact: A string containing the user_guid if point of contact has been set for a request
:return: A User object to be designated as the point of contact for a request
"""
if point_of_contact:
return Users.query.filter(Users.guid == point_of_contact).one_or_none()
else:
return current_user | 99f2e7d036c4f7cf71be2bd6b82a313f26b3af41 | 3,654,181 |
def response_with_pagination(guests, previous, nex, count):
"""
Make a http response for GuestList get requests.
:param count: Pagination Total
:param nex: Next page Url if it exists
:param previous: Previous page Url if it exists
:param guests: Guest
:return: Http Json response
"""
return make_response(jsonify({
'status': 'success',
'previous': previous,
'next': nex,
'count': count,
'guests': guests
})), 200 | 00373c866b6cc8384a88e62b63fcaa5950ccc1c1 | 3,654,182 |
def put_object(request, old_pid):
"""MNStorage.update(session, pid, object, newPid, sysmeta) → Identifier."""
if django.conf.settings.REQUIRE_WHITELIST_FOR_UPDATE:
d1_gmn.app.auth.assert_create_update_delete_permission(request)
d1_gmn.app.util.coerce_put_post(request)
d1_gmn.app.views.assert_db.post_has_mime_parts(
request, (("field", "newPid"), ("file", "object"), ("file", "sysmeta"))
)
d1_gmn.app.views.assert_db.is_valid_pid_to_be_updated(old_pid)
sysmeta_pyxb = d1_gmn.app.sysmeta.deserialize(request.FILES["sysmeta"])
new_pid = request.POST["newPid"]
d1_gmn.app.views.assert_sysmeta.matches_url_pid(sysmeta_pyxb, new_pid)
d1_gmn.app.views.assert_sysmeta.obsoletes_matches_pid_if_specified(
sysmeta_pyxb, old_pid
)
sysmeta_pyxb.obsoletes = old_pid
sid = d1_common.xml.get_opt_val(sysmeta_pyxb, "seriesId")
d1_gmn.app.views.assert_sysmeta.is_valid_sid_for_chain(old_pid, sid)
d1_gmn.app.views.create.create_sciobj(request, sysmeta_pyxb)
# The create event for the new object is added in create_sciobj(). The update
# event on the old object is added here.
d1_gmn.app.event_log.log_update_event(
old_pid,
request,
timestamp=d1_common.date_time.normalize_datetime_to_utc(
sysmeta_pyxb.dateUploaded
),
)
d1_gmn.app.sysmeta.update_modified_timestamp(old_pid)
return new_pid | 192ed2a7efc35baf28605de9db594319370f294d | 3,654,183 |
def _match_gelu_pattern(gf, entry_node):
""" Return the nodes that form the subgraph of a GELU layer
"""
try:
if not len(entry_node.outputs) == 3:
return None
pow_1, add_2, mul_3 = [gf[x] for x in entry_node.outputs]
if not (pow_1.op == 'Pow' and add_2.op == 'Add' and mul_3.op == 'Mul'):
return None
const_4 = gf[pow_1.inputs[1]]
if not (const_4.op == 'Const' and int(round(const_4.value.val)) == 3):
return None
mul_5 = gf[pow_1.outputs[0]]
const_6 = gf[mul_5.inputs[0]]
if not (const_6.op == 'Const' and \
abs(const_6.value.val - 0.0447) < 1e-3):
return None
if not (gf[add_2.inputs[0]] == entry_node and \
gf[add_2.inputs[1]] == mul_5):
return None
mul_7 = gf[add_2.outputs[0]]
const_8 = gf[mul_7.inputs[0]]
if not abs(const_8.value.val - np.sqrt(2 / np.pi)) < 1e-3:
return None
tanh_9 = gf[mul_7.outputs[0]]
add_10 = gf[tanh_9.outputs[0]]
const_11 = gf[add_10.inputs[0]]
if not (tanh_9.op == 'Tanh' and add_10.op == 'Add' and \
const_11.op == 'Const' and int(round(const_11.value.val)) == 1):
return None
mul_12 = gf[add_10.outputs[0]]
const_13 = gf[mul_12.inputs[0]]
if not (mul_12.op == 'Mul' and const_13.op == 'Const' and \
abs(const_13.value.val - 0.5) < 1e-3):
return None
if not (gf[mul_3.inputs[0]] == entry_node and \
gf[mul_3.inputs[1]] == mul_12):
return None
gelu_nodes = [pow_1, add_2, mul_3, const_4, mul_5, const_6, mul_7,
const_8, tanh_9, add_10, const_11, mul_12, const_13]
return gelu_nodes
except:
return None | 6e08578a9cb9bea96c939a4fbee31003d6c575d4 | 3,654,184 |
def assign_obs_error(param, truth_mag, band, run):
"""
Assign errors to Object catalog quantities
Returns
-------
obs_err : float or np.array
The error values in units defined in get_astrometric_error(), get_photometric_error
err_type : str
Type of observational error
"""
if param in ['ra_offset', 'dec_offset', 'Ixx_sqrt', 'Iyy_sqrt', 'x', 'y_obs',]:
obs_err = get_astrometric_error(truth_mag, band=band)
err_type = 'astrometric'
elif param in ['Ixy', 'IxxPSF', 'IxyPSF', 'IyyPSF',]:
# \delta(x^2) = \delta(x) \times 2x
obs_err = 2.0*param_val*get_astrometric_error(truth_mag, band=band)
err_type = 'astrometric'
elif 'Flux' in param: # flux columns
obs_err = get_photometric_error(truth_mag, band=band, run=run)
err_type = 'photometric'
elif param == 'extendedness':
obs_err = np.zeros_like(param_val)
err_type = 'N/A'
else:
raise NotImplementedError
return obs_err, err_type | 9a90b80755941ac19cbf023f7ee63f4650518242 | 3,654,185 |
from typing import List
from typing import Tuple
import tokenize
def dir_frequency(dirname: str, amount=50) -> List[Tuple[str, int]]:
"""Pipeline of word_frequency from a directory of raw input file."""
md_list = md.collect_md_text(dirname)
return compute_frequency(tokenize(normalize(" ".join(md_list))), amount) | 3daddb1930e80235887b51ed5918e9d7cb1fff71 | 3,654,186 |
def test_solver1(N, version='scalar'):
"""
Very simple test case.
Store the solution at every N time level.
"""
def I(x): return sin(2*x*pi/L)
def f(x,t): return 0
solutions = []
# Need time_level_counter as global variable since
# it is assigned in the action function (that makes
# a variable local to that block otherwise).
# The manager class below provides a cleaner solution.
global time_level_counter
time_level_counter = 0
def action(u, t, x):
global time_level_counter
if time_level_counter % N == 0:
solutions.append(u.copy())
time_level_counter += 1
n = 100; tstop = 6; L = 10
dt, x, cpu = solver(I, f, 1.0, lambda t: 0, lambda t: 0,
L, n, 0, tstop,
user_action=action, version=version)
    print('CPU time:', cpu)
    print('Max value in final u:', arrmax(solutions[-1]))
import requests
def get_auth_data():
"""
Create auth data.
Returns:
return: access token and token expiring time.
"""
payload = {
'client_id': CLIENT_ID,
'client_secret': CLIENT_SECRET,
'grant_type': 'client_credentials',
}
api_url = '{0}/oauth/access_token'.format(API_BASE_URL)
response = requests.post(url=api_url, data=payload)
response.raise_for_status()
auth_data = response.json()
return auth_data['access_token'], auth_data['expires_in'] | ecd921c1ef3639c388111ec5952c887867076d99 | 3,654,188 |
def datatable(module, tag):
"""Mapping for DataTable."""
if tag == "DataTable":
return module, tag | 1eaa06771ecdd99dfa102ec249b23db3999b6fd7 | 3,654,189 |
def remove_prepending(seq):
"""
Method to remove prepending ASs from AS path.
"""
last_add = None
new_seq = []
for x in seq:
if last_add != x:
last_add = x
new_seq.append(x)
is_loopy = False
if len(set(seq)) != len(new_seq):
is_loopy = True
# raise Exception('Routing Loop: {}'.format(seq))
return new_seq, is_loopy | 78bb1554678af0998e15ecf9ed8f4e379ac2e2ad | 3,654,190 |
def github_handle_error(e):
"""
Handles an error from the Github API
an error example: Error in API call [401] - Unauthorized
{"message": "Bad credentials", "documentation_url": "https://docs.github.com/rest"}
The error might contain error_code, error_reason and error_message
The error_reason and error_message might be the same but usually, the error_reason adds more information that
the error_message doesn't provide
examples:
error_code = 401
error_message = 'Bad credentials'
error_reason = 'Unauthorized'
:param e: the client object
:return: error_code and error_message
"""
try:
error_code = ""
error_message = str(e)
if e.__class__ is DemistoException and e.res is not None:
error_res = e.res
if isinstance(error_res, dict):
error_code = str(error_res.get("status"))
error_message = str(error_res.get("detail"))
else:
error_code = e.res.status_code
if not e.res.ok:
if e.res.json():
error_message = error_res.json().get("message", "")
if not error_message:
error_message = error_res.json().get("detail", "")
error_reason = error_res.reason
if error_reason and error_reason != error_message:
error_message += f' {error_reason}'
return error_code, error_message
except Exception as e:
error_code = ""
error_message = str(e)
return error_code, error_message | 1b3d7ef6756c02d7bf1b8db506dbf926dd3e6abd | 3,654,191 |
def netmask_to_bits(net_mask):
""" Convert netmask to bits
Args:
net_mask ('str'): Net mask IP address
ex.) net_mask = '255.255.255.255'
Raise:
None
Returns:
Net mask bits
"""
return IPAddress(net_mask).netmask_bits() | 7ecc069e14242ebffd840b989331a431f6c2ecbc | 3,654,192 |
def register_corrector(cls=None, *, name=None):
"""A decorator for registering corrector classes."""
def _register(cls):
if name is None:
local_name = cls.__name__
else:
local_name = name
if local_name in _CORRECTORS:
raise ValueError(f'Already registered model with name: {local_name}')
_CORRECTORS[local_name] = cls
return cls
if cls is None:
return _register
else:
return _register(cls) | 90795496caff7958af52bbe1518582a2a2ceea73 | 3,654,193 |
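# Usage sketch (assumptions: the module-level _CORRECTORS registry dict used above exists;
# the corrector class below is a made-up placeholder).
@register_corrector(name="langevin")
class LangevinCorrector:
    """Placeholder corrector used only to illustrate registration."""

assert _CORRECTORS["langevin"] is LangevinCorrector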
import numpy
def _sample_perc_from_list(lst, perc=100, algorithm="cum_rand", random_state=None):
"""
Sample randomly a certain percentage of items from the given
list. The original order of the items is kept.
:param lst: list, shape = (n,), input items
:param perc: scalar, percentage to sample
:param algorithm: string, which algorithm should be used
"random": Decide for each item to be chosen or not. This
algorithm runs in linear time O(n), but
the percentages might not match exactly.
"cum_rand": O(n log(n) + perc)
:return: list
"""
if perc >= 100:
return lst
if perc <= 0:
return []
# Store old random state and set random state
rs_old = numpy.random.get_state()
numpy.random.seed(random_state)
if algorithm == "random":
lst_sub = [it for it in lst if numpy.random.uniform(high=100) <= perc]
elif algorithm == "cum_rand":
n = len(lst)
n_perc = numpy.round(n * perc / 100.0)
rank_its = numpy.argsort(numpy.random.uniform(size=n))
lst_sub = []
for idx, it in enumerate(lst):
if rank_its[idx] < n_perc:
lst_sub.append(it)
if len(lst_sub) > n_perc:
break
else:
raise ValueError("Invalid sampling algorithm: %s." % algorithm)
    # Restore old random state
numpy.random.set_state(rs_old)
return lst_sub | 4ec000e9bd8f5e10550040e49018e2a045659397 | 3,654,194 |
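# Usage sketch (assumption: _sample_perc_from_list() above is in scope).
items = list(range(10))
subset = _sample_perc_from_list(items, perc=30, algorithm="cum_rand", random_state=0)
# 'subset' preserves the original order and holds 3 of the 10 items.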
def irods_setacls(path, acl_list, verbose=False):
"""
This function will add the ACLs listed in 'acl_list'
to the collection or data object at 'path'.
'acl_list' is a list where each element itself is
a list consisting of the username in name#zone format,
and the access level ('read', 'write', 'own', or 'null').
Access type 'null' removes all ACLs for that user/group.
Note. On an error return, some of the ACLs might have
been applied. The function does not "roll back" on error.
Returns 0 on success, non-zero on error.
"""
if not path or not acl_list:
return 1
for acl in acl_list:
(rc, output) = shell_command(['ichmod', acl[1], acl[0], path])
if rc:
if verbose:
print("Error running 'ichmod %s %s %s': rc = %d:"
% (acl[1], acl[0], path, rc))
                print(output[1])
return rc
return 0 | 5727d6ff96e2d693323d5d88ed81eafbd4de0435 | 3,654,195 |
import datetime
def add_years(date_to_change, years):
"""
Return a date that's `years` years after the date (or datetime)
object `date_to_change`. Return the same calendar date (month and day) in the
destination year, if it exists, otherwise use the following day
(thus changing February 29 to March 1).
Args:
date_to_change (date): The date that we're adding years to.
years ([type]): The number of years to add.
Returns:
[date]: The provided date + one year.
"""
try:
return date_to_change.replace(year=date_to_change.year + years)
except ValueError:
return date_to_change + (
datetime.date(date_to_change.year + years, 1, 1)
- datetime.date(date_to_change.year, 1, 1)
) | e9b71d190f7629a3edc0902d0582005a26a33956 | 3,654,196 |
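# Usage sketch (assumption: add_years() above is in scope).
import datetime

assert add_years(datetime.date(2019, 6, 15), 2) == datetime.date(2021, 6, 15)
# February 29 maps to March 1 when the target year is not a leap year.
assert add_years(datetime.date(2020, 2, 29), 1) == datetime.date(2021, 3, 1)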
def concurrency_update_done(client, function_name, qualifier):
"""wait fn for ProvisionedConcurrencyConfig 'Status'"""
def _concurrency_update_done():
status = client.get_provisioned_concurrency_config(
FunctionName=function_name, Qualifier=qualifier
)["Status"]
if status == "FAILED":
raise ShortCircuitWaitException(f"Concurrency update failed: {status=}")
else:
return status == "READY"
return _concurrency_update_done | 4d168e4e9648c3a3d8cb149aad1e835362bd271a | 3,654,197 |
def googleapis_email(url, params):
"""Loads user data from googleapis service, only email so far as it's
described in http://sites.google.com/site/oauthgoog/Home/emaildisplayscope
Parameters must be passed in queryset and Authorization header as described
on Google OAuth documentation at:
http://groups.google.com/group/oauth/browse_thread/thread/d15add9beb418ebc
and: http://code.google.com/apis/accounts/docs/OAuth2.html#CallingAnAPI
"""
request = Request(url + '?' + params, headers={'Authorization': params})
try:
return simplejson.loads(dsa_urlopen(request).read())['data']
except (ValueError, KeyError, IOError):
return None | c6123e367f093a512ac17797da487e733503dc11 | 3,654,198 |
def _compose_query_string(ctx, query_string, **args):
"""
Return the SQL for an ad-hoc named query on the given context.
NOTE: This is a debug ONLY method, do NOT use this in production code.
"""
query = _construct_adhoc_query(ctx, query_string, **args)
wrapped_ctx = _CtxWrapper.wrap(ctx)
    assert wrapped_ctx.current_conn is not None
return query.sql(wrapped_ctx.current_conn, args, _debugging=True) | b64d17d11b8b9947eac9c59510254d33018d519b | 3,654,199 |