content (string, lengths 22 to 815k) | id (int64, 0 to 4.91M)
---|---
def reference_info(ref_img=None):
"""Check if reference image exists and provide information to users."""
if ref_img:
if not os.path.isfile(ref_img):
raise ValueError("""Reference image doesn't exist.
Check --reference_image.""")
else:
logger.debug("Using reference image: %s.", os.path.abspath(ref_img))
else:
logger.debug("Reference image not provided.") | 5,354,300 |
def has_no_duplicates(input_):
"""Check that a list contains no duplicates.
For example:
['aa', 'bb', 'cc'] is valid.
['aa', 'bb', 'aa'] is not valid. The word aa appears more than once.
"""
return len(input_) == len(set(input_)) | 5,354,301 |
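A quick usage sketch for has_no_duplicates (not part of the original snippet), reusing the values from its docstring:

# Usage sketch, reusing the docstring's examples.
assert has_no_duplicates(['aa', 'bb', 'cc']) is True
assert has_no_duplicates(['aa', 'bb', 'aa']) is False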
def test_create_existing(mp_tmpdir):
"""Run mapchete create and execute."""
temp_mapchete = os.path.join(mp_tmpdir, "temp.mapchete")
temp_process = os.path.join(mp_tmpdir, "temp.py")
out_format = "GTiff"
# create files from template
args = [
"create",
temp_mapchete,
temp_process,
out_format,
"--pyramid-type",
"geodetic",
]
run_cli(args)
# try to create again
with pytest.raises((IOError, OSError)): # for python 2 and 3
run_cli(args, expected_exit_code=-1) | 5,354,302 |
def process_mean_plots(E, modelconfig, datakey, orientation, mean_name):
"""Generates latitudal / lontitudal / monthly mean plots.
Seasonal values are extracted for lat/lon plots. """
experiment = 'SouthernHemisphere'
plot_dir = E.get_plot_dir()
verbosity = E.get_verbosity()
areas = []
plot_grid = modelconfig.getboolean('general', 'plot_background_grid')
# Extract required keys based on datakey
if (orientation == 'monthly'):
if (modelconfig.getboolean('general', 'plot_monthly_averages')):
areas += modelconfig.get(experiment, 'areas').split()
if (modelconfig.getboolean('general', 'plot_sub_areas')):
areas += modelconfig.get(experiment, 'sub_areas').split()
seasons = ['monthly']
else:
areas += modelconfig.get(experiment, 'areas').split()
seasons = modelconfig.get(experiment, 'seasons').split()
# Observations location
obsmodel, obs_loc, models = E.get_clim_model_and_obs_filenames(datakey)
obsfile = nc.Dataset(obs_loc, 'r')
# A-laue_ax+
E.add_to_filelist(obs_loc)
# A-laue_ax-
scale_cloud = False
# Ugly fix for scaling cloud ice/liquid water path values
if (datakey == 'clivi' or datakey == 'clwvi'):
scale_cloud = True
scale = 1E3
scale_str = ' * 1E3'
for area in areas:
# Value configurations
area_key = experiment + '_' + area
lat_min = modelconfig.getint(area_key, 'lat_min')
lat_max = modelconfig.getint(area_key, 'lat_max')
lon_min = modelconfig.getint(area_key, 'lon_min')
lon_max = modelconfig.getint(area_key, 'lon_max')
lat_area = E.get_ticks_labels(np.array([lat_min, lat_max]), 'lats')
# First extract the observations and then start looping over seasons
obslats, obslons, obsdata = E.get_model_data(modelconfig,
experiment,
area,
datakey,
obsfile)
for season in seasons:
# Plot layout configuration
plt.clf()
fig, axs = plt.subplots(2, 1, figsize=(15, 10))
fig.subplots_adjust(top=0.9)
fig.subplots_adjust(right=0.67)
fig.subplots_adjust(hspace=0.35)
if (orientation == 'lats'):
odata = E.extract_seasonal_mean_values(modelconfig,
obsdata,
experiment,
season,
monthly=True)
odata = E.average_data(odata, 1)
xobs = obslats
elif (orientation == 'lons'):
odata = E.extract_seasonal_mean_values(modelconfig,
obsdata,
experiment,
season,
monthly=True)
odata = E.average_data(odata, 2)
xobs = obslons
elif (orientation == 'monthly'):
odata = E.average_data(obsdata, 'monthly')
xobs = np.arange(0, 12, 1)
# Bad cloud values fix
if (scale_cloud):
odata = odata * scale
# Plot model values to first graph
ocolor, odashes, owidth = E.get_model_plot_style(obsmodel)
if (len(odashes) == 0):
axs[0].plot(xobs,
odata,
color=ocolor,
linewidth=owidth,
label=obsmodel + ' (obs)')
else:
line1, = axs[0].plot(xobs,
odata,
'--',
color=ocolor,
linewidth=owidth,
label=obsmodel + ' (obs)')
line1.set_dashes(odashes)
multimodelmean_initialized = False
for model in models:
# Read in model specific data
datafile = nc.Dataset(models[model], 'r')
# A-laue_ax+
E.add_to_filelist(models[model])
# A-laue_ax-
data_units = datafile.variables[datakey].units
if (orientation == 'monthly'):
lats, lons, data = E.get_model_data(modelconfig,
experiment,
area,
datakey,
datafile)
else:
lats, lons, data = E.get_model_data(modelconfig,
experiment,
area,
datakey,
datafile,
extend=orientation)
datafile.close()
# Bad cloud values fix
if (scale_cloud):
data = data * scale
if (data_units == 'kg m-2'):
data_units = r'$\mu$m'
else:
data_units += scale_str
# Process depending on orientation
if (orientation == 'lats'):
data = E.extract_seasonal_mean_values(modelconfig,
data,
experiment,
season,
monthly=True)
data = E.average_data(data, 1)
xdata = lats
interpolate = interp1d(xdata, data, kind='cubic', bounds_error=False)
idata = interpolate(xobs)
elif (orientation == 'lons'):
data = E.extract_seasonal_mean_values(modelconfig,
data,
experiment,
season,
monthly=True)
data = E.average_data(data, 2)
xdata = lons
interpolate = interp1d(xdata, data, kind='cubic', bounds_error=False)
idata = interpolate(xobs)
elif (orientation == 'monthly'):
data = E.average_data(data, 'monthly')
xdata = xobs
idata = data
# plotting custom dashes requires some extra effort (else)
# with empty dashes the format is default
rmse = round(np.mean((idata - odata) ** 2) ** 0.5, 1)
mlabel = model + " (RMSE: " + str(rmse) + ")"
color, dashes, width = E.get_model_plot_style(model)
if (len(dashes) == 0):
axs[0].plot(xdata,
data, color=color,
linewidth=width,
label=mlabel)
axs[1].plot(xobs,
idata - odata,
color=color,
linewidth=width,
label=mlabel)
else:
line1, = axs[0].plot(xdata,
data,
'--',
color=color,
linewidth=width,
label=mlabel)
line1.set_dashes(dashes)
line2, = axs[1].plot(xobs,
idata - odata,
color=color,
linewidth=width,
label=mlabel)
line2.set_dashes(dashes)
# Next the multimodel mean. We can just sum it up and divide
if not (multimodelmean_initialized):
multimodelmean_initialized = True
mmmean = idata
else:
mmmean += idata
# Plot the multimodel mean out if required
if (len(models) > 1):
mmmean = mmmean / len(models)
rmse = round(np.mean((mmmean - odata) ** 2) ** 0.5, 1)
mlabel = "Multimodel mean (RMSE: " + str(rmse) + ")"
color, dashes, width = E.get_model_plot_style('model_mean')
if (len(dashes) == 0):
axs[0].plot(xobs,
mmmean,
color=color,
linewidth=width,
label=mlabel)
axs[1].plot(xobs,
mmmean - odata,
color=color,
linewidth=width,
label=mlabel)
else:
line1, = axs[0].plot(xobs,
mmmean,
'--',
color=color,
linewidth=width,
label=mlabel)
line1.set_dashes(dashes)
line2, = axs[1].plot(xobs,
mmmean - odata,
color=color,
linewidth=width,
label=mlabel)
line2.set_dashes(dashes)
# Now to make the plot pretty i.e. headers, ticks, title etc.
suptitle = E.get_title_basename(datakey)
specifier = mean_name
if (orientation == 'lats'):
title = "Latitudinal mean"
specifier += '-latitudinal-mean'
x_min = lat_min
x_max = lat_max
xticks = E.get_ticks(8, np.array([x_min, x_max]))
lon_info = E.get_ticks_labels(np.array([lon_min, lon_max]),
'lons')
if (int(lon_max) == 360):
suptitle += " (lons [" + lon_info[0] + ":360" + "])"
else:
suptitle += " (lons [" + lon_info[0] + ":" + lon_info[1] + "])"
elif (orientation == 'lons'):
title = "Longitudinal mean"
specifier += '-longitudinal-mean'
x_min = lon_min
x_max = lon_max
xticks = E.get_ticks(8, np.array([x_min, x_max]))
lat_info = E.get_ticks_labels(np.array([lat_min, lat_max]),
'lats')
suptitle += " (lats [" + lat_info[0] + ":" + lat_info[1] + "])"
elif (orientation == 'monthly'):
title = "Monthly mean"
specifier += '-yearly-cycle-' + area
x_min = 0
x_max = 11
xticks = xobs
lat_info = E.get_ticks_labels(np.array([lat_min, lat_max]),
'lats')
lon_info = E.get_ticks_labels(np.array([lon_min, lon_max]),
'lons')
suptitle += " (lats [" + lat_info[0] + ":" + lat_info[1] + "], "
if (lon_max == 360):
suptitle += " lons [" + lon_info[0] + ":360" + "])"
else:
suptitle += " lons [" + lon_info[0] + ":" + lon_info[1] + "])"
if (season == 'monthly'):
pass
else:
suptitle += " for " + season
specifier = specifier + "-" + season
plt.suptitle(suptitle, fontsize=20)
labels = E.get_ticks_labels(xticks, orientation)
axs[0].grid(plot_grid)
axs[0].set_xlim(x_min, x_max)
axs[0].xaxis.set_ticks(xticks)
axs[0].set_xticklabels(labels)
axs[0].locator_params(axis='y', nbins=5)
axs[0].set_title(title + " [" + data_units + "]")
axs[1].grid(plot_grid)
axs[1].set_xlim(x_min, x_max)
axs[1].xaxis.set_ticks(xticks)
axs[1].set_xticklabels(labels)
axs[1].locator_params(axis='y', nbins=5)
axs[1].set_title(title + " (model - obs) [" + data_units + "]")
# Saving the plot and letting the user know what just happened
diag_name = E.get_diag_script_name()
output_file = E.get_plot_output_filename(diag_name=diag_name,
variable=datakey,
specifier=specifier)
output_dir = os.path.join(plot_dir, diag_name)
E.ensure_directory(output_dir)
handles, labels = axs[0].get_legend_handles_labels()
plt.legend(handles,
labels,
loc='center left',
bbox_to_anchor=(0.7, 0.5),
bbox_transform=plt.gcf().transFigure)
plt.savefig(os.path.join(output_dir, output_file))
info("", verbosity, 1)
info("Created image: ", verbosity, 1)
info(output_file, verbosity, 1)
obsfile.close() | 5,354,303 |
def collate_ribocounts_in_df(srp_df, region_type):
"""Collect all ribocount files inside a directory and write them as a dataframe"""
srp = srp_df.study_accession.tolist()[0]
srp_assembly_grouped = srp_df.groupby("assembly")
for assembly, srp_assembly_df in srp_assembly_grouped:
srp_path = srp_assembly_df.srp_path.tolist()[0]
experiments = list(sorted(srp_assembly_df.experiment_accession.unique()))
files = [
os.path.join(
srp_path,
"ribotricer_results",
"region_counts",
region_type,
"{}_counts_cnt.txt".format(experiment),
)
for experiment in experiments
]
files = [f for f in files if os.path.exists(f)]
if not files:
continue
collected_counts = read_raw_counts_into_matrix(files)
collected_counts.columns = [
"{}_{}_{}_{}".format(assembly, srp, srx, region_type)
for srx in collected_counts.columns
]
collected_counts = collected_counts.reset_index()
dir_to_write = os.path.join(
srp_path, "ribotricer_results", "ribotricer_ribo_counts_df"
)
mkdir_p(dir_to_write)
file_to_write = os.path.join(
dir_to_write, "{}_{}_{}.tsv".format(assembly, srp, region_type)
)
collected_counts.to_csv(file_to_write, sep="\t", index=False, header=True) | 5,354,304 |
def uplab_to_renotation_specification(spec, lab):
"""Convert a color in the normalized UP LAB space to its equivalent Munsell color.
Parameters
----------
lab : np.ndarray of shape (3,) and dtype float
The `l`, `a-star` and `b-star` values for the color, with `l` in the domain [0, 1],
and `a-star` and `b-star` each in the domain [-0.5, 0.5].
Returns
-------
np.ndarray of shape (4,) and dtype float
A Colorlab-compatible Munsell specification (`hue_shade`, `value`, `chroma`, `hue_index`),
with `hue_shade` one of [0, 2.5, 5, 7.5], `value` one of [0, 1, 2, ..., 10],
`chroma` one of [0, 2, 4, ..., 50] and `hue_index` one of [1, 2, 3, ..., 10].
Notes
-----
Measures the distance in the UP LAB a-b color plane at the given `l` (luminosity) value
between the given `a*` and `b*` values and those of 4 bracketing `a*` and `b*` value
pairs from the Munsell renotation (`hue_shade` of 2.5, 5, 7.5 and 10, and `chroma` one
of [0, 2, 4, ..., 50]). Selects the one with the closest cartesian distance to the
given target.
"""
hue_shade, value, chroma, hue_index = spec
v_ren = value
if v_ren < 1:
v_ren = 1
elif v_ren > 9 and v_ren < 9.9:
v_ren = 9
v_ren = round(v_ren)
if np.isnan(hue_shade):
# Grays
spec[1] = v_ren
return spec
# Colors
c0, _ = divmod(chroma, 2)
c0 = c0 * 2
c1 = c0 + 2
h0, _ = divmod(hue_shade, 2.5)
h0 = h0 * 2.5
h1 = h0 + 2.5
l, a_star, b_star = lab
closest_dist = None
closest = None
for ct in [c0, c1]:
for ht in [h0, h1]:
test_spec = munsellkit.normalized_color(
np.array([ht, value, ct, hue_index]),
rounding='renotation', out='spec')
lt, at, bt = munsell_specification_to_uplab(test_spec)
distance_sq = (at - a_star) * (at - a_star) + (bt - b_star) * (bt - b_star)
# print(f'test {test_spec}: distance is {distance_sq}')
if closest_dist is None or closest_dist > distance_sq:
closest_dist = distance_sq
closest = test_spec
closest[1] = v_ren
return closest | 5,354,305 |
def test_set_convert2tfrecord(input_seq, output_tfrec, kmer, vocab, seq_type):
"""Converts reads to tfrecord, and saves to output file.
Args:
input_seq: string, path to the input fasta or fastq file.
output_tfrec: string, path to the output tfrecord file.
kmer: int, size of k for reads splitting.
vocab: string, path to the vocabulary file containing all k-mer tokens.
seq_type: string, reads format, should be fasta or fastq.
"""
tf.logging.info("Parsing vocabulary")
word_to_dic = vocab_dict(vocab)
with tf.python_io.TFRecordWriter(output_tfrec) as writer:
with open(input_seq) as handle:
for rec in SeqIO.parse(handle, seq_type):
seq = test_set_read_parser(rec)
kmer_array = seq2kmer(seq, kmer, word_to_dic)
data = \
{
'read': wrap_read(kmer_array)
}
feature = tf.train.Features(feature=data)
example = tf.train.Example(features=feature)
serialized = example.SerializeToString()
writer.write(serialized) | 5,354,306 |
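The snippet above relies on a seq2kmer helper that is not shown here. Below is a hypothetical sketch of what such a k-mer splitter might look like; the token-to-id mapping and the handling of unknown k-mers are assumptions, not the original implementation.

# Hypothetical sketch of a k-mer splitter like the seq2kmer used above.
# The real helper is not shown here, so the unknown-token handling is an assumption.
def seq2kmer_sketch(seq, kmer, word_to_dic, unk_id=0):
    """Split a read into overlapping k-mers and map each to a vocabulary id."""
    tokens = [seq[i:i + kmer] for i in range(len(seq) - kmer + 1)]
    return [word_to_dic.get(token, unk_id) for token in tokens]

# e.g. seq2kmer_sketch("ACGTA", 3, {"ACG": 1, "CGT": 2, "GTA": 3}) -> [1, 2, 3]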
def inv_solve(ridges, XTX, Xtrain=None, Ytrain=None, Xtest=None, Ytest=None,
XTY=None, weights=None, predictions=None, performance=None,
verbose=False, metric='r'):
"""solve ridge problem using cho inversion
Parameters
----------
- [see solve_ridge() and inv_factorize()]
"""
outputs = {
'weights': weights,
'predictions': predictions,
'performance': performance,
}
for r, ridge in enumerate(ridges):
# update output locations
out = {k: v[r, ...] for k, v in outputs.items() if v is not None}
dot = lambda *args: npfast.multi_dot(*args[:-1], out=out.get(args[-1]))
# compute weights
XTX_ridged = np.eye(*XTX.shape)
XTX_ridged *= ridge ** 2
XTX_ridged += XTX
XTX_inv = np.linalg.inv(XTX_ridged)
if weights is not None:
if XTY is not None:
B = dot(XTX_inv, XTY, 'weights')
else:
B = dot(XTX_inv, Xtrain.T, Ytrain, 'weights')
else:
B = None
# compute predictions
if (predictions is not None) or (performance is not None):
if B is not None:
Yhat = dot(Xtest, B, 'predictions')
elif XTY is not None:
Yhat = dot(Xtest, XTX_inv, XTY, 'predictions')
else:
Yhat = dot(Xtest, XTX_inv, Xtrain.T, Ytrain, 'predictions')
# compute model performance
if performance is not None:
out['performance'][:] = compute_performance(Ytest, Yhat, metric)
if verbose >= 1:
print_performance(out['performance'], r, ridges) | 5,354,307 |
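For reference, a minimal NumPy illustration of the closed form computed inside the loop above, B = (X^T X + ridge^2 I)^{-1} X^T Y. The shapes and data are made up, and the snippet does not use the npfast or compute_performance helpers from the original code.

# Minimal numpy illustration of B = (X^T X + ridge^2 * I)^{-1} X^T Y.
import numpy as np

rng = np.random.default_rng(0)
Xtrain = rng.standard_normal((100, 5))
Ytrain = rng.standard_normal((100, 2))
ridge = 10.0

XTX = Xtrain.T @ Xtrain
XTY = Xtrain.T @ Ytrain
B = np.linalg.inv(XTX + ridge ** 2 * np.eye(XTX.shape[0])) @ XTY

# Same result via a linear solve (usually preferred over an explicit inverse).
B_solve = np.linalg.solve(XTX + ridge ** 2 * np.eye(XTX.shape[0]), XTY)
assert np.allclose(B, B_solve)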
def string_type_check(valid_strings, case_sensitive = True, metavar = None):
""" Creates an argparse type for a list of strings.
The passed argument is declared valid if it is a valid string which exists
in the passed list valid_strings. If case_sensitive is False, all input
strings and strings in valid_strings are processed as lowercase. Leading
and trailing whitespace is ignored in all strings.
Returns:
A function which can be passed as an argument type, when calling
add_argument on an ArgumentParser object
Raises:
ArgumentTypeError: Passed argument must be string within valid list.
"""
metavar = 'value' if metavar is None else metavar
valid_strings = [x.strip() for x in valid_strings]
if not case_sensitive:
valid_strings = [x.lower() for x in valid_strings]
def _type_checker(value):
value = str(value)
valid = True
if not case_sensitive:
value = value.lower()
if not value in valid_strings:
valid = False
case_msg = ' (case sensitive)' if case_sensitive else ''
msg = 'invalid choice: %s (valid settings for %s%s are: %s)' % (
value, metavar, case_msg, valid_strings.__str__()[1:-1])
if not valid:
raise argparse.ArgumentTypeError(msg)
return value
return _type_checker | 5,354,308 |
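A short usage sketch wiring the returned checker into argparse; the option name and the choices below are made up for illustration.

# Usage sketch: string_type_check as an argparse type. Option name and choices are made up.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument(
    '--mode',
    type=string_type_check(['fast', 'accurate'], case_sensitive=False, metavar='mode'),
)
args = parser.parse_args(['--mode', 'FAST'])
assert args.mode == 'fast'   # lowercased because case_sensitive=False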
def _getBestSize(value):
"""
Given a size in bytes, convert it into a nice, human-readable value
with units.
"""
if value >= 1024.0**4:
value = value / 1024.0**4
unit = 'TB'
elif value >= 1024.0**3:
value = value / 1024.0**3
unit = 'GB'
elif value >= 1024.0**2:
value = value / 1024.0**2
unit = 'MB'
elif value >= 1024.0:
value = value / 1024.0
unit = 'kB'
else:
unit = 'B'
return value, unit | 5,354,309 |
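A quick usage sketch for _getBestSize (not part of the original snippet):

# Usage sketch for _getBestSize.
assert _getBestSize(512) == (512, 'B')
assert _getBestSize(1536) == (1.5, 'kB')
assert _getBestSize(3 * 1024 ** 3) == (3.0, 'GB')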
def _tpl(repository_ctx, tpl, substitutions = {}, out = None):
"""Generate a build file for bazel based upon the `tpl` template."""
if not out:
out = tpl
repository_ctx.template(
out,
Label("//llvm:%s.tpl" % tpl),
substitutions,
) | 5,354,310 |
def initialize():
"""Configure Plone instance using RelStorage."""
environment = Environment()
environment.setup() | 5,354,311 |
async def subscribe(ctx, *subreddit):
"""
This command will 'subscribe' to a reddit and will make posts from it.
Usage: r/sub <subreddit>
Ex. r/sub news funny husky
Permissions required: Administrator
:param ctx:
:param subreddit:
:return:
"""
sid = ctx.message.server.id
subs = data[sid]['watching']
added = []
for reddit in subreddit:
url = f"https://www.reddit.com/r/{reddit}/new/.json"
posts = await respcheck(url)
if posts:
if reddit.lower() in subs:
await bot.say(f'{reddit} is already in your list!')
continue
else:
subs.append(reddit.lower())
added.append(reddit.lower())
else:
await bot.say(f'Sorry, I can\'t reach {reddit}. '
f'Check your spelling or make sure that the reddit actually exists.')
if added:
data[sid]['watching'] = subs
await bot.say(f"Subreddit(s): {', '.join(added)} added!\n"
f"You will notice this change when I scour reddit again.")
fmtjson.edit_json('options', data)
commandinfo(ctx) | 5,354,312 |
def write_nc_uniform(topography, fobj, format='NETCDF3_64BIT_OFFSET'):
"""
Write topography into a NetCDF file.
Parameters
----------
topography : :obj:`SurfaceTopography`
The topography to write to disk.
fobj : str or stream
Name of the NetCDF file or file stream
format : str
NetCDF file format. Default is 'NETCDF3_64BIT_OFFSET'.
"""
if topography.communicator is not None and topography.communicator.size > 1:
from netCDF4 import Dataset, default_fillvals
kwargs = dict(format=format,
parallel=topography.is_domain_decomposed,
comm=topography.communicator)
var_kwargs = {'fill_value': default_fillvals['f8']}
else:
Dataset = _SpecialNetCDFFile
kwargs = dict(version=format_to_scipy_version[format],
maskandscale=True)
var_kwargs = {}
if not topography.is_domain_decomposed and \
topography.communicator.rank > 1:
return
with Dataset(fobj, 'w', **kwargs) as nc:
# Serialize info dictionary as JSON and write to NetCDF file
info = topography.info.copy()
try:
# We store the unit information separately
del info['unit']
except KeyError:
pass
if info != {}:
nc.json = json.dumps(info, ensure_ascii=True, cls=NumpyEncoder)
# Create dimensions and heights variable
try:
nx, ny = topography.nb_grid_pts
except ValueError:
nx, = topography.nb_grid_pts
nc.createDimension('x', nx)
if topography.dim > 1:
nc.createDimension('y', ny)
heights_var = nc.createVariable('heights', 'f8', ('x', 'y'), **var_kwargs)
else:
heights_var = nc.createVariable('heights', 'f8', ('x',), **var_kwargs)
# Create variables for x- and y-positions, but only if physical_sizes
# exist. (physical_sizes should always exist, but who knows...)
if topography.physical_sizes is not None:
x = topography.positions()
try:
sx, sy = topography.physical_sizes
x, y = x
except ValueError:
sx, = topography.physical_sizes
x_var = nc.createVariable('x', 'f8', ('x',), **var_kwargs)
x_var.length = sx
x_var.periodic = 1 if topography.is_periodic else 0
if topography.unit is not None:
# scipy.io.netcdf_file does not support UTF-8
x_var.unit = mangle_length_unit_ascii(topography.unit)
if topography.dim > 1:
x_var[...] = np.arange(nx) / nx * sx
y_var = nc.createVariable('y', 'f8', ('y',), **var_kwargs)
y_var.length = sy
y_var.periodic = 1 if topography.is_periodic else 0
if topography.unit is not None:
# scipy.io.netcdf_file does not support UTF-8
y_var.unit = mangle_length_unit_ascii(topography.unit)
y_var[...] = np.arange(ny) / ny * sy
if topography.is_domain_decomposed:
heights_var.set_collective(True)
heights_var[topography.subdomain_slices] = topography.heights()
else:
heights_var[...] = topography.heights()
if topography.unit is not None:
heights_var.unit = mangle_length_unit_ascii(topography.unit) | 5,354,313 |
def enable_oeenclave_debug(oe_enclave_addr):
"""For a given OE enclave, load its symbol and enable debug flag for all its TCS"""
enclave = oe_debug_enclave_t(oe_enclave_addr)
# Check if magic matches
if not enclave.is_valid():
return False
# No version specific checks.
# The contract will be extended in backwards compatible manner.
# Debugger may use version to take specific actions in future.
# Check if debugging is enabled.
if enclave.debug == 0:
print ("oelldb: Debugging not enabled for enclave %s" % enclave.path)
return False
# Check if the enclave is loaded in simulation mode.
if enclave.simulate != 0:
print ("oelldb: Enclave %s loaded in simulation mode" % enclave.path)
# Load symbols for the enclave
if load_enclave_symbol(enclave.path, enclave.base_address) != 1:
return False
print("oelldb: Symbols loaded for enclave \n")
for tcs in enclave.tcs:
set_tcs_debug_flag(tcs)
print("oelldb: All tcs set to debug for enclave \n")
return True | 5,354,314 |
def loadxrmcresult_xmimsim(xmimsimpath, outradix="out", convoluted=False):
"""XRMC result based on input files converted from XMIMSIM"""
xrmcoutpath = os.path.join(xmimsimpath, "xrmc", "output")
if convoluted:
suffix = "_convoluted"
else:
suffix = "_lines"
return loadxrmcresult(xrmcoutpath, outradix + suffix, ext=".dat") | 5,354,315 |
def create_line_segments(df, x="lon", y="lat", epsg=4269):
"""Creates a GeodataFrame of line segments from the
shapes dataframe (CRS is NAD83)
Params:
df (DataFrame): pandas DataFrame
x, y (str, optional) Default values x="lon", y="lat",
column names for x and y coordinates
epsg (int): Default value epsg=4269; EPSG value for x,y coordinate system
Returns:
gdf: (GeoDataFrame) Line GeoDataFrame in passed Coordinate System
"""
if df[x].isna().sum() > 0 or df[y].isna().sum() > 0:
raise f"DataFrame contains Null coordinates; consider removing rows with Null {x,y} values"
points = [Point(xy) for xy in zip(df[x], df[y])]
gdf = gpd.GeoDataFrame(df.copy(), geometry=points)
line_segments = (
gdf.groupby(["shape_id"])["geometry"]
.apply(lambda x: LineString(x.tolist()))
.reset_index()
)
gdf_out = gpd.GeoDataFrame(line_segments, geometry="geometry", crs=from_epsg(epsg))
return gdf_out | 5,354,316 |
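A minimal usage sketch, assuming pandas, geopandas, shapely and fiona are available as in the snippet above; the shape_id values and coordinates are made up.

# Usage sketch for create_line_segments with a tiny made-up shapes table.
import pandas as pd

shapes = pd.DataFrame({
    "shape_id": ["A", "A", "B", "B"],
    "lon": [-87.62, -87.63, -87.70, -87.71],
    "lat": [41.88, 41.89, 41.90, 41.91],
})
lines = create_line_segments(shapes)
assert len(lines) == 2                                   # one LineString per shape_id
assert lines.geometry.iloc[0].geom_type == "LineString"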
def population_correlation(data_matrix, x_index, y_index):
"""
data_matrix is a numpy multi-dimensional array (matrix).
x_index and y_index are the indices of the first and second variables, respectively.
Returns the population correlation between the two variables in data_matrix.
"""
transposed_data = data_matrix.transpose()
x_population = transposed_data[x_index]
x_mean = np.mean(x_population)
x_std = np.std(x_population)
y_population = transposed_data[y_index]
y_mean = np.mean(y_population)
y_std = np.std(y_population)
# To calculate the expectation means to calculate the cov(x_population, y_population)
# This can also be done using numpy. For that use: np.cov(x_population, y_population, bias=True)
# bias=True indicates that we are calculating the population covariance
# np.cov returns a bxb matrix, where b is the amount of vectors passed as parameter, in our case b=2
expectation = np.mean((x_population - x_mean) * (y_population - y_mean))
std_product = x_std * y_std
return expectation/std_product | 5,354,317 |
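A usage sketch cross-checking the result against np.corrcoef; the population/sample normalisation factors cancel in the correlation, so the two values agree. The data below is random and made up.

# Usage sketch for population_correlation, cross-checked against np.corrcoef.
import numpy as np

rng = np.random.default_rng(1)
data = rng.standard_normal((200, 3))        # 200 observations, 3 variables
r = population_correlation(data, 0, 1)
assert np.isclose(r, np.corrcoef(data[:, 0], data[:, 1])[0, 1])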
def upload(workspace: str, table: str) -> Any:
"""
Store a nested_json tree into the database in coordinated node and edge tables.
`workspace` - the target workspace.
`table` - the target table.
`data` - the nested_json data, passed in the request body.
"""
# Set up the parameters.
data = request.data.decode("utf8")
space = db.db(workspace)
edgetable_name = f"{table}_edges"
int_nodetable_name = f"{table}_internal_nodes"
leaf_nodetable_name = f"{table}_leaf_nodes"
# Set up the database targets.
if space.has_collection(edgetable_name):
edgetable = space.collection(edgetable_name)
else:
edgetable = space.create_collection(edgetable_name, edge=True)
if space.has_collection(int_nodetable_name):
int_nodetable = space.collection(int_nodetable_name)
else:
int_nodetable = space.create_collection(int_nodetable_name)
if space.has_collection(leaf_nodetable_name):
leaf_nodetable = space.collection(leaf_nodetable_name)
else:
leaf_nodetable = space.create_collection(leaf_nodetable_name)
# Analyze the nested_json data into a node and edge table.
(nodes, edges) = analyze_nested_json(data, int_nodetable_name, leaf_nodetable_name)
# Upload the data to the database.
edgetable.insert_many(edges)
int_nodetable.insert_many(nodes[0])
leaf_nodetable.insert_many(nodes[1])
return dict(
edgecount=len(edges), int_nodecount=len(nodes[0]), leaf_nodecount=len(nodes[1])
) | 5,354,318 |
def validate_google_login(email):
"""
Validate a login completed via Google, returning the user id on success.
An ``ODPIdentityError`` is raised if the login cannot be permitted for any reason.
:param email: the Google email address
:raises ODPUserNotFound: if there is no user account for the given email address
:raises ODPAccountLocked: if the user account has been temporarily locked
:raises ODPAccountDisabled: if the user account has been deactivated
:raises ODPEmailNotVerified: if the email address has not been verified
"""
user = get_user_by_email(email)
if not user:
raise x.ODPUserNotFound
if is_account_locked(user.id):
raise x.ODPAccountLocked
if not user.active:
raise x.ODPAccountDisabled
return user.id | 5,354,319 |
def add_months(start_date, months, date_format=DATE_FORMAT):
"""
Return a date with an added desired number of business months
Example 31/1/2020 + 1 month = 29/2/2020 (one business month)
"""
new_date = start_date + relativedelta(months=+months)
return new_date.strftime(date_format) | 5,354,320 |
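A usage sketch for add_months. DATE_FORMAT is not shown in this snippet, so a "%d/%m/%Y" format is passed explicitly here as an assumption.

# Usage sketch: relativedelta clamps 31 Jan + 1 month to the end of February.
import datetime

start = datetime.date(2020, 1, 31)
assert add_months(start, 1, date_format="%d/%m/%Y") == "29/02/2020"
assert add_months(start, 2, date_format="%d/%m/%Y") == "31/03/2020"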
def inVolts(mv):
""" Converts millivolts to volts... you know, to keep the API
consistent. """
return mv/1000.0 | 5,354,321 |
def test_repository_get_changes_in_a_commit(git_repository):
"""Test getting changes in a commit with multiple change types."""
commit = git_repository.get_commit(revision="8853e0c")
changes = {c.a_path: c for c in commit.get_changes()}
assert "M" == changes["A"].change_type
assert "A" == changes["A"].b_path
assert not changes["A"].added
assert not changes["A"].deleted
assert changes["B"].deleted
assert "B" == changes["B"].b_path
assert "R" == changes["C"].change_type
assert "data/X" == changes["C"].b_path
assert not changes["C"].added
assert not changes["C"].deleted
assert not changes["A"].added | 5,354,322 |
def extract_hit(
hit: Mapping[str, Any],
includes: Tuple[str] = (ID_FIELD,),
source: str = '_source'
) -> Mapping[str, Any]:
"""
Extract a document from a single search result hit.
:param hit: the search hit document
:param includes: the metadata keys to include in the return document
:param source: the key that contains the source document
:return:
"""
doc = {
**{
k: hit.get(k) for k in includes
},
**hit.get(source)
}
# If the document ID is included...
if ID_FIELD in doc:
# ...convert it to a UUID.
doc[ID_FIELD] = uuid.UUID(doc.get(ID_FIELD))
return doc | 5,354,323 |
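A usage sketch with a made-up search hit; ID_FIELD is defined elsewhere and is assumed here to be the Elasticsearch-style "_id" key, and the UUID value is arbitrary.

# Usage sketch for extract_hit with a made-up hit (ID_FIELD assumed to be "_id").
hit = {
    "_id": "12345678-1234-5678-1234-567812345678",
    "_score": 1.0,
    "_source": {"name": "example", "value": 42},
}
doc = extract_hit(hit)
# doc -> {"_id": UUID("12345678-..."), "name": "example", "value": 42}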
def ChangeLookAndFeel(index):
"""
:param index:
"""
# global LOOK_AND_FEEL_TABLE
if sys.platform == 'darwin':
print('*** Changing look and feel is not supported on Mac platform ***')
return
# look and feel table
try:
colors = LOOK_AND_FEEL_TABLE[index]
SetOptions(background_color=colors['BACKGROUND'],
text_element_background_color=colors['BACKGROUND'],
element_background_color=colors['BACKGROUND'],
text_color=colors['TEXT'],
input_elements_background_color=colors['INPUT'],
button_color=colors['BUTTON'],
progress_meter_color=colors['PROGRESS'],
border_width=colors['BORDER'],
slider_border_width=colors['SLIDER_DEPTH'],
progress_meter_border_depth=colors['PROGRESS_DEPTH'],
scrollbar_color=(colors['SCROLL']),
element_text_color=colors['TEXT'],
input_text_color=colors['TEXT_INPUT'])
except: # most likely an index out of range
print('** Warning - Look and Feel value not valid. Change your ChangeLookAndFeel call. **') | 5,354,324 |
def test_schema_component_equality_operators():
"""Test the usage of == for Column, Index and MultiIndex."""
column = Column(Int, Check(lambda s: s >= 0))
index = Index(Int, [Check(lambda x: 1 <= x <= 11, element_wise=True)])
multi_index = MultiIndex(
indexes=[
Index(Int,
Check(lambda s: (s < 5) & (s >= 0)),
name="index0"),
Index(String,
Check(lambda s: s.isin(["foo", "bar"])),
name="index1"),
]
)
not_equal_schema = DataFrameSchema({
"col1": Column(Int, Check(lambda s: s >= 0))
})
assert column == copy.deepcopy(column)
assert column != not_equal_schema
assert index == copy.deepcopy(index)
assert index != not_equal_schema
assert multi_index == copy.deepcopy(multi_index)
assert multi_index != not_equal_schema | 5,354,325 |
def deserialize_wrapper(func, data):
"""
Convert generic productmd exceptions into validation errors.
"""
try:
func(data)
except KeyError as e:
raise serializers.ValidationError(
{'detail': 'Error parsing productmd metadata.',
'reason': 'Missing key %s' % e.message}
)
except Exception as e:
raise serializers.ValidationError(
{'detail': 'Error parsing productmd metadata.',
'reason': str(e)}
) | 5,354,326 |
def test_invalid_response_check(check, instance, aggregator):
"""
Testing invalid fargate metadata payload.
"""
with mock.patch('datadog_checks.ecs_fargate.ecs_fargate.requests.get', return_value=MockResponse("{}", 200)):
check.check(instance)
aggregator.assert_service_check("fargate_check", status=FargateCheck.WARNING, tags=INSTANCE_TAGS, count=1) | 5,354,327 |
def generateUserIDToken(id):
"""Generates a unique user id token."""
t = int(time.time() * 1000)
r = int(random.random() * 100000000000000000)
data = "%s %s %s %s" % (ip, t, r, id)
return md5(data.encode('utf-8')).hexdigest() | 5,354,328 |
def grad_simplex_monomial_basis(dims, n):
"""Return the gradients of the functions returned by
:func:`simplex_monomial_basis`.
:returns: a :class:`tuple` of functions, each of which
accepts arrays of shape *(dims, npts)*
and returns a :class:`tuple` of length *dims* containing
the derivatives along each axis as an array of size *npts*.
'Scalar' evaluation, by passing just one vector of length *dims*,
is also supported.
.. versionadded:: 2016.1
"""
warn("grad_simplex_monomial_basis_with_mode_ids is deprecated. "
"Use monomial_basis_for_space instead. "
"This function will go away in 2022.",
DeprecationWarning, stacklevel=2)
from pytools import generate_nonnegative_integer_tuples_summing_to_at_most \
as gnitstam
return tuple(partial(grad_monomial, order) for order in gnitstam(n, dims)) | 5,354,329 |
def test_claims_check():
"""
arg={'required': True, 'id_token': ['auth_time']}
"""
_info = setup_conv()
conv = _info['conv']
# Need IdToken
conv.events.store(EV_PROTOCOL_RESPONSE, ACCESS_TOKEN_RESPONSE_1)
chk = ClaimsCheck()
kwargs = {'required': True, 'id_token': ['auth_time']}
chk._kwargs = kwargs
res = chk._func(conv)
assert chk._status == OK | 5,354,330 |
def get_collection() -> Collection:
"""Коллекция для хранения моделей."""
return _COLLECTION | 5,354,331 |
def webp_convert(ifile, ofile, m=6, q=90):
"""
WebP: Convert to WebP format
"""
try:
subprocess.check_output([bpath.wppath, '-m', str(m), '-q', str(q), ifile, '-o', ofile], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
logexc(e, "cwebp conversion failed") | 5,354,332 |
def ask_for_flasherhwver():
"""
Ask for the flasher version, either 1 or 2 right now...
"""
#if FLASHER_SKIP_ON_VALID_DETECTION and FLASHER_VERSION != 1:
# return FLASHER_VERSION
FLASHER_VERSION = 1
flash_version = FLASHER_VERSION
if FLASHER_VERSION is None:
while True:
try:
flash_version = int(raw_input("--- Enter version of programmer hardware [Available Versions: Programmer V1 or Programmer V2]: ".format(FLASHVER=flash_version)))
except:
pass
if flash_version == 1 or flash_version == 2:
break
print("<<< USER REPORTED HARDWARE FLASHER REVISION AS VERSION", flash_version, ">>>")
return flash_version | 5,354,333 |
def real_to_complex_channels(x, separate_real_imag=False):
""" Inverse of complex_as_real_channels: C*2 real channels (or 2*C if separate_real_imag) to C complex channels. """
if separate_real_imag:
channel_shape = (2, -1)
permute = (0, 2, 3, 4, 1)
else:
channel_shape = (-1, 2)
permute = (0, 1, 3, 4, 2)
return torch.view_as_complex(channel_reshape(x, channel_shape).permute(*permute).contiguous()) | 5,354,334 |
def create_coffee_machine() -> CoffeeMachine:
"""Create CoffeeMachine object for testing"""
_coffee_machine = CoffeeMachine()
_coffee_machine.refill_water()
_coffee_machine.refill_milk()
_coffee_machine.refill_coffee_beans()
return _coffee_machine | 5,354,335 |
def validate_mqtt_vacuum(value):
"""Validate MQTT vacuum schema."""
schemas = {LEGACY: PLATFORM_SCHEMA_LEGACY, STATE: PLATFORM_SCHEMA_STATE}
return schemas[value[CONF_SCHEMA]](value) | 5,354,336 |
def detect(iring, mode, axis=None, *args, **kwargs):
"""Apply square-law detection to create polarization products.
Args:
iring (Ring or Block): Input data source.
mode (string):
``'scalar': x -> real x.x*``
``'jones': x,y -> complex x.x* + 1j*y.y*, x.y*``
``'stokes': x,y -> real I, Q, U, V``
axis: Integer or string specifying the polarization axis. Defaults to
'pol'. Not used if mode = 'scalar'.
*args: Arguments to ``bifrost.pipeline.TransformBlock``.
**kwargs: Keyword Arguments to ``bifrost.pipeline.TransformBlock``.
**Tensor semantics**::
Input: [..., 'pol', ...], dtype = any complex, space = CUDA
Output: [..., 'pol', ...], dtype = real or complex, space = CUDA
Returns:
DetectBlock: A new block instance.
"""
return DetectBlock(iring, mode, axis, *args, **kwargs) | 5,354,337 |
def url_download_interactive(url, output_file, title='', chunk_size=102400):
"""
Interactively downloads a given file url to a given output file.
:type url: string
:param url: URL for the file to be download
:type output_file: string
:param output_file: file name or absolute path on which to save the file to
:type title: string
:param title: optional title to go along the progress bar
:type chunk_size: integer
:param chunk_size: amount of data to read at a time
"""
output_dir = os.path.dirname(output_file)
output_file = open(output_file, 'w+b')
input_file = urllib2.urlopen(url)
try:
file_size = int(input_file.headers['Content-Length'])
except KeyError:
raise ValueError('Could not find file size in HTTP headers')
logging.info('Downloading %s, %s to %s', os.path.basename(url),
output.display_data_size(file_size), output_dir)
progress_bar = output.ProgressBar(maximum=file_size, title=title)
# Download the file, while interactively updating the progress
progress_bar.draw()
while True:
data = input_file.read(chunk_size)
if data:
progress_bar.append_amount(len(data))
output_file.write(data)
else:
progress_bar.update_amount(file_size)
break
output_file.close() | 5,354,338 |
def DiffedUpdateItem(
Table: TableResource, Key: ItemKey, before: InputItem, after: InputItem, **kwargs
) -> InputItem:
"""Safe top-level diff update that requires only 'before' and 'after' dicts.
By calling this you are trusting that we will make a choice about
whether or not you actually have an update to perform.
"""
item_diff = build_update_diff(before, after)
if item_diff:
logger.info(
f"Updating item {Key} because there was an item diff.",
extra=dict(json=dict(item_diff=item_diff)),
)
kwargs.pop("condition_exists", None)
set_and_remove = select_attributes_for_set_and_remove(item_diff)
return UpdateItem(
Table,
Key,
set_attrs=set_and_remove["set_attrs"],
remove_attrs=set_and_remove["remove_attrs"],
condition_exists=True,
**kwargs,
)
else:
logger.debug(
f"Not updating item {Key} because there was "
"no meaningful difference between the items",
extra=dict(json=dict(before=before, after=after)),
)
return before | 5,354,339 |
def is_watchdog_supported():
""" Return ``True`` if watchdog is available."""
try:
import watchdog
except ImportError:
return False
return True | 5,354,340 |
def test_concat_incompatible_cols(test_pd_df):
"""Check that calling concat on a single-item list returns identical object"""
df1 = IamDataFrame(test_pd_df)
test_pd_df["extra_col"] = "foo"
df2 = IamDataFrame(test_pd_df)
match = "Items have incompatible timeseries data dimensions"
with pytest.raises(ValueError, match=match):
concat([df1, df2]) | 5,354,341 |
def set_bit(arg1, x, bit, y):
"""
set_bit(Int_ctx arg1, Int_net x, unsigned int bit, Int_net y) -> Int_net
Parameters
----------
arg1: Int_ctx
x: Int_net
bit: unsigned int
y: Int_net
"""
return _api.set_bit(arg1, x, bit, y) | 5,354,342 |
def get_time_format(format='medium', locale=LC_TIME):
"""Return the time formatting patterns used by the locale for the specified
format.
>>> get_time_format(locale='en_US')
<DateTimePattern u'h:mm:ss a'>
>>> get_time_format('full', locale='de_DE')
<DateTimePattern u'HH:mm:ss zzzz'>
:param format: the format to use, one of "full", "long", "medium", or
"short"
:param locale: the `Locale` object, or a locale string
"""
return Locale.parse(locale).time_formats[format] | 5,354,343 |
def test_list_posts(mock_client):
"""list_posts should return a generator of posts"""
client = api.Api(UserFactory.create())
posts = client.list_posts("channel", DEFAULT_LISTING_PARAMS)
assert posts == mock_client.subreddit.return_value.hot.return_value
mock_client.subreddit.return_value.hot.assert_called_once_with(
limit=25, params={"count": 0}
)
mock_client.subreddit.assert_called_once_with("channel") | 5,354,344 |
def resource_type_service(resource_type):
"""Gets the service name from a resource type.
:exc:`ValueError` is raised if the resource type is invalid, see
:func:`parse_resource_type`.
>>> resource_type_service('AWS::ECS::Instance')
'ECS'
"""
return parse_resource_type(resource_type)[1] | 5,354,345 |
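The snippet depends on parse_resource_type, which is not shown here. Below is a hypothetical sketch of what such a parser typically does (split on "::" and validate three parts); the exact validation is an assumption.

# Hypothetical sketch of the parse_resource_type helper used above.
def parse_resource_type_sketch(resource_type):
    """Split 'Vendor::Service::Resource' into its three parts."""
    parts = resource_type.split('::')
    if len(parts) != 3:
        raise ValueError('Invalid resource type: %r' % resource_type)
    return tuple(parts)

# parse_resource_type_sketch('AWS::ECS::Instance') -> ('AWS', 'ECS', 'Instance'),
# so index [1] yields the service name 'ECS', as in the docstring example.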
def loads(ss):
""" loads(ss)
Load a struct from the given string.
Parameters
----------
ss : (Unicode) string
A serialized struct (obtained using ssdf.saves()).
"""
# Check
if not isinstance(ss, basestring):
raise ValueError('ssdf.loads() expects a string.')
# Read
reader = _SSDFReader()
return reader.text_to_struct(ss) | 5,354,346 |
def guess_mime_type(file_object: IO) -> str:
"""Guess mime type from file extension."""
mime_type, _encoding = mimetypes.guess_type(file_object.name)
if not mime_type:
mime_type = "application/octet-stream"
return mime_type | 5,354,347 |
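A usage sketch; only the .name attribute of the file object is used, so an in-memory file with a manually set name is enough for illustration.

# Usage sketch for guess_mime_type with an in-memory file object.
import io

fake_file = io.BytesIO(b"...")
fake_file.name = "report.pdf"          # attribute set only for this sketch
assert guess_mime_type(fake_file) == "application/pdf"

fake_file.name = "archive.unknownext"
assert guess_mime_type(fake_file) == "application/octet-stream"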
def postcount_test(metadict_friends):
"""Среднее число постов по выборке, чтобы выделить активных/неактивных неймфагов."""
all_postcount = 0
for namefag in metadict_friends.keys():
name_number = namefag[0]
name_postcount = cursor.execute("SELECT postcount FROM namefags WHERE number=?"\
,(name_number,)).fetchall()
all_postcount = all_postcount + int(name_postcount[0][0])
name_number = len(metadict_friends)
medial_postcount = all_postcount / name_number
return medial_postcount,all_postcount | 5,354,348 |
def cli(ctx, debug):
"""
This is a tool to generate an excel file based on a provided source excel and transformation mapping
"""
log_format = '%(asctime)s|%(levelname)s|%(name)s|(%(funcName)s):-%(message)s'
logging.basicConfig(level=logging.DEBUG if debug else logging.INFO, stream=sys.stdout, format=log_format)
if ctx.invoked_subcommand not in ['version']:
logger.info(f'{"-" * 20} Starting Logging for {ctx.invoked_subcommand} (v{__version__}) {"-" * 20}') | 5,354,349 |
def extract_stack_name(fields):
"""_extract_stack_name(self, fields: list[str]) -> str
Extract a stack name from the fields
Examples:
ffffffff818244f2 [unknown] ([kernel.kallsyms]) -> [kernel.kallsyms]
1094d __GI___libc_recvmsg (/lib/x86_64-linux-gnu/libpthread-2.23.so) -> __GI__libc_recvmsg
"""
if fields[1] == '[unknown]':
return to_module_name(fields[2][1:-1])
return fields[1] | 5,354,350 |
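A usage sketch exercising the symbolised branch only; the '[unknown]' branch depends on the to_module_name helper, which is not shown here.

# Usage sketch for extract_stack_name (symbolised-frame branch).
fields = ['1094d', '__GI___libc_recvmsg', '(/lib/x86_64-linux-gnu/libpthread-2.23.so)']
assert extract_stack_name(fields) == '__GI___libc_recvmsg'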
def parse_args(argv):
"""Parse any command line arguments."""
# Set the default logging level to DEBUG
# log_level = logging.INFO
log_level = logging.DEBUG
# This is the dictionary of arguments.
arg_dict = {'start_date': DEFAULT_START_DATE,
'end_date': DEFAULT_END_DATE,
'type': DEFAULT_REQUEST_TYPE}
try:
opts, args = getopt.getopt(argv,
"hds:e:t:",
["help",
"debug",
"start=",
"end=",
"type="])
except getopt.GetoptError:
usage()
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
usage()
sys.exit()
elif opt in ("-d", "--debug"):
log_level = logging.DEBUG
print('log level is at DEBUG')
elif opt in ("-s", "--start"):
arg_dict['start_date'] = arg
elif opt in ("-e", "--end"):
arg_dict['end_date'] = arg
elif opt in ("-t", "--type"):
arg_dict['type'] = arg
# If this file is running as main, do logging.
if __name__ == "__main__":
logging.basicConfig(filename="log_gpo_tutorial.txt",
level=log_level,
filemode="a")
logging.info('start: ' + strftime("%c"))
return arg_dict | 5,354,351 |
def parse(data: Data, config: RawConfigParser):
"""Parses all subjects documents.
:param data: data object
:type data: Data
:param config: config from config file
:type config: RawConfigParser
"""
while True:
subject = data.get_not_parsed()
if subject is None: # break if no not processed subject exists
break
for document in subject['documents']:
path = document['file']
pdf = parser.from_file(path)
try:
pdf_content = pdf['content']
pdf_content = re.sub(r'(,\d\d)', r'\1|', pdf_content) # insert separator behind number
pdf_content = re.sub(r'(\d)\s+(\d)', r'\1\2', pdf_content) # remove spaces between numbers
except TypeError:
pdf_content = ""
values = {}
for items in config.items('parser'):
values[items[0]] = _extract(pdf_content, items[1])
data.update_parsed(subject['ico'], path, values) | 5,354,352 |
def get_pafy_stream_obj(url,format=None,only_video=False):
"""This function return stream object from pafy
Arguments:
url {string} -- The url of the video from youtube
Returns:
Stream_Obj -- This is a object of Stream class from pafy
"""
try:
obj = pafy.new(url)
# returning only the pafy obj if format is not given
if format == None:
return obj
stream_obj = None
# returning format specified in the parameter
if format == 'AUDIO':
logging.debug("Getting audio pafy stream_object")
stream_obj = obj.getbestaudio(preftype='m4a')
if format == 'VIDEO':
if only_video:
# get only video at 1080p
# stream_obj = obj.getbestvideo(preftype='mp4')
## iterating from backward as best streams are there and
## slecting best 1920x1080p mp4 stream
logging.debug("Getting HQ video pafy stream_object")
for stream in obj.videostreams[::-1]:
if stream.extension == 'mp4':
if stream.dimensions[0] == 1920 and stream.dimensions[1] == 1080:
stream_obj = stream
break
else:
# get best will return both audio and obj normaly at 640p
logging.debug("Getting normal-video pafy stream_object")
stream_obj = obj.getbest(preftype='mp4')
return stream_obj
except OSError as e:
logging.debug("OSError in new pafy")
logging.debug(e)
raise OSError
except Exception as e:
logging.debug("Error occured in new pafy")
logging.debug(e)
return None | 5,354,353 |
def walk_storage(path, topdown=True, onerror=None, followlinks=False,
storage=default_storage):
"""
Generate the file names in a stored directory tree by walking the tree
top-down.
For each directory in the tree rooted at the directory top (including top
itself), it yields a 3-tuple (dirpath, dirnames, filenames).
This is intended for use with an implementation of the Django storage API.
You can specify something other than the default storage instance with
the storage keyword argument.
"""
if not topdown:
raise NotImplementedError
if onerror:
raise NotImplementedError
roots = [path]
while len(roots):
new_roots = []
for root in roots:
dirs, files = storage.listdir(root)
files = [force_bytes(f) for f in files]
dirs = [force_bytes(d) for d in dirs]
yield root, dirs, files
for dn in dirs:
new_roots.append('%s/%s' % (root, dn))
roots[:] = new_roots | 5,354,354 |
def check_reynolds_number(Re):
"""Reynolds number must be between 38e3 and 4e6
Parameters
----------
Re : float
Reynolds number
Raises
------
ValueError
If the value of the Reynolds number is outside the defined layers.
"""
if not (Re_list[0] <= Re <= Re_list[-1]):
raise ValueError('Reynolds number is not inside correct range') | 5,354,355 |
def create_fixtures(model_names, excludes=[], from_file=False):
"""Create json fixtures
Parameters:
model_names (list of str): names of models to create fixtures. If empty, create all.
excludes (list of str): names of models to exclude
from_file (boolean): True - create from xlsx file, False - create from db.
"""
models = get_fixture_models(model_names, excludes)
for model in models:
if from_file:
create_fixture_from_file(model)
else:
create_fixture_from_db(model) | 5,354,356 |
def valid_shape(shape):
"""
@returns: True if given shape is a valid tetris shape
"""
return shape in SHAPES and len(shape) == 1 | 5,354,357 |
def get_device(device_id):
"""
@api {get} /devices/:device_id Get Unique Device
@apiVersion 1.0.0
@apiName GetDevice
@apiGroup Device
@apiSuccess {Boolean} success Request status
@apiSuccess {Object} message Respond payload
@apiSuccess {Object} message.device Device object
"""
device_obj = Device.query.get(device_id)
if not device_obj:
return jsonify(success=False, message='not found'), 404
return jsonify(success=True, message={'device': device_obj.to_dict()}) | 5,354,358 |
def read_imgs(filename, num_images):
"""读入图片数据
:param filename:
:param num_images:
:return:
"""
with gzip.open(filename) as bytestream:
bytestream.read(16)
buf = bytestream.read(
28 * 28 * num_images * 1)
data = np.frombuffer(buf, dtype=np.uint8)
data = data.reshape(num_images, 28, 28, 1)
return data | 5,354,359 |
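A usage sketch assuming an MNIST-style gzipped image file; the filename below is only a placeholder.

# Usage sketch for read_imgs (filename is a placeholder).
images = read_imgs("train-images-idx3-ubyte.gz", num_images=60000)
print(images.shape)   # -> (60000, 28, 28, 1), dtype uint8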
def test_toAssembly():
"""For testing toAssembly function"""
assert toAssembly(000) == 'HLT'
assert toAssembly(101) == 'ADD 1'
assert toAssembly(202) == 'SUB 2'
assert toAssembly(303) == 'STA 3'
assert toAssembly(404) == 'LDA 4'
assert toAssembly(505) == 'BRA 5'
assert toAssembly(606) == 'BRZ 6'
assert toAssembly(700) == 'INP'
assert toAssembly(800) == 'OUT' | 5,354,360 |
def all_faces(coord, connect):
""" Gets vertices of all faces of the mesh.
Args:
coord (:obj:`numpy.array`): Coordinates of the element.
connect (:obj:`numpy.array`): Element connectivity.
Returns:
Corresponding nodes.
"""
nodes_per_face = np.array([connect[:, [1,2,3,4]], connect[:, [5,6,7,8]], \
connect[:, [6,7,3,2]], connect[:, [7,8,4,3]], \
connect[:, [6,5,1,2]], connect[:, [5,8,4,1]]]).reshape(-1,4)
ind_faces = npi.indices(coord[:,0], nodes_per_face.flatten()).reshape(-1, 4)
return ind_faces | 5,354,361 |
def _resource_path_dev(relative_path):
"""
:return: Package relative path to resource
"""
base_path = os.path.dirname(os.path.abspath(__file__))
return os.path.join(base_path, relative_path) | 5,354,362 |
def test_default_transfomer_visits_tokens():
"""
Ensures the default instance of the ``SemselParser`` uses the appropriate
tokenized tree transformer.
"""
parser = SemselParser()
assert isinstance(parser.transformer, SemselTransformer)
assert parser.transformer.__visit_tokens__ | 5,354,363 |
def edit_assignment(request_ctx, course_id, id, assignment_name=None, assignment_position=None, assignment_submission_types=None, assignment_allowed_extensions=None, assignment_turnitin_enabled=None, assignment_turnitin_settings=None, assignment_peer_reviews=None, assignment_automatic_peer_reviews=None, assignment_notify_of_update=None, assignment_group_category_id=None, assignment_grade_group_students_individually=None, assignment_external_tool_tag_attributes=None, assignment_points_possible=None, assignment_grading_type=None, assignment_due_at=None, assignment_lock_at=None, assignment_unlock_at=None, assignment_description=None, assignment_assignment_group_id=None, assignment_muted=None, assignment_assignment_overrides=None, assignment_only_visible_to_overrides=None, assignment_published=None, assignment_grading_standard_id=None, **request_kwargs):
"""
Modify an existing assignment.
If the assignment[assignment_overrides] key is absent, any existing
overrides are kept as is. If the assignment[assignment_overrides] key is
present, existing overrides are updated or deleted (and new ones created,
as necessary) to match the provided list.
NOTE: The assignment overrides feature is in beta.
:param request_ctx: The request context
:type request_ctx: :class:RequestContext
:param course_id: (required) ID
:type course_id: string
:param id: (required) ID
:type id: string
:param assignment_name: (optional) The assignment name.
:type assignment_name: string or None
:param assignment_position: (optional) The position of this assignment in the group when displaying assignment lists.
:type assignment_position: integer or None
:param assignment_submission_types: (optional) List of supported submission types for the assignment. Unless the assignment is allowing online submissions, the array should only have one element. If not allowing online submissions, your options are: "online_quiz" "none" "on_paper" "online_quiz" "discussion_topic" "external_tool" If you are allowing online submissions, you can have one or many allowed submission types: "online_upload" "online_text_entry" "online_url" "media_recording" (Only valid when the Kaltura plugin is enabled)
:type assignment_submission_types: string or None
:param assignment_allowed_extensions: (optional) Allowed extensions if submission_types includes "online_upload" Example: allowed_extensions: ["docx","ppt"]
:type assignment_allowed_extensions: string or None
:param assignment_turnitin_enabled: (optional) Only applies when the Turnitin plugin is enabled for a course and the submission_types array includes "online_upload". Toggles Turnitin submissions for the assignment. Will be ignored if Turnitin is not available for the course.
:type assignment_turnitin_enabled: boolean or None
:param assignment_turnitin_settings: (optional) Settings to send along to turnitin. See Assignment object definition for format.
:type assignment_turnitin_settings: string or None
:param assignment_peer_reviews: (optional) If submission_types does not include external_tool,discussion_topic, online_quiz, or on_paper, determines whether or not peer reviews will be turned on for the assignment.
:type assignment_peer_reviews: boolean or None
:param assignment_automatic_peer_reviews: (optional) Whether peer reviews will be assigned automatically by Canvas or if teachers must manually assign peer reviews. Does not apply if peer reviews are not enabled.
:type assignment_automatic_peer_reviews: boolean or None
:param assignment_notify_of_update: (optional) If true, Canvas will send a notification to students in the class notifying them that the content has changed.
:type assignment_notify_of_update: boolean or None
:param assignment_group_category_id: (optional) If present, the assignment will become a group assignment assigned to the group.
:type assignment_group_category_id: integer or None
:param assignment_grade_group_students_individually: (optional) If this is a group assignment, teachers have the options to grade students individually. If false, Canvas will apply the assignment's score to each member of the group. If true, the teacher can manually assign scores to each member of the group.
:type assignment_grade_group_students_individually: integer or None
:param assignment_external_tool_tag_attributes: (optional) Hash of attributes if submission_types is ["external_tool"] Example: external_tool_tag_attributes: { // url to the external tool url: "http://instructure.com", // create a new tab for the module, defaults to false. new_tab: false }
:type assignment_external_tool_tag_attributes: string or None
:param assignment_points_possible: (optional) The maximum points possible on the assignment.
:type assignment_points_possible: float or None
:param assignment_grading_type: (optional) The strategy used for grading the assignment. The assignment is ungraded if this field is omitted.
:type assignment_grading_type: string or None
:param assignment_due_at: (optional) The day/time the assignment is due. Accepts times in ISO 8601 format, e.g. 2014-10-21T18:48:00Z.
:type assignment_due_at: timestamp or None
:param assignment_lock_at: (optional) The day/time the assignment is locked after. Accepts times in ISO 8601 format, e.g. 2014-10-21T18:48:00Z.
:type assignment_lock_at: timestamp or None
:param assignment_unlock_at: (optional) The day/time the assignment is unlocked. Accepts times in ISO 8601 format, e.g. 2014-10-21T18:48:00Z.
:type assignment_unlock_at: timestamp or None
:param assignment_description: (optional) The assignment's description, supports HTML.
:type assignment_description: string or None
:param assignment_assignment_group_id: (optional) The assignment group id to put the assignment in. Defaults to the top assignment group in the course.
:type assignment_assignment_group_id: integer or None
:param assignment_muted: (optional) Whether this assignment is muted. A muted assignment does not send change notifications and hides grades from students. Defaults to false.
:type assignment_muted: boolean or None
:param assignment_assignment_overrides: (optional) List of overrides for the assignment. NOTE: The assignment overrides feature is in beta.
:type assignment_assignment_overrides: assignmentoverride or None
:param assignment_only_visible_to_overrides: (optional) Whether this assignment is only visible to overrides (Only useful if 'differentiated assignments' account setting is on)
:type assignment_only_visible_to_overrides: boolean or None
:param assignment_published: (optional) Whether this assignment is published. (Only useful if 'draft state' account setting is on) Unpublished assignments are not visible to students.
:type assignment_published: boolean or None
:param assignment_grading_standard_id: (optional) The grading standard id to set for the course. If no value is provided for this argument the current grading_standard will be un-set from this course. This will update the grading_type for the course to 'letter_grade' unless it is already 'gpa_scale'.
:type assignment_grading_standard_id: integer or None
:return: Edit an assignment
:rtype: requests.Response (with Assignment data)
"""
assignment_submission_types_types = ('online_quiz', 'none', 'on_paper', 'online_quiz', 'discussion_topic', 'external_tool', 'online_upload', 'online_text_entry', 'online_url', 'media_recording')
assignment_grading_type_types = ('pass_fail', 'percent', 'letter_grade', 'gpa_scale', 'points')
utils.validate_attr_is_acceptable(assignment_submission_types, assignment_submission_types_types)
utils.validate_attr_is_acceptable(assignment_grading_type, assignment_grading_type_types)
path = '/v1/courses/{course_id}/assignments/{id}'
payload = {
'assignment[name]' : assignment_name,
'assignment[position]' : assignment_position,
'assignment[submission_types][]' : assignment_submission_types,
'assignment[allowed_extensions]' : assignment_allowed_extensions,
'assignment[turnitin_enabled]' : assignment_turnitin_enabled,
'assignment[turnitin_settings]' : assignment_turnitin_settings,
'assignment[peer_reviews]' : assignment_peer_reviews,
'assignment[automatic_peer_reviews]' : assignment_automatic_peer_reviews,
'assignment[notify_of_update]' : assignment_notify_of_update,
'assignment[group_category_id]' : assignment_group_category_id,
'assignment[grade_group_students_individually]' : assignment_grade_group_students_individually,
'assignment[points_possible]' : assignment_points_possible,
'assignment[grading_type]' : assignment_grading_type,
'assignment[due_at]' : assignment_due_at,
'assignment[lock_at]' : assignment_lock_at,
'assignment[unlock_at]' : assignment_unlock_at,
'assignment[description]' : assignment_description,
'assignment[assignment_group_id]' : assignment_assignment_group_id,
'assignment[muted]' : assignment_muted,
'assignment[assignment_overrides]' : assignment_assignment_overrides,
'assignment[only_visible_to_overrides]' : assignment_only_visible_to_overrides,
'assignment[published]' : assignment_published,
'assignment[grading_standard_id]' : assignment_grading_standard_id,
}
for attribute, value in list((assignment_external_tool_tag_attributes or {}).items()):
payload['assignment[external_tool_tag_attributes][{}]'.format(attribute)] = value
url = request_ctx.base_api_url + path.format(course_id=course_id, id=id)
response = client.put(request_ctx, url, payload=payload, **request_kwargs)
return response | 5,354,364 |
def alpha_097(code, end_date=None, fq="pre"):
"""
Formula:
STD(VOLUME, 10)
Inputs:
code: stock pool
end_date: query date
Outputs:
value of the factor
"""
end_date = to_date_str(end_date)
func_name = sys._getframe().f_code.co_name
return JQDataClient.instance().get_alpha_191(**locals()) | 5,354,365 |
def weights_init(layer):
"""
Weight initialization.
Args:
layer: one layer instance
"""
if isinstance(layer, t.nn.Linear) or isinstance(layer, t.nn.BatchNorm1d):
t.nn.init.normal_(layer.weight, 0.0, 0.02) # normal init with mean 0.0 and std 0.02
t.nn.init.constant_(layer.bias, 0.0) | 5,354,366 |
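A usage sketch applying the initializer to a model with torch's Module.apply, which walks every submodule; the model architecture below is made up.

# Usage sketch: Module.apply passes every submodule to weights_init.
import torch as t

model = t.nn.Sequential(
    t.nn.Linear(16, 32),
    t.nn.BatchNorm1d(32),
    t.nn.ReLU(),
)
model.apply(weights_init)   # Linear and BatchNorm1d get N(0, 0.02) weights and zero bias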
def filter(p):
"""
Convert a list of indices into a list of words.
"""
result = []
for idx in p:
if idx == stop_tag:
break
if idx == padding_tag: continue
result.append(index_word[idx])
return result | 5,354,367 |
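A usage sketch; stop_tag, padding_tag and index_word are module-level lookups defined elsewhere in the original code, so the toy values below are assumptions.

# Usage sketch for filter (toy vocabulary and tags are assumptions).
index_word = {1: 'hello', 2: 'world'}
stop_tag = 0
padding_tag = 3

print(filter([1, 3, 2, 0, 1]))   # -> ['hello', 'world'] (padding skipped, stop ends decoding)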
def multimodal(seed: tp.Optional[int] = None, para: bool = False) -> tp.Iterator[Experiment]:
"""Experiment on multimodal functions, namely hm, rastrigin, griewank, rosenbrock, ackley, lunacek,
deceptivemultimodal.
0 or 5 dummy variable per real variable.
Base dimension 3 or 25.
Budget in 3000, 10000, 30000, 100000.
Sequential.
"""
seedg = create_seed_generator(seed)
names = ["hm", "rastrigin", "griewank", "rosenbrock", "ackley", "lunacek", "deceptivemultimodal"]
# Keep in mind that Rosenbrock is multimodal in high dimension http://ieeexplore.ieee.org/document/6792472/.
optims = get_optimizers("basics", seed=next(seedg))
if not para:
optims += get_optimizers("scipy", seed=next(seedg))
# + list(sorted(x for x, y in ng.optimizers.registry.items() if "Chain" in x or "BO" in x))
functions = [
ArtificialFunction(name, block_dimension=bd, useless_variables=bd * uv_factor)
for name in names
for bd in [3, 25]
for uv_factor in [0, 5]
]
for func in functions:
for optim in optims:
for budget in [3000, 10000, 30000, 100000]:
for nw in [1000] if para else [1]:
yield Experiment(func, optim, budget=budget, num_workers=nw, seed=next(seedg)) | 5,354,368 |
def assert_almost_equal(actual: numpy.ndarray, desired: List[Union[complex, int]]):
"""
usage.scipy: 6
"""
... | 5,354,369 |
def get_sequin_annots(sequin_path, ref_contigs, quiet=False):
"""
Load all genes in the Sequin table as SeqRecords, fetching their sequence data from the reference.
ref_contigs is a dictionary of ref contig sequences created with BioPython's SeqIO.to_dict().
For documentation on the Sequin table format, see: https://www.ncbi.nlm.nih.gov/Sequin/table.html
Returns a dictionary with contig names as keys, and lists of (start, end, rev_strand, SeqRecord,
coding_blocks) tuples for each contig in ref_contigs.
"""
annots = defaultdict(list)
# We need a dummy class to hold the current state while parsing
# (otherwise the below private functions can't modify it; there's no "nonlocal" in python 2.x)
class _:
in_contig = None
in_feature = None
gene_name = None
desc = None
chrom_start = None
chrom_end = None
strand = None
feature_seq_str = ""
coding_blocks = []
def _save_sequin_feature():
# The only features we care about are the CDS features. Others get discarded during parsing.
if _.in_feature == "CDS":
if len(_.feature_seq_str) == 0:
if not quiet: sys.stderr.write("WARN: 0-length CDS in contig %s" % _.in_contig)
elif _.gene_name is None or _.strand is None or _.chrom_start is None or _.chrom_end is None:
if not quiet: sys.stderr.write("WARN: invalid CDS feature in contig %s" % _.in_contig)
else:
gene_seq = Seq(_.feature_seq_str, generic_dna)
if _.strand == '-':
gene_seq = gene_seq.reverse_complement()
gene_seq_record = SeqRecord(gene_seq, id=_.gene_name, name=_.gene_name, description=_.desc)
annot = Annot(_.chrom_start, _.chrom_end, _.strand == '-', gene_seq_record,
_.coding_blocks)
annots[contig_to_vcf_chrom(_.in_contig)].append(annot)
_.in_feature = _.gene_name = _.desc = _.chrom_start = _.chrom_end = _.strand = None
_.feature_seq_str = ""
_.coding_blocks = []
def _update_sequin_feature(fields):
if fields[0] != "" and fields[1] != "":
# If the first two fields are present, this specifies a sequence range
if not (fields[0].isdigit() and fields[1].isdigit()):
# We will only attempt to utilize *complete* CDS features
# (None of the start or end positions can be qualified by ">" or "<")
_.in_feature = "CDS-partial"
return
# Append the specified sequence to the `_.feature_seq_str`.
# Note: Sequin table coordinates, like GenBank, are 1-indexed, right-closed.
start = int(fields[0])
end = int(fields[1])
if _.strand is None:
_.strand = '+' if start <= end else '-'
elif _.strand != ('+' if start <= end else '-'):
sys.stderr.write("WARN: strand changed direction, invalid CDS")
_.in_feature = "CDS-partial"
return
if _.strand == '-':
start, end = end, start
start -= 1
ref_contig = ref_contigs[_.in_contig]
seg = str(ref_contig.seq)[start:end]
_.coding_blocks.append((start, end))
_.feature_seq_str = seg + _.feature_seq_str if _.strand == '-' else _.feature_seq_str + seg
_.chrom_start = min(start, _.chrom_start if _.chrom_start is not None else float('inf'))
_.chrom_end = max(end, _.chrom_end if _.chrom_end is not None else float('-inf'))
elif len(fields) >= 5:
# If the first three fields are blank, this specifies a qualifier key + value
if fields[3] == "gene":
_.gene_name = fields[4]
elif fields[3] == "product":
_.desc = fields[4]
with open(sequin_path) as f:
for line in f:
line = line.rstrip("\n")
fields = line.split("\t", 4)
if len(line.strip()) == 0:
# Whitespace-only lines signal the end of feature data for a contig.
# They may be followed by INFO: lines from the annotator, which we ignore.
_save_sequin_feature()
_.in_contig = None
elif _.in_contig is None and line[0] == '>':
# Lines that begin with ">Feature " signal the start of feature data for a contig
# Fields are separated by spaces; the second field is the full contig ID
_save_sequin_feature()
sp_fields = line[1:].split(' ')
if sp_fields[0] == 'Feature' and len(sp_fields) >= 2:
                    if sp_fields[1] in ref_contigs:
_.in_contig = sp_fields[1]
elif not quiet:
sys.stderr.write("WARN: unknown contig in Sequin file: %s" % sp_fields[1])
elif _.in_contig is not None:
if len(fields) < 3:
if not quiet: sys.stderr.write("WARN: incomplete Sequin line: %s" % line)
                    continue
in_new_feature = fields[2].strip() != ""
if _.in_feature is None or in_new_feature:
_save_sequin_feature()
_.in_feature = fields[2].strip()
if _.in_feature == "CDS":
_update_sequin_feature(fields)
elif _.in_feature == "CDS":
_update_sequin_feature(fields)
return annots | 5,354,370 |
def transformer_decoder_layer(dec_input,
enc_output,
slf_attn_bias,
dec_enc_attn_bias,
n_head,
d_key,
d_value,
d_model,
d_inner_hid,
prepostprocess_dropout,
attention_dropout,
relu_dropout,
hidden_act,
preprocess_cmd,
postprocess_cmd,
cache=None,
gather_idx=None,
param_initializer=None,
name=''):
"""
The layer to be stacked in decoder part.
:param dec_input: (batch_size, tgt_len, emb_dim)
:param enc_output: (batch_size, n_tokens, emb_dim)
:param slf_attn_bias: (batch_size, n_head, tgt_len, tgt_len)
:param dec_enc_attn_bias: (batch_size, n_head, tgt_len, n_tokens)
"""
# (batch_size, tgt_len, emb_dim)
slf_attn_output = multi_head_attention(
queries=pre_process_layer(out=dec_input, # add layer normalization
process_cmd=preprocess_cmd,
dropout_rate=prepostprocess_dropout,
name=name + '_pre_slf_attn'),
keys=None,
values=None,
attn_bias=slf_attn_bias, # (batch_size, n_head, tgt_len, tgt_len)
d_key=d_key,
d_value=d_value,
d_model=d_model,
n_head=n_head,
dropout_rate=attention_dropout,
cache=cache,
gather_idx=gather_idx,
param_initializer=param_initializer,
name=name + '_slf_attn')
# add dropout and residual connection
# (batch_size, tgt_len, emb_dim)
slf_attn_output = post_process_layer(
prev_out=dec_input,
out=slf_attn_output,
process_cmd=postprocess_cmd,
dropout_rate=prepostprocess_dropout,
name=name + '_post_slf_attn')
# (batch_size, tgt_len, emb_dim)
context_attn_output = multi_head_attention(
queries=pre_process_layer(out=slf_attn_output, # add layer normalization
process_cmd=preprocess_cmd,
dropout_rate=prepostprocess_dropout,
name=name + '_pre_context_attn'),
keys=enc_output, # (batch_size, n_tokens, emb_dim)
values=enc_output, # (batch_size, n_tokens, emb_dim)
attn_bias=dec_enc_attn_bias, # (batch_size, n_head, tgt_len, n_tokens)
d_key=d_key,
d_value=d_value,
d_model=d_model,
n_head=n_head,
dropout_rate=attention_dropout,
cache=cache,
gather_idx=gather_idx,
static_kv=True,
param_initializer=param_initializer,
name=name + '_context_attn')
# add dropout and residual connection
context_attn_output = post_process_layer(
prev_out=slf_attn_output,
out=context_attn_output,
process_cmd=postprocess_cmd,
dropout_rate=prepostprocess_dropout,
name=name + '_post_context_attn')
ffd_output = positionwise_feed_forward(
x=pre_process_layer(out=context_attn_output, # add layer normalization
process_cmd=preprocess_cmd,
dropout_rate=prepostprocess_dropout,
name=name + '_pre_ffn'),
d_inner_hid=d_inner_hid,
d_hid=d_model,
dropout_rate=relu_dropout,
hidden_act=hidden_act,
param_initializer=param_initializer,
name=name + '_ffn')
# add dropout and residual connection
dec_output = post_process_layer(
prev_out=context_attn_output,
out=ffd_output,
process_cmd=postprocess_cmd,
dropout_rate=prepostprocess_dropout,
name=name + '_post_ffn')
return dec_output | 5,354,371 |
def replace_word_choice(sentence: str, old_word: str, new_word: str) -> str:
"""Replace a word in the string with another word.
:param sentence: str - a sentence to replace words in.
:param old_word: str - word to replace
:param new_word: str - replacement word
:return: str - input sentence with new words in place of old words
"""
return sentence.replace(old_word, new_word) | 5,354,372 |
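A quick illustrative call of the helper above (the values are made up):
print(replace_word_choice("the quick brown fox", "quick", "slow"))
# -> "the slow brown fox"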
def append_slash(url):
"""Make sure we append a slash at the end of the URL otherwise we
have issues with urljoin Example:
>>> urlparse.urljoin('http://www.example.com/api/v3', 'user/1/')
'http://www.example.com/api/user/1/'
"""
if url and not url.endswith('/'):
url = '{0}/'.format(url)
return url | 5,354,373 |
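A small sketch of why the trailing slash matters; it uses Python 3's urllib (the docstring example refers to the Python 2 urlparse module), and the URL is illustrative:
from urllib.parse import urljoin
base = append_slash("http://www.example.com/api/v3")
print(urljoin(base, "user/1/"))
# -> "http://www.example.com/api/v3/user/1/"  (the "v3" segment is preserved)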
def find_cards(thresh_image):
"""Finds all card-sized contours in a thresholded camera image.
Returns the number of cards, and a list of card contours sorted
from largest to smallest."""
# Find contours and sort their indices by contour size
dummy, cnts, hier = cv2.findContours(thresh_image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
index_sort = sorted(range(len(cnts)), key=lambda i: cv2.contourArea(cnts[i]), reverse=True)
# If there are no contours, do nothing
if len(cnts) == 0:
return [], []
# Otherwise, initialize empty sorted contour and hierarchy lists
cnts_sort = []
hier_sort = []
cnt_is_card = np.zeros(len(cnts), dtype=int)
# Fill empty lists with sorted contour and sorted hierarchy. Now,
# the indices of the contour list still correspond with those of
# the hierarchy list. The hierarchy array can be used to check if
# the contours have parents or not.
for i in index_sort:
cnts_sort.append(cnts[i])
hier_sort.append(hier[0][i])
# Determine which of the contours are cards by applying the
# following criteria: 1) Smaller area than the maximum card size,
# 2), bigger area than the minimum card size, 3) have no parents,
# and 4) have four corners
for i in range(len(cnts_sort)):
size = cv2.contourArea(cnts_sort[i])
peri = cv2.arcLength(cnts_sort[i], True)
approx = cv2.approxPolyDP(cnts_sort[i], 0.01*peri, True)
if ((size < CARD_MAX_AREA) and (size > CARD_MIN_AREA) and (hier_sort[i][3] == -1) and (len(approx) == 4)):
cnt_is_card[i] = 1
return cnts_sort, cnt_is_card | 5,354,374 |
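A usage sketch for the detector above; the file name is hypothetical and the threshold value is only illustrative:
import cv2
frame = cv2.imread("table.jpg", cv2.IMREAD_GRAYSCALE)
_, thresh = cv2.threshold(frame, 100, 255, cv2.THRESH_BINARY)
cnts_sort, cnt_is_card = find_cards(thresh)
cards = [c for c, is_card in zip(cnts_sort, cnt_is_card) if is_card]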
def test_version():
"""Check that PyProject and __version__ are equivalent."""
data = Path('pyproject.toml').read_text()
result = tomli.loads(data)['tool']['poetry']['version']
assert result == __version__ | 5,354,375 |
def plot_substructure_PMF(path, temps_to_plot, legend = True, legend_loc=None, upper_cutoff=25, integrate_out = [],states_to_plot = 'All' , linewidth = 1.3, alpha = 0.4,legend_fontsize = 40, ax = None, y_space=0.5, labelsize = 35,fontsize=30, label_fontsize=20, markersize = 120, temp_norm = 1):
"""
    path indicates where the PMFs that you want to plot are located, for instance 'ADK_umbrella_multistart/Substructure_PMF.dat'
    We do not plot any free energies with value higher than upper_cutoff
    y_space is how much vertical distance we require between labels
    Loops through every pair of points with consecutive N (ex. N=1 and N=2), and if
    they differ by only one substructure (ex. 01000 and 01100), draw a line between them,
    making sure lines are behind text to avoid collisions
    Can also apply the ad hoc function switch labels in case you want to, say, rename 110 as 101;
    then you would have the optional argument switch=(1,2)
    You can also choose to plot only specific states, such as 'a', 'ab', 'null', etc.
    But by default, you plot all states that have free energy less than upper_cutoff
"""
PMF_info = joblib.load(path)
x = PMF_info['tops']
free_energies = PMF_info['top free energies'][:,:,0] #only take the first page, since second is uncertainties, which this function does not include
temperatures = PMF_info['eq temps']
temperatures=np.array(temperatures)
x=np.array(x)
for t,temp in enumerate(temps_to_plot):
indices=np.where(temperatures==temp)[0][0]
if type(indices)==np.int64:
indices=np.where(temperatures==temp)[0]
PMFs_to_plot=free_energies[np.array(indices), :]
for f in range(len(x)):
if np.min(PMFs_to_plot[:,f])>upper_cutoff:
PMFs_to_plot[:,f]=np.nan
if states_to_plot !='All':
for s, state in enumerate(x):
state_string = utils.barcodes_to_labels(state)
if state_string not in states_to_plot:
PMFs_to_plot[:,s]=np.nan
keep_indices=~np.isnan(PMFs_to_plot)
#print(keep_indices)
PMFs_to_plot=PMFs_to_plot[:,keep_indices[0,:]]
x_to_plot=x[keep_indices[0,:]]
N_substructures=[] #how many substructures are formed at each configuration
for i, xx in enumerate(x_to_plot):
if xx == '∅':
N_substructures.append(0)
else:
N_substructures.append(len(xx))
if ax ==None: fig, ax = plt.subplots()
if temp_norm != 1:
label='T={} $T_M$'.format(round(temp/temp_norm, 2))
else:
label = 'T = {}'.format(temp)
ax.scatter(N_substructures, PMFs_to_plot,label = label, s = markersize)
#We now annotate the plot, keeping track systematically of where we add each annotation, to avoid clashes
N_substructures=np.array(N_substructures)
        for N in sorted(list(set(N_substructures))): #loop through all the N values that are present, in order
annot_y_values=[] #heights for the existing annotations
indices=np.where(N_substructures==N)[0] #Which configurations have the current value of N
indices=indices[np.argsort(PMFs_to_plot[0,indices])] #sort these configurations in order of increasing PMF
if N<np.max(N_substructures):
next_indices=np.where(N_substructures==N+1)[0]
for i in indices:
tentative_y=PMFs_to_plot[0,i]-0.05 #where we hope to add annotation: Right next to PMF y value
if len(annot_y_values)>0 and tentative_y - np.max(annot_y_values)<y_space:
y=np.max(annot_y_values)+y_space
else:
y=tentative_y
annot_y_values.append(y)
a=x_to_plot[i]
#if N==0: str_to_plot='$\emptyset$'
ax.annotate(x_to_plot[i], xy=(N+0.1, y), fontsize=label_fontsize )
#draw line between configurations if they differ by one substructure
if N<np.max(N_substructures):
for j in next_indices:
b=x_to_plot[j]
if utils.connect_tops(a, b):
ax.plot([N, N+1], [PMFs_to_plot[0,i], PMFs_to_plot[0,j]], color='blue', linestyle=':', linewidth=linewidth, alpha=alpha)
#plt.ylim((0, upper_cutoff))
ax.tick_params(axis='both', labelsize=labelsize)
ax.set_xlim(np.min(N_substructures)-0.5, np.max(N_substructures)+0.9)
ax.set_xticks(np.arange(np.min(N_substructures), np.max(N_substructures)+1, 1))
#plt.yticks()
if legend:
if legend_loc!=None:
ax.legend(fontsize = legend_fontsize, loc = legend_loc, frameon = None)
else:
ax.legend( fontsize=legend_fontsize, frameon = None)
ax.set_xlabel('Number of substructures formed', fontsize=fontsize, labelpad=15)
ax.set_ylabel('Free energy($k_{B}T$)', fontsize=fontsize) | 5,354,376 |
def _count_partial_errors(client: GoogleAdsClient,
conversion_upload_response) -> int:
"""Counts the partial errors in the GAds response.
Args:
client: A GoogleAdsClient instance
conversion_upload_response: Google Upload Conversion service response.
Returns:
An integer representing the total number of partial errors in the response
failure error.
A list containing the code, message and number of times that each unique
error code was returned by the API for one of the conversions uploaded.
"""
error_count = 0
error_stats = {}
error_array = []
if _is_partial_failure_error_present(conversion_upload_response):
partial_failure = getattr(conversion_upload_response,
'partial_failure_error', None)
error_details = getattr(partial_failure, 'details', [])
for error_detail in error_details:
failure_message = client.get_type('GoogleAdsFailure')
google_ads_failure = type(failure_message)
failure_object_des = google_ads_failure.deserialize(error_detail.value)
error_count += len(failure_object_des.errors)
for error in failure_object_des.errors:
str_code = str(error.error_code).strip()
if str_code in error_stats:
error_stats[str_code]['count'] += 1
else:
error_stats[str_code] = {}
error_stats[str_code]['count'] = 1
error_stats[str_code]['message'] = str(error.message).strip()
print('A partial failure at index '
f'{error.location.field_path_elements[0].index} occurred '
f'\nError message: {error.message}\nError code: '
f'{error.error_code}')
for code_key in error_stats:
error_array.append({
'code': code_key,
'message': error_stats[code_key]['message'],
'count': error_stats[code_key]['count']
})
return error_count, error_array | 5,354,377 |
def upgrade():
"""Upgrade database schema and/or data, creating a new revision."""
_add_roles_for_objects(SCOPING_OBJECTS, NEW_ROLES)
acr_propagation.propagate_roles(
acr_constants.GGRC_NEW_ROLES_PROPAGATION,
with_update=True
) | 5,354,378 |
def cumulative_similarity(atoms, representations,
threshold=0.98):
"""
"""
u_representations = [representations[0]]
s_idxs = [0]
for i, representation in enumerate(representations[1:]):
i += 1
similar = merge_asymmetric_similarity(atoms,
[representation],
u_representations,
threshold=threshold)
# We are only looking at one representation
similar = similar[0]
if len(similar) > 0:
continue
u_representations += [representation]
s_idxs += [i]
return np.asarray(s_idxs) | 5,354,379 |
def generate_job_performance_data(
job: models.StoredJob,
si: storage.StorageInterface,
types: List[models.JobDataTypeEnum],
performance_granularity: Optional[models.PerformanceGranularityEnum],
) -> Generator[pd.DataFrame, None, None]:
"""Generator to fetch job performance data at the inverter level."""
data_id_by_schema_path = {
do.definition.schema_path: do.object_id
for do in job.data_objects
if do.definition.type in types
}
# no data in types or no performance
if not data_id_by_schema_path or performance_granularity is None:
return
job_id = job.object_id
num_inverters = len(job.definition.system_definition.inverters)
if performance_granularity == models.PerformanceGranularityEnum.system:
data_id = data_id_by_schema_path["/"]
df = _get_data(job_id, data_id, si)
for i in range(num_inverters):
yield df.copy()
elif performance_granularity == models.PerformanceGranularityEnum.inverter:
for i in range(num_inverters):
data_id = data_id_by_schema_path[f"/inverters/{i}"]
df = _get_data(job_id, data_id, si)
yield df
else:
raise ValueError(f"Unknown performance granularity {performance_granularity}") | 5,354,380 |
def get_project_by_id(project_id):
"""
Retrieve a project by its Id. Returns None if no project is found.
"""
try:
return Project.objects.get(pk=project_id)
except Project.DoesNotExist:
return None | 5,354,381 |
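A usage sketch (run inside the Django project that defines the Project model; the id 42 is hypothetical):
project = get_project_by_id(42)
if project is None:
    # handle the missing project instead of catching Project.DoesNotExist
    ...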
def create_colorbar(
labels: pd.DataFrame,
tree: CassiopeiaTree,
colormap: Dict[str, Tuple[int, int, int]],
dataset_name: str,
output_directory: str = ".tmp/",
create_legend: bool = False,
) -> str:
"""Creates a colorbar file for the iTOL batch uploader
Creates a colorbar file for iTOL from categorical data. This will write out
the file to the specified location, which can then be uploaded to iTOL.
Args:
labels: A pandas series with categorical data (can be represented as strings
or categories)
tree: CassiopeiaTree
colormap: A mapping from category to RGB colors
dataset_name: Name for the dataset
output_directory: Where to write the output file
create_legend: Include legend for this colorbar.
Returns:
The filepath to new colorbar file.
"""
_leaves = tree.leaves
labelcolors_iTOL = []
for i in labels.loc[_leaves].values:
colors_i = colormap[i]
color_i = (
"rgb("
+ str(colors_i[0])
+ ","
+ str(colors_i[1])
+ ","
+ str(colors_i[2])
+ ")"
)
labelcolors_iTOL.append(color_i)
dfCellColor = pd.DataFrame()
dfCellColor["cellBC"] = _leaves
dfCellColor["color"] = labelcolors_iTOL
# save file with header
header = [
"DATASET_COLORSTRIP",
"SEPARATOR TAB",
"COLOR\t#FF0000",
"MARGIN\t100",
f"DATASET_LABEL\t{dataset_name}",
"STRIP_WIDTH\t100",
"SHOW_INTERNAL\t0",
"",
]
outfp = os.path.join(output_directory, f"{dataset_name}.txt")
with open(outfp, "w") as SIDout:
for line in header:
SIDout.write(line + "\n")
if create_legend:
number_of_items = len(colormap)
SIDout.write(f"LEGEND_TITLE\t{dataset_name} legend\n")
SIDout.write("LEGEND_SHAPES")
for _ in range(number_of_items):
SIDout.write("\t1")
SIDout.write("\nLEGEND_COLORS")
for col in colormap.values():
SIDout.write(f"\t{rgb_to_hex(col)}")
SIDout.write("\nLEGEND_LABELS")
for key in colormap.keys():
SIDout.write(f"\t{key}")
SIDout.write("\n")
SIDout.write("\nDATA\n")
df_writeout = dfCellColor.to_csv(
None, sep="\t", header=False, index=False
)
SIDout.write(df_writeout)
return outfp | 5,354,382 |
def one_c(rand_gen):
"""
    KS Test
    :param rand_gen: random number generator used to draw the Box-Muller samples
    :return: None (plots are saved to the plots/ directory)
"""
# Now need to do the ks test
# This calculates the value for KS at given points
def ks_test(z):
if z == 0:
return 1
elif z < 1.18: # Numerically optimal cutoff
block = ((np.exp((-1. * np.pi ** 2) / (8 * z ** 2))))
p = (np.sqrt(2 * np.pi) / z) * \
(block + block ** 9 + block ** 25)
else:
block = np.exp(-2 * z ** 2)
p = 1 - 2 * (block - block ** 4 + block ** 9)
return 1 - p
def ks_test_part(points, values, bins):
summed_bins = sum(values)
distribution = []
for i in range(len(values)):
distribution.append(abs(sum(values[:i]) / summed_bins - norm.cdf(bins[i])))
distribution = np.asarray(distribution)
D = max(distribution)
z = D * (np.sqrt(len(points)) + 0.12 + 0.11 / np.sqrt(len(points)))
return D, ks_test(z)
sigma = 1
u = 0
num_samples = np.logspace(np.log10(10), np.log10(10 ** 5), num=50)
reference_ks = np.zeros(50)
reference_p_value = np.zeros(50)
ks = np.zeros(50)
p_value = np.zeros(50)
for index, sample in enumerate(num_samples):
sample = int(sample)
gauss = box_muller(rand_gen, sample)
gauss = map_to_guass(gauss, u=u, sigma=sigma)
ks[index], p_value[index] = common_test(gauss, ks_test_part)
reference_ks[index], reference_p_value[index] = kstest(gauss, "norm")
plt.plot(num_samples, ks, c='b', label='My KS Test')
plt.plot(num_samples, reference_ks, c='r', label='Scipy KS Test')
plt.xscale('log')
plt.yscale('log')
plt.xlabel("Number of Points")
plt.ylabel("KS Statistic (D)")
plt.legend(loc='best')
plt.savefig("plots/KStest.png", dpi=300)
plt.cla()
plt.plot(num_samples, p_value, c='b', label='My KS Test Probability')
plt.plot(num_samples, reference_p_value, c='r', label='Scipy KS Test Probability')
plt.xscale('log')
plt.yscale('log')
plt.xlabel("Number of Points")
plt.ylabel("Probability")
plt.legend(loc='best')
plt.savefig("plots/KStest_pvalue.png", dpi=300)
plt.cla() | 5,354,383 |
def compare_command_xml(wanted, command, **kwargs):
"""Create a Broadworks XML command fragment from the arguments"""
cmd = api.get_command_object(command, **kwargs)
check_command_xml(wanted, cmd) | 5,354,384 |
def divide_into_sentences(
text: str, num_of_senteces: int, is_reversed: bool = False, offset: int = 0
) -> str:
"""
This function divides the text into sentences and returns either the first X sentences or the last X sentences.
"""
tokens_sent = nltk.sent_tokenize(text)
    # fix incorrect dialog sentences
tokens_sent = fix_direct_speech_sentences(tokens_sent)
output_text: List[str] = []
if not is_reversed:
for i, sentence in enumerate(tokens_sent):
if i < offset:
continue
if i < num_of_senteces + offset:
output_text.append(sentence)
else:
break
else:
for i, sentence in enumerate(reversed(tokens_sent)):
if i < offset:
continue
if i < num_of_senteces + offset:
output_text.append(sentence)
else:
break
output_text.reverse()
return " ".join(output_text) | 5,354,385 |
def cat_files(files, output):
"""Reads the contents of all the files and copies them to the output.
Args:
files: A list of filenames
output: A file-like object in which all the data should be copied.
"""
for file in files:
with open(file, 'r') as fd:
shutil.copyfileobj(fd, output) | 5,354,386 |
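A minimal usage sketch of the helper above; the file names are hypothetical:
with open("combined.txt", "w") as out:
    cat_files(["part1.txt", "part2.txt"], out)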
def play(env):
"""
run this function in order to create a window and be able to play
this environment.
env: CarRacing env
"""
from pyglet.window import key
discretize = env.discretize_actions
if discretize == None:
a = np.array( [0.0, 0.0, 0.0] )
else:
a = np.array([0])
def key_press(k, mod):
global restart
if discretize == None:
if k==0xff0d: restart = True
if k==key.LEFT: a[0] = -1.0
if k==key.RIGHT: a[0] = +1.0
if k==key.UP: a[1] = +1.0
if k==key.DOWN: a[1] = -1.0
if k==key.SPACE: a[2] = +0.8 # set 1.0 for wheels to block to zero rotation
elif discretize == "hard":
if k==0xff0d: restart = True
if k==key.LEFT: a[0] = 1
if k==key.RIGHT: a[0] = 2
if k==key.UP: a[0] = 3
if k==key.SPACE: a[0] = 4
def key_release(k, mod):
if discretize == None:
if k==key.LEFT and a[0]==-1.0: a[0] = 0
if k==key.RIGHT and a[0]==+1.0: a[0] = 0
if k==key.UP: a[1] = 0
if k==key.DOWN: a[1] = 0
if k==key.SPACE: a[2] = 0
else:
a[0] = 0
if k==key.D: set_trace()
if k==key.R: env.reset()
if k==key.Z: env.change_zoom()
if k==key.G: env.switch_intersection_groups()
if k==key.I: env.switch_intersection_points()
if k==key.X: env.switch_xt_intersections()
if k==key.E: env.switch_end_of_track()
if k==key.S: env.switch_start_of_track()
if k==key.T: env.screenshot('./')
if k==key.Q: sys.exit()
env.render()
record_video = False
if record_video:
env.monitor.start('/tmp/video-test', force=True)
env.key_press_fn = key_press
env.key_release_fn = key_release
while True:
env.reset()
total_reward = 0.0
steps = 0
restart = False
while True:
if discretize != None: a_tmp = a[0]
else: a_tmp = a
s, r, done, info = env.step(a_tmp)
total_reward += r
if steps % 200 == 0 or done:
#print("\naction " + str(["{:+0.2f}".format(x) for x in a]))
print("step {} total_reward {:+0.2f}".format(steps, total_reward))
steps += 1
print("step {} total_reward {:+0.2f}".format(steps, total_reward))
steps += 1
if not record_video: # Faster, but you can as well call env.render() every time to play full window.
env.render()
if done or restart: break
env.close() | 5,354,387 |
def intersection(bbox1: BoundingBox,
bbox2: BoundingBox) -> BoundingBox:
"""
Calculate the intersection of two bounding boxes.
"""
assert bbox1.x_min <= bbox1.x_max
assert bbox1.y_min <= bbox1.y_max
assert bbox2.x_min <= bbox2.x_max
assert bbox2.y_min <= bbox2.y_max
# determine the coordinates of the intersection rectangle
x_left = max(bbox1.x_min, bbox2.x_min)
y_top = max(bbox1.y_min, bbox2.y_min)
x_right = min(bbox1.x_max, bbox2.x_max)
y_bottom = min(bbox1.y_max, bbox2.y_max)
if x_right < x_left or y_bottom < y_top:
return EMPTY_BBOX
return BoundingBox(x_left, x_right, y_top, y_bottom) | 5,354,388 |
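An illustrative call, assuming BoundingBox is a simple record with x_min, x_max, y_min, y_max fields (e.g. a namedtuple), as the function implies:
box = intersection(BoundingBox(x_min=0, x_max=4, y_min=0, y_max=4),
                   BoundingBox(x_min=2, x_max=6, y_min=1, y_max=3))
# overlap of [0,4]x[0,4] and [2,6]x[1,3] -> x in [2,4], y in [1,3]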
def maybe_load_checkpoint(train_loop_rngs: jnp.ndarray,
save_checkpoint_path: str,
init_optimizer: flax.optim.Optimizer,
init_params: Params,
init_fixed_model_states: Optional[Params],
default_reinit_params: Iterable[str],
config: ml_collections.ConfigDict) -> CheckpointData:
"""Loads a model from an existing checkpoint if so indicated by the config.
Whether to resume training, initialize from a previous checkpoint, or do
nothing is set by the `config` ConfigDict, based on the existence of fields
`resume` (resume training) or `model_init` (initialize from pretrained
checkpoint).
When resuming training, both the model weights and optimizer
state (including the training step) are restored. When initializing, only
the model parameters are updated.
The way in which initializing is prioritized in the following way:
1. Always resume from an existing checkpoint, e.g. resume a finetune job.
2. Resume from a previous checkpoint, e.g. start a cooldown training job.
  3. Initialize model from something, e.g. start a fine-tuning job.
4. Do nothing (training from scratch).
Args:
train_loop_rngs: unreplicated jax.PRNGKey.
save_checkpoint_path: File pointing to pretrained checkpoint stored in NumPy
`.npz` file.
init_optimizer: flax.Optimizer to be updated.
init_params: Tree of (possibly randomly) initialized parameters for the
model.
init_fixed_model_states: Optional pytree of non-trainable parameters.
Currently only passed when using SNGP models.
default_reinit_params: List of parameter names to reinitialize if not
provided by the config file.
config: ConfigDict which contains fields indicating if, and how, to load an
available checkpoint into the optimizer. If resuming from a previous
checkpoint *to start a cooldown job*, the flag `resume` must be set. If
      initializing a (subset of) model parameters to start a fine-tuning job,
fields `model_init`, `representation_size` and `classifier` must be set.
Returns:
A CheckpointData instance containing a new rng key, the new optimizer state,
the new untrainable parameters (if resuming from a checkpoint), and a
dictionary of information about the reloaded state.
"""
optimizer = init_optimizer
fixed_model_states = init_fixed_model_states
accum_train_time = 0.0
# TODO(dusenberrymw, zmariet): Directly return an unreplicated rng and the
# cumulative training time instead of storing them in `checkpoint_extra`.
checkpoint_extra = dict(
accum_train_time=accum_train_time,
rngs_loop=flax_utils.replicate(train_loop_rngs))
# Parse config file to figure out which setting we are in.
resume_from_checkpoint = (
(save_checkpoint_path is not None and gfile.exists(save_checkpoint_path))
or config.get("resume") is not None)
reinitialize_model = config.get(
"model_init") is not None and not resume_from_checkpoint
if resume_from_checkpoint:
logging.info("Resume training from checkpoint...")
# Always prioritize loading from a checkpoint from the current training job.
if save_checkpoint_path and gfile.exists(save_checkpoint_path):
resume_checkpoint_path = save_checkpoint_path
# Otherwise, we reload from a previous checkpoint provided by the config.
else:
resume_checkpoint_path = config.resume
checkpoint_tree = {"opt": init_optimizer, "extra": checkpoint_extra}
if init_fixed_model_states is not None:
checkpoint_tree["states"] = init_fixed_model_states
checkpoint = load_checkpoint(checkpoint_tree, resume_checkpoint_path)
optimizer, checkpoint_extra = checkpoint["opt"], checkpoint["extra"]
fixed_model_states = checkpoint.get("states", None)
elif reinitialize_model:
logging.info("Initialize model...")
reinit_params = config.get("model_reinit_params", default_reinit_params)
logging.info("Reinitializing these parameters: %s", reinit_params)
loaded = load_from_pretrained_checkpoint(
init_params=init_params,
pretrained_path=config.model_init,
model_representation_size=config.model.representation_size,
model_classifier=config.model.classifier,
reinit_params=reinit_params)
optimizer = init_optimizer.replace(target=loaded)
if jax.process_index() == 0:
logging.info("Restored parameter overview:")
parameter_overview.log_parameter_overview(loaded)
else:
logging.info("No checkpoint to recover from; using default initialization.")
return CheckpointData(
optimizer=optimizer,
fixed_model_states=fixed_model_states,
train_loop_rngs=checkpoint_extra["rngs_loop"],
accumulated_train_time=checkpoint_extra["accum_train_time"]) | 5,354,389 |
def transfer_meta_data(path_in, path_out):
"""Read input meta data and write it to the configuration file"""
ds = qpformat.load_data(path=path_in)
cfg = config.ConfigFile(path_out)
sec = cfg["meta"]
for key in sorted(META_MAPPER):
dskey, mult = META_MAPPER[key]
if (key not in sec or sec[key] is None) and dskey in ds.meta_data:
cfg.set_value("meta", key, ds.meta_data[dskey] * mult) | 5,354,390 |
def svg_to_clipboard(string):
""" Copy a SVG document to the clipboard.
Parameters
----------
string : basestring
A Python string containing a SVG document.
"""
if isinstance(string, unicode_type):
string = string.encode('utf-8')
mime_data = QtCore.QMimeData()
mime_data.setData('image/svg+xml', string)
QtGui.QApplication.clipboard().setMimeData(mime_data) | 5,354,391 |
def test_if_tech_defined(enduse_fueltypes_techs):
"""Test if a technology has been configured,
    i.e. a fuel share has been assigned to one of the
    fueltypes in `fuel_shares`.
Arguments
---------
enduse_fueltypes_techs : dict
Configured technologies and fuel shares of an enduse
Returns
-------
c_tech_defined : bool
Criteria whether technologies have been configured
for an enduse or not
"""
c_tech_defined = False
for fueltype in enduse_fueltypes_techs:
if enduse_fueltypes_techs[fueltype] == {}:
pass
else:
c_tech_defined = True
break
return c_tech_defined | 5,354,392 |
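A small example of the check above with made-up fuel shares:
assert test_if_tech_defined({"electricity": {"heat_pump": 1.0}, "gas": {}}) is True
assert test_if_tech_defined({"electricity": {}, "gas": {}}) is False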
def _fanTriangles(vertices, faces=None):
"""Create triangles by fanning out from vertices. Returns a
generator for vertex triplets. If faces is None, assume that
vertices are planar and indicate a polygon; otherwise, use the
face indices given in faces."""
vertices = np.asarray(vertices);
if faces is None:
if len(vertices) < 3:
return;
for tri in ((vertices[0], verti, vertj) for (verti, vertj) in
zip(vertices[1:], vertices[2:])):
yield tri;
else:
for face in faces:
for tri in ((vertices[face[0]], vertices[i], vertices[j]) for (i, j) in
zip(face[1:], face[2:])):
yield tri; | 5,354,393 |
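A quick planar example of the fanning above (no faces given): a quad yields two triangles, (v0, v1, v2) and (v0, v2, v3).
quad = [(0, 0), (1, 0), (1, 1), (0, 1)]
tris = list(_fanTriangles(quad))  # len(tris) == 2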
def segment_rings(region, seeds, neighbor_lists, step=1, background_value=-1,
verbose=False):
"""
Iteratively segment a region of surface mesh as concentric segments.
Parameters
----------
region : list of integers
indices of region vertices to segment (such as a fold)
seeds : list of integers
indices of seed vertices
neighbor_lists : list of lists of integers
indices to neighboring vertices for each vertex
step : integer
number of segmentation steps before assessing segments
background_value : integer
background value
verbose : bool
print statements?
Returns
-------
segments : list of lists of integers
indices to vertices for each concentric segment
Examples
--------
>>> import numpy as np
>>> from mindboggle.mio.vtks import read_scalars
>>> from mindboggle.guts.mesh import find_neighbors_from_file
>>> from mindboggle.guts.segment import extract_borders
>>> from mindboggle.guts.segment import segment_rings
>>> from mindboggle.mio.fetch_data import prep_tests
>>> urls, fetch_data = prep_tests()
>>> vtk_file = fetch_data(urls['left_travel_depth'], '', '.vtk')
>>> folds_file = fetch_data(urls['left_folds'], '', '.vtk')
>>> values, name = read_scalars(vtk_file, True, True)
>>> neighbor_lists = find_neighbors_from_file(vtk_file)
>>> background_value = -1
>>> fold, name = read_scalars(folds_file)
>>> indices = [i for i,x in enumerate(fold) if x != background_value]
>>> # Initialize seeds with the boundary of thresholded indices:
>>> use_threshold = True
>>> if use_threshold:
... # Threshold at the median depth or within maximum values in boundary:
... threshold = np.median(values[indices]) #+ np.std(values[indices])
... indices_high = [x for x in indices if values[x] >= threshold]
... # Make sure threshold is within the maximum values of the boundary:
... B = np.ones(len(values))
... B[indices] = 2
... borders, foo1, foo2 = extract_borders(list(range(len(B))), B, neighbor_lists)
... borders = [x for x in borders if values[x] != background_value]
... if list(frozenset(indices_high).intersection(borders)):
... threshold = np.max(values[borders]) + np.std(values[borders])
... indices_high = [x for x in indices if values[x] >= threshold]
... # Extract threshold boundary vertices as seeds:
... B = background_value * np.ones(len(values))
... B[indices_high] = 2
... seeds, foo1, foo2 = extract_borders(list(range(len(values))), B, neighbor_lists)
... # Or initialize P with the maximum value point:
... else:
... seeds = [indices[np.argmax(values[indices])]]
... indices_high = []
>>> indices = list(frozenset(indices).difference(indices_high))
>>> indices = list(frozenset(indices).difference(seeds))
>>> step = 1
>>> verbose = False
>>> segments = segment_rings(indices, seeds, neighbor_lists, step,
... background_value, verbose)
>>> len(segments)
56
>>> [len(x) for x in segments][0:10]
[5540, 5849, 6138, 5997, 4883, 3021, 1809, 1165, 842, 661]
>>> segments[0][0:10]
[65539, 65540, 98308, 98316, 131112, 131121, 131122, 131171, 131175, 131185]
Write results to vtk file and view (skip test):
>>> from mindboggle.mio.plots import plot_surfaces # doctest: +SKIP
>>> from mindboggle.mio.vtks import read_scalars, rewrite_scalars # doctest: +SKIP
>>> S = background_value * np.ones(len(values)) # doctest: +SKIP
>>> for i, segment in enumerate(segments): S[segment] = i # doctest: +SKIP
>>> rewrite_scalars(vtk_file, 'segment_rings.vtk', S, 'segment_rings',
... [], -1) # doctest: +SKIP
>>> plot_surfaces('segment_rings.vtk') # doctest: +SKIP
"""
from mindboggle.guts.segment import segment_regions
segments = []
while seeds:
# Segment step-wise starting from seeds and through the region:
seeds_plus_new = segment_regions(region, neighbor_lists, 1, [seeds],
False, False, [], [], [],
step, background_value, verbose)
seeds_plus_new = [i for i,x in enumerate(seeds_plus_new)
if x != background_value]
# Store the new segment after removing the previous segment:
region = list(frozenset(region).difference(seeds))
seeds = list(frozenset(seeds_plus_new).difference(seeds))
if seeds:
# Add the new segment and remove it from the region:
segments.append(seeds)
region = list(frozenset(region).difference(seeds))
return segments | 5,354,394 |
def equal_spacing(L,w,justify="right"):
"""Print a single string with the elements of the list spaced out"""
s = ""
if justify == "right" or justify == "r":
for i in L:
s += f"{i:>{w}}"
elif justify == "left" or justify == "l":
for i in L:
s += f"{i:<{w}}"
elif justify == "center" or justify == "c":
for i in L:
s += f"{i}".center(w," ")
else:
raise Exception("Justify must be left or right.")
print(s) | 5,354,395 |
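Illustrative calls of the printer above:
equal_spacing([1, 22, 333], 6)         # prints "     1    22   333"
equal_spacing(["a", "b", "c"], 6, "c") # same values, centered in 6-char columns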
def classification_result(y, y_pred):
"""
    :param y: true labels
    :param y_pred: predicted labels
    :return: two lists of indices: correctly and incorrectly classified samples
"""
assert len(y) == len(y_pred)
correct = []
wrong = []
for i in range(len(y)):
if y[i] == y_pred[i]:
correct.append(i)
else:
wrong.append(i)
return correct, wrong | 5,354,396 |
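A tiny example with made-up labels:
correct, wrong = classification_result([1, 0, 1], [1, 1, 1])
# correct == [0, 2], wrong == [1]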
def flatgrad(loss, var_list, clip_norm=None):
"""Calculate the gradient and flatten it.
Parameters
----------
loss : float
the loss value
var_list : list of tf.Tensor
the variables
clip_norm : float
clip the gradients (disabled if None)
Returns
-------
list of tf.Tensor
flattened gradient
"""
grads = tf.gradients(loss, var_list)
if clip_norm is not None:
grads = [tf.clip_by_norm(grad, clip_norm=clip_norm) for grad in grads]
return tf.concat(axis=0, values=[
tf.reshape(grad if grad is not None else tf.zeros_like(v), [numel(v)])
for (v, grad) in zip(var_list, grads)
]) | 5,354,397 |
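A sketch assuming a TF1-style graph environment (the function relies on graph-mode tf.gradients and a module-level numel helper); the shapes are illustrative:
import tensorflow as tf
a = tf.Variable(tf.ones([2, 3]))
b = tf.Variable(tf.ones([4]))
loss = tf.reduce_sum(tf.square(a)) + tf.reduce_sum(tf.square(b))
flat = flatgrad(loss, [a, b], clip_norm=10.0)  # 1-D tensor of length 2*3 + 4 = 10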
async def get_image_from_message(
ctx,
url=None,
*,
return_type="image_RGBA",
search_last_messages=True,
accept_emojis=True,
accept_templates=True,
):
"""Get an image from a discord Context or check on images among the 100
last messages sent in the channel. Return bytes or PIL.Image image and the image url"""
assert return_type and return_type in ["image_RGBA", "image", "bytes"]
message_limit = 100
initial_message = None
if isinstance(ctx, commands.Context):
initial_message = ctx.message
try:
# try to get the image from the initial message
return await get_image(
initial_message,
url,
return_type,
accept_emojis,
accept_templates,
accept_embeds=False,
)
except ImageNotFoundError as e:
# if the message is a reply, we try to find an image in the replied message
ref = initial_message.reference if initial_message else None
if ref and isinstance(ref.resolved, disnake.Message):
reply_message = ref.resolved
try:
return await get_image(
reply_message,
url=None,
return_type=return_type,
accept_emojis=False,
accept_templates=False,
accept_embeds=True,
)
except Exception:
pass
# if no image was found in the message we check for images in the last
# 100 messages sent in the channel
if search_last_messages:
async for message in ctx.channel.history(limit=message_limit):
if message != initial_message:
try:
return await get_image(
message,
url=None,
return_type=return_type,
accept_emojis=False,
accept_templates=False,
accept_embeds=True,
)
except Exception:
pass
# no image was found in the last 100 images
raise ValueError(e)
except ValueError as e:
# if an image was found but an error occurred, we raise it
raise ValueError(e) | 5,354,398 |
def test_ifThen():
"""
This function allows to execute a callable on an object only if it
has a valid value. ifThen(value,callable) will return callable(value)
only if value is not in falsables.
It is a List-like method, it can be combined with fandango.excepts.trial
"""
#assert fandango.functional.ifThen | 5,354,399 |