content (string, lengths 22–815k) | id (int64, 0–4.91M) |
---|---|
def resize_bilinear_nd(t, target_shape):
"""Bilinear resizes a tensor t to have shape target_shape.
This function bilinearly resizes a n-dimensional tensor by iteratively
applying tf.image.resize_bilinear (which can only resize 2 dimensions).
For bilinear interpolation, the order in which it is applied does not matter.
Args:
t: tensor to be resized
target_shape: the desired shape of the new tensor.
Returns:
The resized tensor
"""
shape = list(t.shape)
target_shape = list(target_shape)
assert len(shape) == len(target_shape)
# We progressively move through the shape, resizing dimensions...
d = 0
while d < len(shape):
# If we don't need to deal with the next dimension, step over it
if shape[d] == target_shape[d]:
d += 1
continue
# Otherwise, we'll resize the next two dimensions...
# If d+2 doesn't need to be resized, this will just be a null op for it
new_shape = shape[:]
new_shape[d:d+2] = target_shape[d:d+2]
# The helper collapse_shape() makes our shapes 4-dimensional with
# the two dimensions we want to deal with on the outside.
shape_ = collapse_shape(shape, d, d+2)
new_shape_ = collapse_shape(new_shape, d, d+2)
# We can then reshape and use torch.nn.Upsample() on the
# outer two dimensions.
t_ = t.view(shape_)
# transpose [0, 1, 2, 3] to [0, 3, 1, 2]
t_ = torch.transpose(t_, 1, 3)
t_ = torch.transpose(t_, 2, 3)
upsample = torch.nn.Upsample(size=new_shape_[1:3], mode='bilinear', align_corners=True)
t_ = upsample(t_)
t_ = torch.transpose(t_, 2, 3)
t_ = torch.transpose(t_, 1, 3)
# And then reshape back to our uncollapsed version, having finished resizing
# two more dimensions in our shape.
t = t_.reshape(new_shape)
shape = new_shape
d += 2
return t | 5,357,700 |
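A minimal usage sketch for the `resize_bilinear_nd` snippet above, assuming it and the `collapse_shape` helper it references are importable from the same module; the tensor shapes below are hypothetical.
import torch
# Hypothetical example: upscale the two 8x8 spatial dimensions of a 4-D
# tensor to 16x16 while leaving the batch and channel dimensions alone.
t = torch.rand(1, 8, 8, 3)
resized = resize_bilinear_nd(t, (1, 16, 16, 3))
print(resized.shape)  # expected: torch.Size([1, 16, 16, 3])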
def _parse_disambiguate(disambiguatestatsfilename):
"""Parse disambiguation stats from given file.
"""
disambig_stats = [-1, -1, -1]
with open(disambiguatestatsfilename, "r") as in_handle:
header = in_handle.readline().strip().split("\t")
if header == ['sample', 'unique species A pairs', 'unique species B pairs', 'ambiguous pairs']:
disambig_stats_tmp = in_handle.readline().strip().split("\t")[1:]
if len(disambig_stats_tmp) == 3:
disambig_stats = [int(x) for x in disambig_stats_tmp]
return disambig_stats | 5,357,701 |
def clean_vigenere(text):
"""Convert text to a form compatible with the preconditions imposed by Vigenere cipher."""
return ''.join(ch for ch in text.upper() if ch.isupper()) | 5,357,702 |
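A quick usage sketch for `clean_vigenere`: it upper-cases the text and keeps only alphabetic characters, which is what a classical Vigenère routine expects.
# Spaces, punctuation and digits are dropped; letters are upper-cased.
print(clean_vigenere("Attack at dawn!"))  # -> "ATTACKATDAWN"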
def close_room(room_name):
"""Close a room to anonymous users.
members only documentation:
https://xmpp.org/extensions/xep-0045.html#enter-members
Parameters
----------
room_name: string
The name of the room you want to close.
"""
client = _connect()
client.send(
xmpp.Presence(
to=f"{room_name}@{settings.XMPP_CONFERENCE_DOMAIN}/admin",
payload=[xmpp.Node(tag="x", attrs={"xmlns": xmpp.NS_MUC})],
)
)
# request the current room config
default_config_iq = client.SendAndWaitForResponse(
xmpp.Iq(
to=f"{room_name}@{settings.XMPP_CONFERENCE_DOMAIN}",
frm=settings.XMPP_PRIVATE_ADMIN_JID,
typ="get",
queryNS=xmpp.NS_MUC_OWNER,
)
)
data = []
fields_to_exclude = [
"muc#roomconfig_membersonly",
]
# Remove config we want to modify
for children in default_config_iq.getQueryPayload()[0].getChildren():
if (
children.getName() == "field"
and children.getAttr("var") not in fileds_to_exclude
):
data.append(children)
# Add our own config
data = data + [
xmpp.DataField(typ="boolean", name="muc#roomconfig_membersonly", value=1),
]
client.send(
xmpp.Iq(
to=f"{room_name}@{settings.XMPP_CONFERENCE_DOMAIN}",
frm=settings.XMPP_PRIVATE_ADMIN_JID,
typ="set",
queryNS=xmpp.NS_MUC_OWNER,
payload=[
xmpp.DataForm(
typ="submit",
data=data,
)
],
)
) | 5,357,703 |
def select_artist(df_by_artists, df_rate):
"""This method selects artists which perform the same genre as
artists were given
:param df_by_artists:
:param df_rate:
"""
# save the indices of artists, which include any of the genres in the genre profile
list_of_id = []
for index, row in df_by_artists.iterrows():
for genre in row["genres"]:
if(genre in df_rate.index):
list_of_id.append(index)
#find the unique indices
list_of_id = list(set(list_of_id))
#select the artists and genres columns of the artists including any of the genres in the genre profile
df_select_columns = df_by_artists.iloc[list_of_id, [col(df_by_artists, "artists"), col(df_by_artists, "genres")]]
df_select = df_select_columns.copy()
#create the artist-genre-matrix of new artists
for index, row in df_select_columns.iterrows():
for genre in row['genres']:
#artist includes genre: 1
df_select.at[index, genre] = 1
#artist does not include genre: 0
df_select = df_select.fillna(0)[df_rate.index]
return df_select | 5,357,704 |
def generate_uuid(class_name: str, identifier: str) -> str:
""" Generate a uuid based on an identifier
:param identifier: characters used to generate the uuid
:type identifier: str, required
:param class_name: classname of the object to create a uuid for
:type class_name: str, required
"""
return str(uuid.uuid5(uuid.NAMESPACE_DNS, class_name + identifier)) | 5,357,705 |
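A brief usage sketch for `generate_uuid`: because it uses `uuid.uuid5` with a fixed namespace, the same class name and identifier always produce the same UUID (the names below are hypothetical).
a = generate_uuid("Person", "alice@example.org")
b = generate_uuid("Person", "alice@example.org")
assert a == b  # deterministic for identical inputs
print(a)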
def fetch(model, key):
"""Fetch by ID."""
return db.session.query(model).get(key) | 5,357,706 |
def construct_filename(prefix: str, suffix: Optional[str] = '.csv') -> str:
"""Construct a filename containing the current date.
Examples
--------
.. code:: python
>>> filename = construct_filename('my_file', '.txt')
>>> print(filename)
'my_file_31_May_2019.txt'
Parameters
----------
prefix : :class:`str`
A prefix for the to-be returned filename.
The current date will be appended to this prefix.
suffix : :class:`str`, optional
An optional suffix of the to-be returned filename.
No suffix will be attached if ``None``.
Returns
-------
:class:`str`
A filename consisting of **prefix**, the current date and **suffix**.
"""
today = date.today()
suffix = suffix or ''
return prefix + today.strftime('_%d_%b_%Y') + suffix | 5,357,707 |
def make_friedman_model(point1, point2):
"""
Makes a vtk line source from two set points
:param point1: one end of the line
:param point2: other end of the line
:returns: The line
"""
line = vtkLineSource()
line.SetPoint1(point1)
line.SetPoint2(point2)
return line | 5,357,708 |
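A usage sketch for `make_friedman_model`, assuming `vtkLineSource` is available in the module (for example via `from vtkmodules.vtkFiltersSources import vtkLineSource` in recent VTK releases); the points are hypothetical.
# Build a line between two 3-D points and read its endpoints back.
line = make_friedman_model((0.0, 0.0, 0.0), (10.0, 5.0, 2.5))
print(line.GetPoint1(), line.GetPoint2())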
def test_fixture(request):
"""
This is a test fixture
"""
self = request.node.cls
def finalizer():
teardown(self)
request.addfinalizer(finalizer)
setup(self) | 5,357,709 |
def display_repositories_by_owner( repository_tups ):
"""Group summary display by repository owner."""
repository_tups_by_owner = {}
for repository_tup in repository_tups:
name, owner, changeset_revision = repository_tup
if owner:
if owner in repository_tups_by_owner:
processed_repository_tups_by_owner = repository_tups_by_owner.get( owner, [] )
if repository_tup not in processed_repository_tups_by_owner:
repository_tups_by_owner[ owner ].append( repository_tup )
else:
repository_tups_by_owner[ owner ] = [ repository_tup ]
# Display grouped summary.
for owner, repository_tups in repository_tups_by_owner.items():
print "# "
for repository_tup in repository_tups:
name, owner, changeset_revision = repository_tup
print "# Revision %s of repository %s owned by %s" % ( changeset_revision, name, owner ) | 5,357,710 |
def breadcrumbs_pcoa_plot(pcl_fname, output_plot_fname, **opts):
"""Use breadcrumbs `scriptPcoa.py` script to produce principal
coordinate plots of pcl files.
:param pcl_fname: String; file name of the pcl-formatted taxonomic profile
to visualize via `scriptPcoa.py`.
:param output_plot_fname: String; file name of the resulting image file.
:keyword **opts: Any additional keyword arguments are passed to
`scriptPcoa.py` as command line flags. By default,
it passes `meta=None`, `id=None` and `noShape=None`,
which are converted into `--meta`, `--id`, and
`--noShape`, respectively.
External dependencies
- Breadcrumbs: https://bitbucket.org/biobakery/breadcrumbs
"""
pcoa_cmd = ("scriptPcoa.py ")
default_opts = {
"meta" : True,
"id" : True,
"noShape" : True,
"outputFile" : output_plot_fname
}
default_opts.update(opts)
def sample_id(fname):
id_ = str()
with open(fname) as f:
for line in f:
if line.startswith("#"):
id_ = line.split('\t')[0]
continue
else:
return id_ or line.split('\t')[0]
def last_meta_name(fname):
prev_line = str()
with open(fname) as f:
for line in f:
if re.search(r'[Bb]acteria|[Aa]rchaea.*\s+\d', line):
return prev_line.split('\t')[0]
prev_line = line
return prev_line.split('\t')[0]
def run(pcoa_cmd=pcoa_cmd):
if default_opts['meta'] is True or not default_opts['meta']:
default_opts['meta'] = last_meta_name(pcl_fname)
if default_opts['id'] is True or not default_opts['id']:
default_opts['id'] = sample_id(pcl_fname)
pcoa_cmd += dict_to_cmd_opts(default_opts)
pcoa_cmd += " "+pcl_fname+" "
return CmdAction(pcoa_cmd, verbose=True).execute()
targets = [output_plot_fname]
if 'CoordinatesMatrix' in default_opts:
targets.append(default_opts['CoordinatesMatrix'])
yield {
"name": "breadcrumbs_pcoa_plot: "+output_plot_fname,
"actions": [run],
"file_dep": [pcl_fname],
"targets": targets
} | 5,357,711 |
def _parse_xml(buff):
"""\
Parses XML and returns the root element.
"""
buff.seek(0)
return etree.parse(buff).getroot() | 5,357,712 |
def delete_left_buckets(
request: pytest.FixtureRequest, storage_client: SupabaseStorageClient
):
"""Ensures no test buckets are left when a test that created a bucket fails"""
def finalizer():
for bucket in temp_test_buckets_ids:
try:
storage_client.empty_bucket(bucket.id)
storage_client.delete_bucket(bucket.id)
except StorageException as e:
# Ignore 404 responses since they mean the bucket was already deleted
response = e.args[0]
if response["statusCode"] != 404:
raise e
continue
request.addfinalizer(finalizer) | 5,357,713 |
def After(interval):
""" After waits for the duration to elapse and then sends the current time
on the returned channel.
It is equivalent to Timer(interval).c
"""
return Timer(interval).c | 5,357,714 |
def f_score(r: float, p: float, b: int = 1):
"""
Calculate f-measure from recall and precision.
Args:
r: recall score
p: precision score
b: weight of precision in harmonic mean
Returns:
val: value of f-measure
"""
try:
val = (1 + b ** 2) * (p * r) / (b ** 2 * p + r)
except ZeroDivisionError:
val = 0
return val | 5,357,715 |
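A quick check of `f_score` with the default b=1 (the standard F1 score): for recall 0.8 and precision 0.6, F1 = 2·(0.6·0.8)/(0.6+0.8) ≈ 0.686, and a 0/0 case falls back to 0.
print(f_score(r=0.8, p=0.6))  # ~0.6857
print(f_score(r=0.0, p=0.0))  # 0, via the ZeroDivisionError branch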
def is_container_system_config_file(file):
"""Determine whether a given file is one of the files created by setup_container_system_config().
@param file: Absolute file path as string.
"""
if not file.startswith("/etc/"):
return False
return file in [os.path.join("/etc", f.decode()) for f in CONTAINER_ETC_FILE_OVERRIDE] | 5,357,716 |
def show_inventory():
"""Show the user what is in stock."""
context = {
'inventory': [ # Could contain any items
{'name': 'apple', 'price': 1.00},
{'name': 'banana', 'price': 1.20},
{'name': 'carrot', 'price': 2.00},
]
}
return render_template('show_inventory.html', **context) | 5,357,717 |
def sweep(airfoils, res, min_alfa=4, write_file=True, plots_on=False, panels=200):
"""
Runs a large sweep over airfoil and re range
@param airfoils iterable of NACA numbers to sweep over
@param res iterable reynolds numbers to sweep over
@param write_file boolean indicating whether or not to create polars
@param plots_on boolean indicating whether or not to simulate with plots on
@param panels included as per original genpolar file
"""
os.chdir(cwd)
sessionlog.comment("Beginning sweep with minimum alfa of " + str(min_alfa))
xf = pyxfoil.session(logfile='sweep', plots=plots_on, force_zero=True)
xf.naca('0010')
xf.set_panels(panels)
timeouts = 0
start_time = time.time()
last_time = start_time
airfoils, res = list(airfoils), list(res)
for naca in airfoils:
xf.naca(naca)
for re in res:
percentage = 100*round((airfoils.index(naca)*len(res)+res.index(re)+1.)/(len(airfoils)*len(res)), 5)
polarname = "NACA" + naca + "_Re" + str(int(round(re/1000))).zfill(8) + "k.pol"
if polarname in get_existing():
print "NACA " + naca + " Re " + (str(int(re/1000)) + 'k').rjust(8) + " has already been run: skipping (" + str(percentage) + "%)"
continue
xf.set_re(re)
try:
xf.generate_polar(filename=polarname, min_alfa=min_alfa, writefile=write_file)
sessionlog.comment("NACA " + naca + ", re=" + str(re) + " simulation complete.")
this_time = time.time()
print str(percentage) + "% complete, " + str(round(this_time-last_time, 3)) + " seconds"
last_time = this_time
except pexpect.TIMEOUT:
xf.force_quit()
print "XFOIL timed out at NACA=" + naca + " Re=" + str(re)
sessionlog.timeout(naca, re)
timeouts += 1
print "Attempting to restarting at current set."
xf = pyxfoil.session(airfoil=naca, re=re, logfile='sweep', plots=plots_on, force_zero=True)
try:
xf.generate_polar(filename=polarname, min_alfa=min_alfa, writefile=write_file)
sessionlog.comment("NACA " + naca + ", Re=" + str(re) + " recovered on second try.")
this_time = time.time()
print str(percentage) + "% complete, " + str(round(this_time-last_time, 3)) + " seconds"
last_time = this_time
except pexpect.TIMEOUT:
xf.force_quit()
sessionlog.comment("NACA " + naca + ", Re=" + str(re) + " failed to recover on second try. Continuing at next set.")
print "NACA " + naca + ", Re=" + str(re) + " failed to recover on second try. Continuing at next set."
xf = pyxfoil.session(logfile='sweep', plots=plots_on, force_zero=True, airfoil=naca)
xf.quit()
total_seconds = time.time()-start_time
average_time = round(total_seconds/(len(res)*len(airfoils)), 3)
m, s = divmod(total_seconds, 60)
h, m = divmod(m, 60)
timeout_count = "Number of xfoil timeouts: " + str(timeouts)
completion_time = "Time to complete: " + str(h) + " hours " + str(m) + " minutes " + str(round(s, 3)) + " seconds."
simulation_count = "Number of simulations: " + str(len(airfoils) * len(res))
average_time = "Average simulation length: " + str(average_time) + ' seconds.'
sessionlog.comment(timeout_count)
sessionlog.comment(completion_time)
sessionlog.comment(simulation_count)
sessionlog.comment(average_time)
sessionlog.sweep_param(airfoils, res)
print timeout_count + '\n' + completion_time + '\n' + simulation_count + '\n' + average_time
os.chdir(cwd) | 5,357,718 |
def grid_optimizer(
data,
params,
args,
xset,
yset=None,
verbose=False,
visualize=False,
save_path=None):
"""
This function optimizes the ESN parameters, x and y, over a specified
range of values. The optimal values are determined by minimizing
the mean squared error. Those optimal values are returned.
Parameters
----------
data : numpy array
This is the dataset that the ESN should train and predict.
If the training length plus the future total exceed the
length of the data, an error will be thrown.
**The shape of the transpose of the data will determine
the number of inputs and outputs.**
params : dictionary
A dictionary containing all of the parameters required to
initialize an ESN.
Required parameters are:
* "n_reservoir" : int, the reservoir size
* "sparsity" : float, the sparsity of the reservoir
* "rand_seed" : int or None, specifies the initial seed
* "rho" : float, the spectral radius
* "noise" : the noise used for regularization
* "trainlen" : int, the training length
* "future" : int, the total prediction length
* "window" : int or None, the window size
args : list or tuple
The list of variables you want to optimize. Must be less
than or equal to two.
xset : numpy array
The first set of values to be tested. Cannot be None.
yset : numpy array or None
The second set of values to be tested at the same
time as the xset. Can be None.
verbose : boolean
Specifies if the simulation outputs should be printed.
Useful for debugging.
visualize : boolean, string
Specifies if the results should be visualized.
* 'surface' will plot a 3D error surface.
save_path : string
Specifies where the data should be saved. Default is None.
Returns
-------
loss : numpy array
The array or matrix of loss values.
"""
assert(len(args) <= 2), "Too many variables to optimize. Pick two or fewer."
for variable in args:
assert(variable in list(params.keys())
), f"{variable} not in parameters"
if len(args) > 1:
assert(yset is not None), "Two variables specified, two sets not given."
xvar = args[0]
loss = np.zeros(len(xset))
if yset is not None:
assert(len(args) > 1), "Second parameter set given, but not specified."
yvar = args[1]
loss = np.zeros([len(xset), len(yset)])
if verbose:
print(f"Optimizing over {args}:")
predictLen = params['future']
for x, xvalue in enumerate(xset):
params[xvar] = xvalue
if yset is not None:
for y, yvalue in enumerate(yset):
params[yvar] = yvalue
predicted = esn_prediction(data, params)
loss[x, y] = MSE(predicted, data[-predictLen:])
if verbose:
print(
f"{variables[xvar]} = {xvalue},"
f"{variables[yvar]} = {yvalue}, MSE={loss[x][y]}")
else:
predicted = esn_prediction(data, params)
loss[x] = MSE(predicted, data[-predictLen:])
if verbose:
print(f"{xvar} = {xvalue}, MSE={loss[x]}")
# =======================================================================
# Visualization
# =======================================================================
if visualize is True and yset is not None:
plt.figure(figsize=(16, 9), facecolor='w', edgecolor='k')
plt.title((f"Hyper-parameter Optimization over {variables[xvar]}",
f"and {variables[yvar]}"))
im = plt.imshow(loss.T,
vmin=abs(loss).min(),
vmax=abs(loss).max(),
origin='lower',
cmap='PuBu')
plt.xticks(np.linspace(0, len(xset) - 1,
len(xset)), xset)
plt.yticks(np.linspace(0, len(yset) - 1,
len(yset)), yset)
plt.xlabel(f'{variables[xvar]}', fontsize=16)
plt.ylabel(f'{variables[yvar]}', fontsize=16)
cb = plt.colorbar(im)
cb.set_label(label="Mean Squared Error",
fontsize=16,
rotation=-90,
labelpad=25)
elif visualize is True and yset is None:
plt.figure(figsize=(16, 9), facecolor='w', edgecolor='k')
plt.plot(xset, loss, '-ok', alpha=0.6)
plt.title(f'MSE as a Function of {variables[xvar]}', fontsize=20)
plt.xlabel(f'{variables[xvar]}', fontsize=18)
plt.ylabel('MSE', fontsize=18)
elif visualize == 'surface' and yset is not None:
fig = plt.figure(figsize=(16, 9), facecolor='w', edgecolor='k')
ax = plt.axes(projection='3d')
X = np.array(xset)
Y = np.array(yset)
Z = np.array(loss).T
print(f"Shape X {X.shape}")
print(f"Shape Y {Y.shape}")
print(f"Shape Z {Z.shape}")
mappable = plt.cm.ScalarMappable()
mappable.set_array(Z)
ax.plot_surface(X, Y, Z, rstride=1, cstride=1,
cmap=mappable.cmap,
norm=mappable.norm)
ax.set_xlabel(f'{variables[xvar]}', fontsize=18)
ax.set_ylabel(f'{variables[yvar]}', fontsize=18)
ax.set_zlabel('MSE', fontsize=18)
cb = plt.colorbar(mappable)
cb.set_label(label="Mean Squared Error",
fontsize=16,
rotation=-90,
labelpad=25)
fig.tight_layout()
plt.show()
# =======================================================================
# Save data
# =======================================================================
if save_path is not None:
if yset is not None:
fname = f"_{xvar}_{yvar}_loss"
np.save('./data/' + save_path + fname, loss)
else:
fname = f"_{xvar}_loss"
np.save('./data/' + save_path + fname, loss)
return loss | 5,357,719 |
def main(cli_args):
"""Run all given Robot Framework data sources in parallel."""
robot_parallel = RobotParallel()
try:
robot_parallel.parse_arguments(cli_args)
robot_parallel.execute(worker, (cpu_count() * 2) - 1)
robot_parallel.flush_stdout()
# pylint: disable=broad-except
except Exception:
pass
finally:
return_code = robot_parallel.merge_results()
robot_parallel.exit(return_code) | 5,357,720 |
async def test_meta(httpx_mock: HTTPXMock):
"""Test the meta information."""
httpx_mock.add_response(json=RESPONSE_VALID)
client = Luftdaten(SENSOR_ID)
await client.get_data()
assert client.meta["sensor_id"] == 1
assert client.meta["latitude"] == 48.792
assert client.meta["longitude"] == 9.164
assert client.meta["altitude"] == 326.9 | 5,357,721 |
def rebuild_schema(doc, r, df):
"""Rebuild the schema for a resource based on a dataframe"""
import numpy as np
# Re-get the resource in the doc, since it may be different.
try:
r = doc.resource(r.name)
except AttributeError:
# Maybe r is actually a resource name
r = doc.resource(r)
def alt_col_name(name, i):
import re
if not name:
return 'col{}'.format(i)
return re.sub('_+', '_', re.sub('[^\w_]', '_', str(name)).lower()).rstrip('_')
df_types = {
np.dtype('O'): 'text',
np.dtype('int64'): 'integer',
np.dtype('float64'): 'number'
}
try:
df_index_frame = df.index.to_frame()
except AttributeError:
df_index_frame = None
def get_col_dtype(c):
c = str(c)
try:
return df_types[df[c].dtype]
except KeyError:
# Maybe it is in the index?
pass
try:
return df_types[df_index_frame[c].dtype]
except TypeError:
# Maybe not a multi-index
pass
if c == 'id' or c == df.index.name:
return df_types[df.index.dtype]
return 'unknown'
columns = []
schema_term = r.schema_term[0]
if schema_term:
old_cols = {c['name'].value: c.properties for c in schema_term.children}
for c in schema_term.children:
schema_term.remove_child(c)
schema_term.children = []
else:
old_cols = {}
schema_term = doc['Schema'].new_term('Table', r.schema_name)
index_names = [n if n else "id" for n in df.index.names]
for i, col in enumerate(index_names + list(df.columns)):
acn = alt_col_name(col, i) if alt_col_name(col, i) != str(col) else ''
d = {'name': col, 'datatype': get_col_dtype(col), 'altname': acn}
if col in old_cols.keys():
lookup_name = col
elif acn in old_cols.keys():
lookup_name = acn
else:
lookup_name = None
if lookup_name and lookup_name in old_cols:
old_col = old_cols.get(lookup_name)
for k, v in old_col.items():
if k != 'name' and v:
d[k] = v
columns.append(d)
for c in columns:
name = c['name']
del c['name']
datatype = c['datatype']
del c['datatype']
altname = c['altname']
del c['altname']
schema_term.new_child('Column', name, datatype=datatype, altname=altname, **c) | 5,357,722 |
def compute_similarity(image, reference):
"""Compute a similarity index for an image compared to a reference image.
Similarity index is based on a the general algorithm used in the AmphiIndex algorithm.
- identify slice of image that is a factor of 256 in size
- rebin image slice down to a (256,256) image
- rebin same slice from reference down to a (256,256) image
- sum the differences of the rebinned slices
- divide absolute value of difference scaled by reference slice sum
.. note:: This index will typically return values < 0.1 for similar images, and
values > 1 for dis-similar images.
Parameters
----------
image : ndarray
Image (as ndarray) to measure
reference : ndarray
Image which serves as the 'truth' or comparison image.
Returns
-------
similarity_index : float
Value of similarity index for `image`
"""
# Ensure NaNs are replaced with 0
image = np.nan_to_num(image[:], nan=0)
reference = np.nan_to_num(reference[:], nan=0)
imgshape = (min(image.shape[0], reference.shape[0]),
min(image.shape[1], reference.shape[1]))
minsize = min(imgshape[0], imgshape[1])
# determine largest slice that is a power of 2 in size
window_bit = maxBit(minsize)
window = 2**window_bit
# Define how big the rebinned image should be for computing the sim index
# Ensure a minimum rebinned size of 64x64
sim_bit = (window_bit - 2) if (window_bit - 2) > 6 else window_bit
sim_size = 2**sim_bit
# rebin image and reference
img = rebin(image[:window, :window], (sim_size, sim_size))
ref = rebin(reference[:window, :window], (sim_size, sim_size))
# Compute index
diffs = np.abs((img - ref).sum())
sim_indx = diffs / img.sum()
return sim_indx | 5,357,723 |
def _create_npu_quantization(
scale,
zero_point,
):
"""This is a helper function to capture a list
of arguments to create Vela NpuQuantization object
"""
# Scale could be an ndarray if per-channel quantization is available
if not isinstance(scale, tvm.tir.expr.Load):
if isinstance(scale.value, float):
scale = np.single(scale.value)
else:
assert isinstance(scale.value.value, float)
scale = np.single(scale.value.value)
q_params = vapi.NpuQuantization(scale_f32=scale, zero_point=zero_point.value)
return q_params | 5,357,724 |
def article_markdown(text):
""" 对传入的text文本进行markdown """
renderer = ArticleRenderer()
markdown = mistune.Markdown(renderer=renderer)
return markdown(text) | 5,357,725 |
def tf_pywt_wavelet_decomposition(patch_vec, patch_size, name, wavelet_type, level, mode):
"""
:param patch_vec:
:param patch_size:
:param name:
:param wavelet_type:
:param level:
:param mode:
:return:
"""
# TODO: docstring
# Convert input values for pywt
wavelet_type = wavelet_type.decode('utf-8')
mode = mode.decode('utf-8')
level = int(level)
patch_size = tuple(patch_size)
name = name.decode('utf-8')
# print('wavelet_type: {}, {}'.format(wavelet_type, type(wavelet_type)))
# print('mode: {}, {}'.format(mode, type(mode)))
# print('level: {}, {}'.format(level, type(level)))
# print('patch_vec: {}, {}'.format(patch_vec, type(patch_vec)))
# print('patch_size: {}, {}'.format(patch_size, type(patch_size)))
# print('name: {}, {}'.format(name, type(name)))
# Rebuild transform_dict from unpacked inputs
transform_dict = generate_transform_dict(patch_size, name, wavelet=wavelet_type, level=level, mode=mode)
# print(transform_dict)
# Decomposition
coeffs_vec, bookkeeping_mat = wavelet_decomposition(patch_vec, transform_dict)
return coeffs_vec.astype(np.float32), bookkeeping_mat.astype(np.int32) | 5,357,726 |
def launch_pycompss_application(app,
func,
log_level="off", # type: str
o_c=False, # type: bool
debug=False, # type: bool
graph=False, # type: bool
trace=False, # type: bool
monitor=None, # type: int
project_xml=None, # type: str
resources_xml=None, # type: str
summary=False, # type: bool
task_execution="compss", # type: str
storage_impl=None, # type: str
storage_conf=None, # type: str
streaming_backend=None, # type: str
streaming_master_name=None, # type: str
streaming_master_port=None, # type: str
task_count=50, # type: int
app_name=None, # type: str
uuid=None, # type: str
base_log_dir=None, # type: str
specific_log_dir=None, # type: str
extrae_cfg=None, # type: str
comm="NIO", # type: str
conn=DEFAULT_CONN, # type: str
master_name="", # type: str
master_port="", # type: str
scheduler=DEFAULT_SCHED, # type: str
jvm_workers=DEFAULT_JVM_WORKERS, # type: str
cpu_affinity="automatic", # type: str
gpu_affinity="automatic", # type: str
fpga_affinity="automatic", # type: str
fpga_reprogram="", # type: str
profile_input="", # type: str
profile_output="", # type: str
scheduler_config="", # type: str
external_adaptation=False, # type: bool
propagate_virtual_environment=True, # noqa type: bool
mpi_worker=False, # type: bool
worker_cache=False, # type: bool or str
shutdown_in_node_failure=False, # type: bool
io_executors=0, # type: int
env_script="", # type: str
reuse_on_block=True, # type: bool
nested_enabled=False, # type: bool
tracing_task_dependencies=False, # type: bool
trace_label=None, # type: str
extrae_cfg_python=None, # type: str
wcl=0, # type: int
cache_profiler=False, # type: bool
*args, **kwargs
): # NOSONAR
# type: (...) -> None
""" Launch PyCOMPSs application from function.
:param app: Application path
:param func: Function
:param log_level: Logging level [ "trace"|"debug"|"info"|"api"|"off" ]
(default: "off")
:param o_c: Objects to string conversion [ True | False ] (default: False)
:param debug: Debug mode [ True | False ] (default: False)
(overrides log_level)
:param graph: Generate graph [ True | False ] (default: False)
:param trace: Generate trace
[ True | False | "scorep" | "arm-map" | "arm-ddt"]
(default: False)
:param monitor: Monitor refresh rate (default: None)
:param project_xml: Project xml file path
:param resources_xml: Resources xml file path
:param summary: Execution summary [ True | False ] (default: False)
:param task_execution: Task execution (default: "compss")
:param storage_impl: Storage implementation path
:param storage_conf: Storage configuration file path
:param streaming_backend: Streaming backend (default: None)
:param streaming_master_name: Streaming master name (default: None)
:param streaming_master_port: Streaming master port (default: None)
:param task_count: Task count (default: 50)
:param app_name: Application name (default: Interactive_date)
:param uuid: UUId
:param base_log_dir: Base logging directory
:param specific_log_dir: Specific logging directory
:param extrae_cfg: Extrae configuration file path
:param comm: Communication library (default: NIO)
:param conn: Connector (default: DefaultSSHConnector)
:param master_name: Master Name (default: "")
:param master_port: Master port (default: "")
:param scheduler: Scheduler (default:
es.bsc.compss.scheduler.loadbalancing.LoadBalancingScheduler)
:param jvm_workers: Java VM parameters
(default: "-Xms1024m,-Xmx1024m,-Xmn400m")
:param cpu_affinity: CPU Core affinity (default: "automatic")
:param gpu_affinity: GPU Core affinity (default: "automatic")
:param fpga_affinity: FPGA Core affinity (default: "automatic")
:param fpga_reprogram: FPGA reprogram command (default: "")
:param profile_input: Input profile (default: "")
:param profile_output: Output profile (default: "")
:param scheduler_config: Scheduler configuration (default: "")
:param external_adaptation: External adaptation [ True | False ]
(default: False)
:param propagate_virtual_environment: Propagate virtual environment
[ True | False ] (default: False)
:param mpi_worker: Use the MPI worker [ True | False ] (default: False)
:param worker_cache: Use the worker cache [ True | int(size) | False]
(default: False)
:param shutdown_in_node_failure: Shutdown in node failure [ True | False]
(default: False)
:param io_executors: <Integer> Number of IO executors
:param env_script: <String> Environment script to be sourced in workers
:param reuse_on_block: Reuse on block [ True | False] (default: True)
:param nested_enabled: Nested enabled [ True | False] (default: False)
:param tracing_task_dependencies: Include task dependencies in trace
[ True | False] (default: False)
:param trace_label: <String> Add trace label
:param extrae_cfg_python: <String> Extrae configuration file for the
workers
:param wcl: <Integer> Wallclock limit. Stops the runtime if reached.
0 means forever.
:param cache_profiler: Use the cache profiler [ True | False]
(default: False)
:param args: Positional arguments
:param kwargs: Named arguments
:return: Execution result
"""
# Check that COMPSs is available
if "COMPSS_HOME" not in os.environ:
# Do not allow to continue if COMPSS_HOME is not defined
raise PyCOMPSsException("ERROR: COMPSS_HOME is not defined in the environment") # noqa: E501
# Let the Python binding know we are at master
context.set_pycompss_context(context.MASTER)
# Then we can import the appropriate start and stop functions from the API
from pycompss.api.api import compss_start, compss_stop
##############################################################
# INITIALIZATION
##############################################################
if debug:
log_level = "debug"
# Initial dictionary with the user defined parameters
all_vars = parameters_to_dict(log_level,
debug,
o_c,
graph,
trace,
monitor,
project_xml,
resources_xml,
summary,
task_execution,
storage_impl,
storage_conf,
streaming_backend,
streaming_master_name,
streaming_master_port,
task_count,
app_name,
uuid,
base_log_dir,
specific_log_dir,
extrae_cfg,
comm,
conn,
master_name,
master_port,
scheduler,
jvm_workers,
cpu_affinity,
gpu_affinity,
fpga_affinity,
fpga_reprogram,
profile_input,
profile_output,
scheduler_config,
external_adaptation,
propagate_virtual_environment,
mpi_worker,
worker_cache,
shutdown_in_node_failure,
io_executors,
env_script,
reuse_on_block,
nested_enabled,
tracing_task_dependencies,
trace_label,
extrae_cfg_python,
wcl,
cache_profiler)
# Save all vars in global current flags so that events.py can restart
# the notebook with the same flags
export_current_flags(all_vars)
# Check the provided flags
flags, issues = check_flags(all_vars)
if not flags:
print_flag_issues(issues)
return None
# Prepare the environment
env_vars = prepare_environment(False, o_c, storage_impl, app,
debug, trace, mpi_worker)
all_vars.update(env_vars)
monitoring_vars = prepare_loglevel_graph_for_monitoring(monitor,
graph,
debug,
log_level)
all_vars.update(monitoring_vars)
if RUNNING_IN_SUPERCOMPUTER:
updated_vars = updated_variables_in_sc()
all_vars.update(updated_vars)
to_update = prepare_tracing_environment(all_vars["trace"],
all_vars["extrae_lib"],
all_vars["ld_library_path"])
all_vars["trace"], all_vars["ld_library_path"] = to_update
inf_vars = check_infrastructure_variables(all_vars["project_xml"],
all_vars["resources_xml"],
all_vars["compss_home"],
all_vars["app_name"],
all_vars["file_name"],
all_vars["external_adaptation"])
all_vars.update(inf_vars)
create_init_config_file(**all_vars)
##############################################################
# RUNTIME START
##############################################################
# Runtime start
compss_start(log_level, all_vars["trace"], True)
# Setup logging
binding_log_path = get_log_path()
log_path = os.path.join(all_vars["compss_home"],
"Bindings",
"python",
str(all_vars["major_version"]),
"log")
set_temporary_directory(binding_log_path)
logging_cfg_file = get_logging_cfg_file(log_level)
init_logging(os.path.join(log_path, logging_cfg_file), binding_log_path)
logger = logging.getLogger("pycompss.runtime.launch")
logger.debug("--- START ---")
logger.debug("PyCOMPSs Log path: %s" % log_path)
if storage_impl and storage_conf:
logger.debug("Starting storage")
persistent_storage = master_init_storage(all_vars["storage_conf"], logger)
else:
persistent_storage = False
logger.debug("Starting streaming")
streaming = init_streaming(all_vars["streaming_backend"],
all_vars["streaming_master_name"],
all_vars["streaming_master_port"])
saved_argv = sys.argv
sys.argv = args
# Execution:
with event(APPLICATION_RUNNING_EVENT, master=True):
if func is None or func == "__main__":
if IS_PYTHON3:
exec(open(app).read())
else:
execfile(app) # noqa
result = None
else:
if IS_PYTHON3:
from importlib.machinery import SourceFileLoader # noqa
imported_module = SourceFileLoader(all_vars["file_name"], app).load_module() # noqa
else:
import imp # noqa
imported_module = imp.load_source(all_vars["file_name"], app) # noqa
method_to_call = getattr(imported_module, func)
try:
result = method_to_call(*args, **kwargs)
except TypeError:
result = method_to_call()
# Recover the system arguments
sys.argv = saved_argv
# Stop streaming
if streaming:
stop_streaming()
# Stop persistent storage
if persistent_storage:
master_stop_storage(logger)
logger.debug("--- END ---")
##############################################################
# RUNTIME STOP
##############################################################
# Stop runtime
compss_stop()
clean_log_configs()
return result | 5,357,727 |
def plot_48hours(i, url, sorted_dts, sorted_dts2=None, save=False):
"""
包含了两条线!
"""
# print(url)
# print("实际传播开始和结束时间:", sorted_dts[0], sorted_dts[-1])
plt.figure(figsize=(10, 6))
ts = cal_ts_48hours(sorted_dts)
ts.plot()
if sorted_dts2:
ts2 = cal_ts_48hours(sorted_dts2)
ts2.plot()
# configure
plt.ylabel('N of tweets with this fake news', fontsize=15)
plt.xticks(fontsize=11); plt.yticks(fontsize=11)
# plt.xlabel('$Date$', fontsize=15)
# plt.title(url)
if save:
plt.savefig('fig/{}-{}-first-48-hours.pdf'.format(i, url), dpi=300)
else:
plt.show()
plt.close() | 5,357,728 |
def print_variables_by_scope():
"""Prints trainable variables by scope."""
vars = [(v.name, v.shape.as_list()) for v in tf.trainable_variables()]
vars = sorted(vars, key=lambda x: x[0])
last_scope = None
scope_n_params = 0
for i, (name, shape) in enumerate(vars):
current_scope = name.split('/', 1)[0]
if current_scope != last_scope:
if last_scope is not None:
scope_n_params = format_integer(scope_n_params)
print('\t# scope params = {}'.format(scope_n_params))
print()
print('scope:', current_scope)
scope_n_params = 0
last_scope = current_scope
n_params = np.prod(shape, dtype=np.int32)
scope_n_params += n_params
print('\t', name, shape)
print('\t# scope params = {}'.format(format_integer(scope_n_params)))
print() | 5,357,729 |
def insert_into_crime_report(summary: tuple):
"""
:param summary:
:return:
"""
connection = connect_to_db()
usr = connection.cursor()
usr.execute("INSERT INTO CrimeReport VALUES (?,?,?,?,?,?,?,?,?,?,?)", summary)
connection.commit()
connection.close() | 5,357,730 |
def _gen_key(user_id, key_name):
""" Tuck this into UserManager """
try:
manager = users.UserManager.instance()
private_key, fingerprint = manager.generate_key_pair(user_id, key_name)
except Exception as ex:
return {'exception': ex}
return {'private_key': private_key, 'fingerprint': fingerprint} | 5,357,731 |
def svhn_loader(size=None,root="./shvn",set="train",batch_size=32,mean=0.5,std=0.5,transform="default",download=True,target_transform=None,**loader_args):
"""
:param size:
:param root:
:param set:
:param batch_size:
:param mean:
:param std:
:param transform:
:param download:
:param target_transform:
:param loader_args:
:return:
"""
valid_sets = ('train', 'test', 'extra')
if set not in valid_sets: raise ValueError("set {} is invalid, valid sets include {}".format(set,valid_sets))
if size is not None:
if not isinstance(size,tuple):
size = (size,size)
if transform == "default":
t = []
if size is not None:
t.append(transformations.Resize(size))
t.append(transformations.ToTensor())
if mean is not None and std is not None:
if not isinstance(mean, tuple):
mean = (mean,)
if not isinstance(std, tuple):
std = (std,)
t.append(transformations.Normalize(mean=mean, std=std))
trans = transformations.Compose(t)
else:
trans = transform
data = SVHN(root,split=set,transform=trans,download=download,target_transform=target_transform)
shuffle_mode = True if set == "train" else False
return DataLoader(data,batch_size=batch_size,shuffle=shuffle_mode,**loader_args) | 5,357,732 |
def get_supervisees():
"""Pull the supervisor specifications out of the entry point."""
eps = list(pkg_resources.iter_entry_points(ENTRY_POINT_GROUP))
return dict((ep.name, ep.load()) for ep in eps) | 5,357,733 |
def delete_account(password_for):
"""
function to delete the credentials of a given account
"""
Credentials.delete_account(password_for) | 5,357,734 |
def object_context(name, sesh, conf, data=None, params=None):
"""Creates a CDMI object with the given file name, and deletes it when the ``with`` context exits.
Does no testing of return codes or any sort of validity. It does only call delete if the status code was 201
however.
To use it::
with object_context(u'wibble.txt', session, config, some_data) as response:
if response.status_code == 201:
print('Hooray!')
:param string name: File name of the object to create. This can be a path.
:param Requests.Session sesh: The create/delete has to be part of a session.
:param dict conf: Configuration values containing a ``host`` key with the full URL of the host, and a ``headers``
key containing a dictionary of custom HTTP header fields.
:param data: (Optional) Dictionary, bytes, or file-like object to send as the body of the object.
:param params: (optional) Dictionary or bytes to be sent in the query string.
:rtype: Requests.Response object.
"""
response = sesh.put('{0}/{1}/{2}'.format(conf['host'], conf['object-container'], name),
headers=conf['headers'], data=data, params=params)
log_request(response)
try:
yield response
except:
raise
finally:
if response.status_code == 201:
sesh.delete('{0}/{1}/{2}'.format(conf['host'], conf['object-container'], name), headers=conf['headers']) | 5,357,735 |
def test_withdraw_interactive_invalid_asset(
client, acc1_usd_withdrawal_transaction_factory
):
"""
`GET /withdraw/interactive_withdraw` fails with invalid asset_code.
"""
acc1_usd_withdrawal_transaction_factory()
response = client.get(
f"/withdraw/interactive_withdraw?transaction_id=2&asset_code=ETH", follow=True
)
content = json.loads(response.content)
assert response.status_code == 400
assert content == {"error": "invalid 'asset_code'"} | 5,357,736 |
def test_allocate_asset_dust_order_simple(
worker, other_worker, do_initial_allocation, maintain_until_allocated, base_account
):
"""Make dust order, check if it canceled and closer opposite order placed."""
do_initial_allocation(worker, worker.mode)
num_sell_orders_before = len(worker.sell_orders)
num_buy_orders_before = len(worker.buy_orders)
# Partially fill order from another account
sell_price = worker.buy_orders[0]['price'] / 1.01
sell_amount = worker.buy_orders[0]['quote']['amount'] * (1 - worker.partial_fill_threshold) * 1.1
other_worker.place_market_sell_order(sell_amount, sell_price)
worker.refresh_balances()
worker.refresh_orders()
worker.allocate_asset('quote', worker.quote_balance)
worker.refresh_orders()
num_sell_orders_after = len(worker.sell_orders)
num_buy_orders_after = len(worker.buy_orders)
assert num_buy_orders_before - num_buy_orders_after == 1
assert num_sell_orders_after - num_sell_orders_before == 1 | 5,357,737 |
async def index(request):
"""
This is the view handler for the "/" url.
**Note: returning html without a template engine like jinja2 is ugly, no way around that.**
:param request: the request object see http://aiohttp.readthedocs.io/en/stable/web_reference.html#request
:return: aiohttp.web.Response object
"""
# {% if database.is_none and example.is_message_board %}
# app.router allows us to generate urls based on their names,
# see http://aiohttp.readthedocs.io/en/stable/web.html#reverse-url-constructing-using-named-resources
message_url = request.app.router['messages'].url_for()
ctx = dict(
title=request.app['name'],
styles_css_url=request.app['static_root_url'] + '/styles.css',
content="""\
<p>Success! you've setup a basic aiohttp app.</p>
<p>To demonstrate a little of the functionality of aiohttp this app implements a very simple message board.</p>
<b>
<a href="{message_url}">View and add messages</a>
</b>""".format(message_url=message_url)
)
# {% else %}
ctx = dict(
title=request.app['name'],
styles_css_url=request.app['static_root_url'] + '/styles.css',
content="<p>Success! you've setup a basic aiohttp app.</p>",
)
# {% endif %}
# with the base web.Response type we have to manually set the content type, otherwise text/plain will be used.
return web.Response(text=BASE_PAGE.format(**ctx), content_type='text/html') | 5,357,738 |
def adtg(s, t, p):
"""
Calculates adiabatic temperature gradient as per UNESCO 1983 routines.
Parameters
----------
s(p) : array_like
salinity [psu (PSS-78)]
t(p) : array_like
temperature [℃ (ITS-90)]
p : array_like
pressure [db]
Returns
-------
adtg : array_like
adiabatic temperature gradient [℃ db :sup:`-1`]
Examples
--------
>>> # Data from UNESCO 1983 p45.
>>> import seawater as sw
>>> from seawater.library import T90conv
>>> t = T90conv([[ 0, 0, 0, 0, 0, 0],
... [10, 10, 10, 10, 10, 10],
... [20, 20, 20, 20, 20, 20],
... [30, 30, 30, 30, 30, 30],
... [40, 40, 40, 40, 40, 40]])
>>> s = [[25, 25, 25, 35, 35, 35],
... [25, 25, 25, 35, 35, 35],
... [25, 25, 25, 35, 35, 35],
... [25, 25, 25, 35, 35, 35],
... [25, 25, 25, 35, 35, 35]]
>>> p = [0, 5000, 10000, 0, 5000, 10000]
>>> sw.adtg(s, t, p)
array([[ 1.68710000e-05, 1.04700000e-04, 1.69426000e-04,
3.58030000e-05, 1.17956500e-04, 1.77007000e-04],
[ 1.00194580e-04, 1.60959050e-04, 2.06874170e-04,
1.14887280e-04, 1.71364200e-04, 2.12991770e-04],
[ 1.73819840e-04, 2.13534000e-04, 2.44483760e-04,
1.84273240e-04, 2.21087800e-04, 2.49137960e-04],
[ 2.41720460e-04, 2.64764100e-04, 2.82959590e-04,
2.47934560e-04, 2.69466550e-04, 2.86150390e-04],
[ 3.07870120e-04, 3.16988600e-04, 3.23006480e-04,
3.09844920e-04, 3.18839700e-04, 3.24733880e-04]])
References
----------
.. [1] Fofonoff, P. and Millard, R.C. Jr UNESCO 1983. Algorithms for
computation of fundamental properties of seawater. UNESCO Tech. Pap. in
Mar. Sci., No. 44, 53 pp.
http://unesdoc.unesco.org/images/0005/000598/059832eb.pdf
.. [2] Bryden, H. 1973. New Polynomials for thermal expansion, adiabatic
temperature gradient and potential temperature of sea water. Deep-Sea
Res. Vol20,401-408. doi:10.1016/0011-7471(73)90063-6
"""
s, t, p = map(np.asanyarray, (s, t, p))
T68 = T68conv(t)
a = [3.5803e-5, 8.5258e-6, -6.836e-8, 6.6228e-10]
b = [1.8932e-6, -4.2393e-8]
c = [1.8741e-8, -6.7795e-10, 8.733e-12, -5.4481e-14]
d = [-1.1351e-10, 2.7759e-12]
e = [-4.6206e-13, 1.8676e-14, -2.1687e-16]
return (a[0] + (a[1] + (a[2] + a[3] * T68) * T68) * T68 +
(b[0] + b[1] * T68) * (s - 35) +
((c[0] + (c[1] + (c[2] + c[3] * T68) * T68) * T68) +
(d[0] + d[1] * T68) * (s - 35)) * p +
(e[0] + (e[1] + e[2] * T68) * T68) * p * p) | 5,357,739 |
def segm_set_center_levels(name, seg_labels, path_out, levels=DISTANCE_LEVELS):
""" set segmentation levels according distance inside object imsegm
:param str name: image name
:param ndarray seg_labels:
:param str path_out: path for output
:param [float] levels: distance levels fro segmentation levels
"""
seg = np.zeros_like(seg_labels)
# set borders to 0
# seg_labels = set_boundary_values(seg_labels)
for obj_id in range(1, seg_labels.max() + 1):
im_bin = (seg_labels == obj_id)
if np.sum(im_bin) == 0:
continue
distance = ndimage.distance_transform_edt(im_bin)
probab = distance / np.max(distance)
pos_center = ndimage.measurements.center_of_mass(im_bin)
# logging.debug('object %i with levels: %s', obj_id, repr(levels))
for i, level in enumerate(levels):
mask = probab > level
if i > 0:
radius = int(np.sqrt(np.sum(mask) / np.pi))
im_level = draw_circle(pos_center, radius, mask.shape)
mask = np.logical_and(mask, im_level)
sel = morphology.disk(int(radius * 0.15))
mask = morphology.binary_opening(mask, sel)
seg[mask] = i + 1
path_seg = os.path.join(path_out, name)
tl_data.io_imsave(path_seg, seg.astype(np.uint8)) | 5,357,740 |
def calculate_ion_mz(seq: str,
ion: str = 'M',
charge: int = 0
) -> float:
"""
given a peptide sequence and ion type, count the number of atoms, accounting for ion
type and whether cysteines are measured by IAA
- ion type
M: full peptide parent ion (with H2O)
b: b ion (no addition)
y: y ion (with H2O)
:param seq: str amino acid sequence with modifications defined by []
:param ion: str ion type (default: M to return peptide mass)
:param charge: int numerical charge (default: 0 to return peptide mass)
:return: float accurate mass
"""
assert type(charge) == int, "Charge must be integer."
mass = 0
# First, strip all mass shifts and add them to the starting mass
try:
mods = [float(mod[1:-1]) for mod in re.findall('\\[.*?]', seq)]
except ValueError:
raise ValueError('Modification contains string characters.')
# 2021-11-22 exclude label mass from peptide mass calculation
mass += sum(m for m in mods if m not in params.label_mass)
# 2021-05-18 strip all N-terminal n from Comet
seq = re.sub('^n', '', seq)
# Strip all modifications
stripped = re.sub('\\[.*?]', '', seq)
res_atoms = _count_residue_atoms(stripped,
iaa=params.iaa, # add iodoacetamide to cysteine
)
# dictionary for complementary atoms to add to ion types
comp_atom_dict = {
'M': [0, 2, 1, 0, 0],
'b': [0, 0, 0, 0, 0],
'y': [0, 2, 1, 0, 0],
'b_': [0, -2, -1, 0, 0],
'y_': [0, 0, 0, 0, 0],
}
comp_atoms = comp_atom_dict[ion]
ion_atoms = [res_atoms[i] + comp_atoms[i] for i, v in enumerate(res_atoms)]
mass += _calc_atom_mass(ion_atoms)
# Return peptide mass if charge is 0
if charge > 0:
mz = (mass + constants.PROTON_MASS * charge) / charge
return mz
if charge < 0:
raise ValueError('Negative charges are not supported.')
return mass | 5,357,741 |
def get_all_sub_folders(folder_path):
"""get all sub folders to list
Parameters
----------
folder_path : str
Returns
-------
list
"""
sub_folders = []
for path in os.listdir(folder_path):
full_path = os.path.join(folder_path, path)
if os.path.isdir(full_path):
sub_folders.append(full_path)
return sub_folders | 5,357,742 |
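A minimal usage sketch for `get_all_sub_folders`; the path is hypothetical.
# Lists the immediate sub-directories of the given folder as full paths.
print(get_all_sub_folders("."))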
def calculate_correlation(type_, df, col1, col2):
"""Calculate the defined correlation coefficient.
This function accepts the type of correlation coefficient to be calculated, the data frame and the two columns.
It returns the calculated coefficient.
Keyword arguments:
type_ -- type of correlation coefficient to be calculated
df -- the dataframe
col1 -- first column
col2 -- second column
Returns:
corr -- the calculated correlation coefficient
"""
if type_ == 'pearson':
corr, _ = pearsonr(df[col1], df[col2])
print('Pearsons correlation: %.3f' % corr)
elif type_ == 'Spearman':
newdf = df[[col1, col2]].copy()
newdf[col1] = newdf[col1].rank()
newdf[col2] = newdf[col2].rank()
newdf['d'] = newdf[col2] - newdf[col1]
newdf["d^2"] = newdf['d']**2
d_square = sum(newdf['d^2'])
l = len(newdf[col2])
d = 6*d_square
spearman = 1 - d/(l*(l**2 - 1))
print("Spearman rank correlation is", np.round(spearman, 2))
else:
print("Pass either pearson/Spearman") | 5,357,743 |
def hhc_to_int(s):
"""Parse a number expressed in sortable hhc as an integer (or long).
>>> hhc_to_int('-')
0
>>> hhc_to_int('.')
1
>>> hhc_to_int('~')
65
>>> hhc_to_int('.-')
66
>>> hhc_to_int('..')
67
>>> hhc_to_int('.XW')
6700
>>> hhc_to_int('----..')
67
>>> print(hhc_to_int('fDpEShMz-qput'))
302231454903657293676544
Negative numbers are supported.
>>> hhc_to_int(',zST')
-6700
"""
if s == '' or s is None or s[:2] == ',,':
raise ValueError("invalid literal for hhc_to_int: {}".format(s))
if s[0] == NEGATIVE_PREFIX:
return -hhc2_to_int(s[1:], alphabet=HHC_ALPHABET[::-1])
return hhc2_to_int(s, HHC_ALPHABET) | 5,357,744 |
def initializeSens(P, B, idxs):
"""
This function initializes the sensitivities using the bicriteria algorithm, to be the distance from each
point to its closest flat from the set of flats B divided by the sum of distances between self.P.P and B.
:param B: A set of flats where each flat is represented by an orthogonal matrix and a translation vector.
:param idxs: A numpy array which represents the clustering which B imposes on self.P.P
:return: None.
"""
centers_idxs = np.unique(idxs) # number of clusters imposed by B
sensitivity_additive_term = np.zeros((P.shape[0], ))
for center_idx in centers_idxs: # go over each cluster of points from self.P.P
cluster_per_center = np.where(idxs == center_idx)[0] # get all points in certain cluster
# compute the distance of each point in the cluster to its respect flat
cost_per_point_in_cluster = Utils.computeDistanceToSubspace(P[cluster_per_center, :-1],
B[center_idx][0], B[center_idx][1])
# ost_per_point_in_cluster = np.apply_along_axis(lambda x:
# Utils.computeDistanceToSubspace(x, B[center_idx][0],
# B[center_idx][1]), 1,
# self.set_P.P[cluster_per_center, :-1])
# set the sensitivity to the distance of each point from its respected flat divided by the total distance
# between cluster points and the respected flat
sensitivity_additive_term[cluster_per_center] = 2 ** Utils.J * \
np.nan_to_num(cost_per_point_in_cluster /
np.sum(cost_per_point_in_cluster))
return sensitivity_additive_term | 5,357,745 |
def _label_plot(title="Untitled Plot", xlabel="x axis", ylabel="y axis") -> None:
"""Apply titles and axis labels to a plot."""
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel) | 5,357,746 |
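A short usage sketch for `_label_plot`, assuming `matplotlib.pyplot` is imported as `plt` in the same module.
plt.plot([0, 1, 2, 3], [0, 1, 4, 9])
_label_plot(title="Quadratic growth", xlabel="step", ylabel="value")
plt.show()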
def fast_knn(data, k=3, eps=0, p=2, distance_upper_bound=np.inf, leafsize=10, idw=util_idw.shepards):
""" Impute using a variant of the nearest neighbours approach
Basic idea: Impute array with a basic mean impute and then use the resulting complete
array to construct a KDTree. Use this KDTree to compute nearest neighbours.
After finding `k` nearest neighbours, take the weighted average of them. Basically,
find the nearest row in terms of distance
This approach is much, much faster than the other implementation (fit+transform
for each subset) which is almost prohibitively expensive.
Parameters
----------
data: numpy.ndarray
2D matrix to impute.
k: int, optional
Parameter used for method querying the KDTree class object. Number of
neighbours used in the KNN query. Refer to the docs for
[`scipy.spatial.KDTree.query`]
(https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.KDTree.query.html).
eps: nonnegative float, optional
Parameter used for method querying the KDTree class object. From the
SciPy docs: "Return approximate nearest neighbors; the kth returned
value is guaranteed to be no further than (1+eps) times the distance to
the real kth nearest neighbor". Refer to the docs for
[`scipy.spatial.KDTree.query`]
(https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.KDTree.query.html).
p : float, 1<=p<=infinity, optional
Parameter used for method querying the KDTree class object. Straight from the
SciPy docs: "Which Minkowski p-norm to use. 1 is the
sum-of-absolute-values Manhattan distance 2 is the usual Euclidean
distance infinity is the maximum-coordinate-difference distance". Refer to
the docs for
[`scipy.spatial.KDTree.query`]
(https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.KDTree.query.html).
distance_upper_bound : nonnegative float, optional
Parameter used for method querying the KDTree class object. Straight
from the SciPy docs: "Return only neighbors within this distance. This
is used to prune tree searches, so if you are doing a series of
nearest-neighbor queries, it may help to supply the distance to the
nearest neighbor of the most recent point." Refer to the docs for
[`scipy.spatial.KDTree.query`]
(https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.KDTree.query.html).
leafsize: int, optional
Parameter used for construction of the `KDTree` class object. Straight from
the SciPy docs: "The number of points at which the algorithm switches
over to brute-force. Has to be positive". Refer to the docs for
[`scipy.spatial.KDTree`](https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.spatial.KDTree.html)
for more information.
idw: fn, optional
Function that takes one argument, a list of distances, and returns weighted percentages. You can define a custom
one or bootstrap from functions defined in `impy.util.inverse_distance_weighting` which can be using
functools.partial, for example: `functools.partial(impy.util.inverse_distance_weighting.shepards, power=1)`
Returns
-------
numpy.ndarray
Imputed data.
Examples
--------
>>> data = np.arange(25).reshape((5, 5)).astype(np.float)
>>> data[0][2] = np.nan
>>> data
array([[ 0., 1., nan, 3., 4.],
[ 5., 6., 7., 8., 9.],
[10., 11., 12., 13., 14.],
[15., 16., 17., 18., 19.],
[20., 21., 22., 23., 24.]])
>> fast_knn(data, k=1) # Weighted average (by distance) of nearest 1 neighbour
array([[ 0., 1., 7., 3., 4.],
[ 5., 6., 7., 8., 9.],
[10., 11., 12., 13., 14.],
[15., 16., 17., 18., 19.],
[20., 21., 22., 23., 24.]])
>> fast_knn(data, k=2) # Weighted average of nearest 2 neighbours
array([[ 0. , 1. , 10.08608891, 3. , 4. ],
[ 5. , 6. , 7. , 8. , 9. ],
[10. , 11. , 12. , 13. , 14. ],
[15. , 16. , 17. , 18. , 19. ],
[20. , 21. , 22. , 23. , 24. ]])
>> fast_knn(data, k=3)
array([[ 0. , 1. , 13.40249283, 3. , 4. ],
[ 5. , 6. , 7. , 8. , 9. ],
[10. , 11. , 12. , 13. , 14. ],
[15. , 16. , 17. , 18. , 19. ],
[20. , 21. , 22. , 23. , 24. ]])
>> fast_knn(data, k=5) # There are at most only 4 neighbours. Raises error
...
IndexError: index 5 is out of bounds for axis 0 with size 5
"""
null_xy = find_null(data)
data_c = mean(data)
kdtree = KDTree(data_c, leafsize=leafsize)
for x_i, y_i in null_xy:
distances, indices = kdtree.query(data_c[x_i], k=k+1, eps=eps,
p=p, distance_upper_bound=distance_upper_bound)
# Will always return itself in the first index. Delete it.
distances, indices = distances[1:], indices[1:]
# Add small constant to distances to avoid division by 0
distances += 1e-3
weights = idw(distances)
# Assign missing value the weighted average of `k` nearest neighbours
data[x_i][y_i] = np.dot(weights, [data_c[ind][y_i] for ind in indices])
return data | 5,357,747 |
def role_generator(role):
"""Closure function returning a role function."""
return lambda *args, **kwargs: role.run(*args, **kwargs) | 5,357,748 |
def pick_slices(img, num_slices_per_view):
"""
Picks the slices to display in each dimension,
skipping any empty slices (without any segmentation at all).
"""
slices = list()
for view in range(len(img.shape)):
dim_size = img.shape[view]
non_empty_slices = np.array(
[sl for sl in range(dim_size) if np.count_nonzero(get_axis(img, view, sl)) > 0])
num_non_empty = len(non_empty_slices)
# try to skip about 5% of the slices at each tail (clipping the count at 0)
skip_count = max(0, np.around(num_non_empty * 0.05).astype('int16'))
# only when possible
if skip_count > 0 and (num_non_empty - 2 * skip_count >= num_slices_per_view):
non_empty_slices = non_empty_slices[skip_count: -skip_count]
num_non_empty = len(non_empty_slices)
# sampling non-empty slices only
sampled_indices = np.linspace(0, num_non_empty, num=min(num_non_empty, num_slices_per_view),
endpoint=False)
slices_in_dim = non_empty_slices[np.around(sampled_indices).astype('int64')]
# ensure you do not overshoot
slices_in_dim = [sn for sn in slices_in_dim if sn >= 0 or sn <= num_non_empty]
slices.append(slices_in_dim)
return slices | 5,357,749 |
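A usage sketch for `pick_slices` on a hypothetical 3-D segmentation volume; it assumes the `get_axis(img, view, sl)` helper used inside the function extracts a 2-D slice along the given axis.
import numpy as np
seg = np.zeros((32, 32, 32), dtype=np.uint8)
seg[8:24, 8:24, 8:24] = 1  # a labelled cube in the middle of the volume
slices = pick_slices(seg, num_slices_per_view=5)
for view, slice_ids in enumerate(slices):
    print("axis", view, "->", slice_ids)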
def cate2(request):
"""
DB에서 Cate2의 분류 이름을 반환
"""
cate1 = Cate1.objects.get(cate1_name=request.GET.get('cate1'))
cate2 = list(map(lambda cate2 : cate2['cate2_name'],
Cate2.objects.filter(cate1=cate1).values('cate2_name')))
json_data = json.dumps({'cate2': cate2})
return HttpResponse(json_data, content_type="application/json") | 5,357,750 |
def get_single_endpoint(name):
"""
    Build and return a ``Resource`` subclass named ``name`` whose GET handler
    looks up a single record by ``pid`` via ``get_with_id``.
"""
class EndpointWithID(Resource):
def get(self, pid):
return get_with_id(name, pid), 200
# TODO - Add `get.__doc__`
EndpointWithID.__name__ = name
return EndpointWithID | 5,357,751 |
def dayChange():
"""
Day Change
Calculates and stores in a dictionary the total current change in position
value since yesterday, which is (current_price - lastday_price)* qty.
:return: dictionary
"""
daychange = dict()
for position in portfolio:
# Strings are returned from API; convert to floating point type
current = float(position.current_price)
last = float(position.lastday_price)
quant = float(position.qty)
daychange[position.symbol] = (current - last) * quant
return daychange | 5,357,752 |
def look_for_section(line):
"""Look for one of the sections in a line of text."""
for key in SECTIONS:
if line.startswith(key):
return key
return None | 5,357,753 |
def check_rootfolders():
"""Create log and model folder"""
folders_util = [args.root_log, args.root_model,
os.path.join(args.root_log, args.store_name),
os.path.join(args.root_model, args.store_name)]
for folder in folders_util:
if not os.path.exists(folder):
print('creating folder ' + folder)
os.mkdir(folder) | 5,357,754 |
def SetStrucIdx(sid, index):
"""
Change structure index
@param sid: structure type ID
@param index: new index of the structure
@return: != 0 - ok
@note: See GetFirstStrucIdx() for the explanation of
structure indices and IDs.
"""
s = idaapi.get_struc(sid)
if not s:
return 0
return idaapi.set_struc_idx(s, index) | 5,357,755 |
def extract_user_dict_from_tweet( tweet: Tweet ):
"""Takes the other_data field from a tweet object and
extracts the data for the user from it.
It returns a dictionary rather than a User model object
because we might want to try looking up whether the user
exists before creating a new user object.
:type tweet Tweet
:returns dict
"""
if tweet.other_data and len( tweet.other_data ) > 0:
# extract the json into a dict
j = json.loads( tweet.other_data )
# extract the user json from the created dict
return json.loads( j[ 'user' ] ) | 5,357,756 |
def in_scope(repository_data):
"""Return whether the given repository is in scope for the configuration.
Keyword arguments:
repository_data -- data for the repository
"""
if "scope" in repository_data["configuration"] and repository_data["configuration"]["scope"] == "all":
return True
# Determine if user has sufficient permissions in the repository to approve the workflow run
return not repository_data["object"].archived and (
repository_data["permissions"] == "write" or repository_data["permissions"] == "admin"
) | 5,357,757 |
def run_image_container_checks(
image_container: Union[AICSImage, Reader],
set_scene: str,
expected_scenes: Tuple[str, ...],
expected_current_scene: str,
expected_shape: Tuple[int, ...],
expected_dtype: np.dtype,
expected_dims_order: str,
expected_channel_names: Optional[List[str]],
expected_physical_pixel_sizes: Tuple[
Optional[float], Optional[float], Optional[float]
],
expected_metadata_type: Union[type, Tuple[Union[type, Tuple[Any, ...]], ...]],
) -> Union[AICSImage, Reader]:
"""
A general suite of tests to run against image containers (Reader and AICSImage).
"""
# Check serdes
check_can_serialize_image_container(image_container)
# Set scene
image_container.set_scene(set_scene)
# Check scene info
assert image_container.scenes == expected_scenes
assert image_container.current_scene == expected_current_scene
# Check basics
assert image_container.shape == expected_shape
assert image_container.dtype == expected_dtype
assert image_container.dims.order == expected_dims_order
assert image_container.dims.shape == expected_shape
assert image_container.channel_names == expected_channel_names
assert image_container.physical_pixel_sizes == expected_physical_pixel_sizes
assert isinstance(image_container.metadata, expected_metadata_type)
# Read different chunks
zyx_chunk_from_delayed = image_container.get_image_dask_data("ZYX").compute()
cyx_chunk_from_delayed = image_container.get_image_dask_data("CYX").compute()
# Check image still not fully in memory
assert image_container._xarray_data is None
# Read in mem then pull chunks
zyx_chunk_from_mem = image_container.get_image_data("ZYX")
cyz_chunk_from_mem = image_container.get_image_data("CYX")
# Compare chunk reads
np.testing.assert_array_equal(
zyx_chunk_from_delayed,
zyx_chunk_from_mem,
)
np.testing.assert_array_equal(
cyx_chunk_from_delayed,
cyz_chunk_from_mem,
)
# Check that the shape and dtype are expected after reading in full
assert image_container.data.shape == expected_shape
assert image_container.data.dtype == expected_dtype
# Check serdes
check_can_serialize_image_container(image_container)
return image_container | 5,357,758 |
def collect_validation_helper(package_names: str) -> Type[ValidationHelper]:
"""Finds subclasses of the validate.ValidationHelper from a
list of package names.
Args:
package_names: A list of Python package names as strings.
Returns:
        The first validator class found that is a subclass of validate.ValidationHelper.
"""
validation_cls = find_subclasses(package_names, ValidationHelper)
return validation_cls[0] | 5,357,759 |
def heartbeat(request):
    """Test that ElasticSearch is operational.
:param request: current request object
:type request: :class:`~pyramid:pyramid.request.Request`
    :returns: ``True`` if everything is ok, ``False`` otherwise.
:rtype: bool
"""
indexer = request.registry.indexer
try:
return indexer.client.ping()
except Exception as e:
logger.exception(e)
return False | 5,357,760 |
def _load_edge_data(graph, regions):
"""Load and return all relevant edges from the graph."""
has_seat = _load_edges_from_query(
graph,
'SELECT inV().@rid AS in_rid, outV().@rid AS out_rid FROM Has_Seat')
# The edges in the existing dataset point from parent to child region / settlement.
# In the desired dataset, we want the edge to be the other way, so we switch
# the "in_rid" and "out_rid" names.
has_parent_region = _load_edges_from_query(
graph, '''
SELECT inV().@rid AS out_rid, outV().@rid AS in_rid FROM E WHERE
(
@this INSTANCEOF "Has_Castles" OR
@this INSTANCEOF "Has_Cities" OR
@this INSTANCEOF "Has_Towns" OR
@this INSTANCEOF "Has_Villages" OR
@this INSTANCEOF "Has_Regional+capital" OR
@this INSTANCEOF "Has_Places"
) AND (
inV() INSTANCEOF "Region" OR inV() INSTANCEOF "Settlement"
) AND (
outV() INSTANCEOF "Region" OR outV() INSTANCEOF "Settlement"
)
''') + _load_missing_region_edges(regions)
lives_in = _load_edges_from_query(
graph, '''
SELECT inV().@rid AS in_rid, outV().@rid AS out_rid FROM Has_Place WHERE (
(inV() INSTANCEOF "Region" OR inV() INSTANCEOF "Settlement") AND
outV() INSTANCEOF "Character"
)''')
owes_allegiance_to = _load_edges_from_query(
graph, '''
SELECT inV().@rid AS in_rid, outV().@rid AS out_rid FROM Has_Allegiance WHERE (
(
inV() INSTANCEOF "Character" OR
inV() INSTANCEOF "Noblehouse" OR
inV() INSTANCEOF "Noble_house"
) AND (
outV() INSTANCEOF "Character" OR
outV() INSTANCEOF "Noblehouse" OR
outV() INSTANCEOF "Noble_house"
)
)''')
return set(has_seat), set(has_parent_region), set(lives_in), set(owes_allegiance_to) | 5,357,761 |
def get_story_assignee(jira_sheet, process):
""" Accessor for Story Assignee
Accessor method for retrieving the value for Story Assignee on the
JIRA Stories Sheet.
There is a check to make certain the process in question is amongst those
qualified to exist.
Args:
jira_sheet: A variable holding an Excel Workbook sheet in memory.
process: A variable holding the process of an Issue.
Returns:
        A string value of the Story Assignee
"""
if process in PROCESS_DICT:
return (jira_sheet[PROCESS_DICT.get(process) + "6"].value)
else:
        print("Error: " + process + """ is an invalid process.
            The following QE processes are acceptable: Complaints, Inquiry,
            CAPA, Quality Event, Change Control.\n""") | 5,357,762
def predict_mhalo(obs_dsigma, mock_use, logms_mod_tot, logms_mod_inn, sig_logms=None):
"""Halo mass and its scatter in each bin.
Parameters
----------
obs_dsigma: list
List of observed DeltaSigma profiles.
mock_use: numpy array
UniverseMachine mock catalog.
logms_mod_tot : ndarray
Total stellar mass (e.g. M100) predicted by UM.
logms_mod_inn : ndarray
Inner stellar mass (e.g. M10) predicted by UM.
sig_logms: numpy array, optional
Uncertainties of stellar mass. Default: None
"""
# The mock catalog and precomputed mass files for subsamples
return [get_mean_mhalo(mock_use, obs_prof, logms_mod_tot, logms_mod_inn, sig_logms=sig_logms)
for obs_prof in obs_dsigma] | 5,357,763 |
def create_agent_model(env, lr=1e-4, h_size=128, epsilon=0.2, beta=1e-3, max_step=5e6, normalize=False, num_layers=2):
"""
Takes a Unity environment and model-specific hyper-parameters and returns the
appropriate PPO agent model for the environment.
:param env: a Unity environment.
:param lr: Learning rate.
    :param h_size: Size of hidden layers.
    :param epsilon: Value for policy-divergence threshold.
    :param beta: Strength of entropy regularization.
    :param max_step: Total number of training steps.
    :param normalize: Whether to normalize the observation inputs.
    :param num_layers: Number of hidden layers.
    :return: a sub-class of PPOAgent tailored to the environment.
"""
    if num_layers < 1:
        num_layers = 1
brain_name = env.brain_names[0]
brain = env.brains[brain_name]
if brain.action_space_type == "continuous":
return ContinuousControlModel(lr, brain, h_size, epsilon, max_step, normalize, num_layers)
if brain.action_space_type == "discrete":
return DiscreteControlModel(lr, brain, h_size, epsilon, beta, max_step, normalize, num_layers) | 5,357,764 |
def decode_to_sequence(encoded_sequence: Bytes) -> List[RLP]:
"""
Decodes a rlp encoded byte stream assuming that the decoded data
should be of type `Sequence` of objects.
Parameters
----------
encoded_sequence :
An RLP encoded Sequence.
Returns
-------
decoded : `Sequence[RLP]`
Sequence of objects decoded from `encoded_sequence`.
"""
if encoded_sequence[0] <= 0xF7:
len_joined_encodings = encoded_sequence[0] - 0xC0
ensure(len_joined_encodings < len(encoded_sequence))
joined_encodings = encoded_sequence[1 : 1 + len_joined_encodings]
else:
joined_encodings_start_idx = 1 + encoded_sequence[0] - 0xF7
ensure(joined_encodings_start_idx - 1 < len(encoded_sequence))
# Expectation is that the big endian bytes shouldn't start with 0
# while trying to decode using RLP, in which case is an error.
ensure(encoded_sequence[1] != 0)
len_joined_encodings = Uint.from_be_bytes(
encoded_sequence[1:joined_encodings_start_idx]
)
ensure(len_joined_encodings >= 0x38)
joined_encodings_end_idx = (
joined_encodings_start_idx + len_joined_encodings
)
ensure(joined_encodings_end_idx - 1 < len(encoded_sequence))
joined_encodings = encoded_sequence[
joined_encodings_start_idx:joined_encodings_end_idx
]
return decode_joined_encodings(joined_encodings) | 5,357,765 |
def list_field_override_choices(override_map=None, html=True):
"""
This returns either a list of allowable choices, or an HTML-formatted unordered list (default).
"""
    if override_map:
        if html:
            choices = '<b>These are the allowable field override choices for field name:</b><ul>'
        else:
            choices = []
        for item in override_map:
            if html:
                choices += '<li>{}</li>'.format(item['field'])
            else:
                choices.append(item['field'])
        if html:
            choices += '</ul>'
        return choices
    return None | 5,357,766
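# Usage sketch for list_field_override_choices above; the override map entries
# are hypothetical and only need the 'field' key that the loop reads.
_example_override_map = [{'field': 'status'}, {'field': 'priority'}]
_as_list = list_field_override_choices(_example_override_map, html=False)
# -> ['status', 'priority']
_as_html = list_field_override_choices(_example_override_map)
# -> '<b>...</b><ul><li>status</li><li>priority</li></ul>'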
def add_create_subcommand(subparser_factory):
"""Add a subparser for the create subcommand."""
create_help = "Create and load a database from an sql dump file."
subparser = subparser_factory.add_parser('create', help=create_help,
description=create_help)
dbname_help = "The name of the database to be created."
subparser.add_argument('dbname', help=dbname_help)
dbfile_help = ("An sql database dump to be loaded into the new " +
"database. If not given, an empty hypercube database " +
"will be loaded.")
subparser.add_argument('dbfile', help=dbfile_help, nargs='?',
default=DEFAULT_DBFILE)
subparser.set_defaults(subcommand=run_create_subcommand) | 5,357,767 |
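# Usage sketch for add_create_subcommand above. It assumes DEFAULT_DBFILE and
# run_create_subcommand exist in this module (both are referenced by the
# function); the program and database names are illustrative.
import argparse

parser = argparse.ArgumentParser(prog='dbtool')
subparser_factory = parser.add_subparsers()
add_create_subcommand(subparser_factory)
ns = parser.parse_args(['create', 'hypercube_demo'])
# ns.dbname == 'hypercube_demo', ns.dbfile == DEFAULT_DBFILE, and ns.subcommand
# is run_create_subcommand; a typical (assumed) dispatch would be:
# ns.subcommand(ns)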
def setup(app):
""" some tools for markdown parsing """
config = {
# 'url_resolver': lambda url: github_doc_root + url,
'auto_toc_tree_section': 'Contents',
'enable_eval_rst': True,
}
app.add_config_value('recommonmark_config', config, True)
app.add_transform(AutoStructify)
# from m2r to make `mdinclude` work
app.add_config_value('no_underscore_emphasis', False, 'env')
app.add_config_value('m2r_parse_relative_links', False, 'env')
app.add_config_value('m2r_anonymous_references', False, 'env')
app.add_config_value('m2r_disable_inline_math', False, 'env')
app.add_directive('mdinclude', MdInclude) | 5,357,768 |
def get_logger(initial_level=logging.DEBUG):
"""Gets the named logger"""
logger = logging.getLogger('ungoogled')
if logger.level == logging.NOTSET:
logger.setLevel(initial_level)
if not logger.hasHandlers():
console_handler = logging.StreamHandler()
console_handler.setLevel(initial_level)
format_string = '%(levelname)s: %(message)s'
formatter = logging.Formatter(format_string)
console_handler.setFormatter(formatter)
logger.addHandler(console_handler)
return logger | 5,357,769 |
def config_logger(name, log_file, file_level, console_level):
"""Configure the logger that should be used by all modules in this
package.
This method sets up a logger, such that all messages are written to console
and to an extra logging file. Both outputs will be the same, except that
a message logged to file contains the module name, where the message comes
from.
The implementation is based on an earlier implementation of a function I
used in another project:
https://git.io/fNDZJ
Args:
name: The name of the created logger.
log_file: Path of the log file. If None, no logfile will be generated.
If the logfile already exists, it will be overwritten.
file_level: Log level for logging to log file.
console_level: Log level for logging to console.
Returns:
The configured logger.
"""
file_formatter = logging.Formatter(fmt='%(asctime)s - %(levelname)s' \
+ ' - %(module)s - %(message)s', \
datefmt='%m/%d/%Y %I:%M:%S %p')
stream_formatter = logging.Formatter(fmt='%(asctime)s - %(levelname)s' \
+ ' - %(message)s', \
datefmt='%m/%d/%Y %I:%M:%S %p')
if log_file is not None:
log_dir = os.path.dirname(log_file)
if log_dir != '' and not os.path.isdir(log_dir):
os.mkdir(log_dir)
if os.path.exists(log_file):
os.remove(log_file)
file_handler = logging.FileHandler(log_file)
file_handler.setFormatter(file_formatter)
file_handler.setLevel(file_level)
stream_handler = logging.StreamHandler(sys.stdout)
stream_handler.setFormatter(stream_formatter)
stream_handler.setLevel(console_level)
logger = logging.getLogger(name)
logger.setLevel(logging.DEBUG)
if log_file is not None:
logger.addHandler(file_handler)
logger.addHandler(stream_handler)
return logger | 5,357,770 |
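# Usage sketch for config_logger above; the logger name and log file path are
# illustrative assumptions, not values from the original project.
import logging

example_logger = config_logger('my_package', 'logs/run.log',
                               file_level=logging.DEBUG,
                               console_level=logging.INFO)
example_logger.debug('reaches the log file only')
example_logger.info('reaches both the console and the log file')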
def _safe_resolve_url(url):
"""
Previously, resolve_url_lazy would fail if the url was a unicode object.
See <https://github.com/fusionbox/django-authtools/issues/13> for more
information.
Thanks to GitHub user alanwj for pointing out the problem and providing
this solution.
"""
return six.text_type(resolve_url(url)) | 5,357,771 |
def get_metrics(
reset: bool = False, include_custom: bool = True, raise_errors: bool = True,
) -> pd.DataFrame:
"""
Returns table of available metrics used for CV.
Example
-------
>>> from pycaret.datasets import get_data
>>> boston = get_data('boston')
>>> from pycaret.regression import *
>>> exp_name = setup(data = boston, target = 'medv')
>>> all_metrics = get_metrics()
    Parameters
    ----------
    reset: bool, default = False
When True, will reset all changes made using the ``add_metric``
and ``remove_metric`` function.
include_custom: bool, default = True
Whether to include user added (custom) metrics or not.
raise_errors: bool, default = True
If False, will suppress all exceptions, ignoring models that
couldn't be created.
Returns:
pandas.DataFrame
"""
return pycaret.internal.tabular.get_metrics(
reset=reset, include_custom=include_custom, raise_errors=raise_errors,
) | 5,357,772 |
def get_unique_wikilinks(filepath):
"""Get UNIQUE wikilinks from a md file.
The links' order of appearance in the file IS preserved in the output.
This accounts for:
- Aliases / alt text, so [[Lorem ipsum|L.I.]]
will be represented as 'Lorem ipsum'.
- Header text links, so [[Lorem ipsum#Dummy text]]
will be represented as 'Lorem ipsum'.
Args:
filepath (pathlib Path): Path object representing the file from
which info will be extracted.
Returns:
list of strings
"""
plaintext = _get_ascii_plaintext_from_md_file(filepath, remove_code=True)
wikilinks = _get_unique_wikilinks(plaintext, remove_aliases=True)
return wikilinks | 5,357,773 |
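# Usage sketch for get_unique_wikilinks above; 'notes/note.md' is a hypothetical
# vault note and the function expects a pathlib.Path as documented.
from pathlib import Path

wikilinks = get_unique_wikilinks(Path('notes/note.md'))
# e.g. ['Lorem ipsum', 'Another note'], with aliases and header anchors stripped.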
def parse_single_example(serialized_example, params):
    """Parses a single serialized TFExample string."""
decoder = tf_example_decoder.TfExampleDecoder()
data = decoder.decode(serialized_example)
image = data['image']
source_id = data['source_id']
source_id = dataloader_utils.process_source_id(source_id)
height = data['height']
width = data['width']
boxes = data['groundtruth_boxes']
boxes = box_utils.denormalize_boxes(boxes, tf.shape(image)[:2])
classes = data['groundtruth_classes']
is_crowds = data['groundtruth_is_crowd']
areas = data['groundtruth_area']
image = input_utils.normalize_image(image)
image, image_info = input_utils.resize_and_crop_image(
image,
params.retinanet_parser.output_size,
padded_size=input_utils.compute_padded_size(
params.retinanet_parser.output_size, 2 ** params.anchor.max_level),
aug_scale_min=1.0,
aug_scale_max=1.0)
anchors = anchor.Anchor(
params.anchor.min_level,
params.anchor.max_level,
params.anchor.num_scales,
params.anchor.aspect_ratios,
params.anchor.anchor_size,
image.get_shape().as_list()[:2])
labels = {
'anchor_boxes': anchors.multilevel_boxes,
'image_info': image_info,
}
groundtruths = {
'source_id': source_id,
'height': height,
'width': width,
'num_detections': tf.shape(classes),
'boxes': boxes,
'classes': classes,
'areas': areas,
'is_crowds': tf.cast(is_crowds, tf.int32),
}
return image, labels, groundtruths | 5,357,774 |
def AddDiskScopeFlag(parser):
"""Adds --disk-scope flag."""
parser.add_argument(
'--disk-scope',
choices={'zonal':
'The disk specified in --disk is interpreted as a '
'zonal disk in the same zone as the instance',
'regional':
'The disk specified in --disk is interpreted as a '
'regional disk in the same region as the instance'},
help='The scope of the disk.',
hidden=True,
default='zonal') | 5,357,775 |
def eval_blocking(lamb, mu, k):
"""Finds the blocking probability of a queue.
Args:
lamb (float): The rate into the queue.
mu (float): The rate out of the queue.
k (int): Maximum number of customers able to be in the queue.
"""
rho = lamb/mu
return rho**k*((1-rho)/(1-rho**(k+1))) | 5,357,776 |
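# Worked example for eval_blocking above: with arrival rate 2, service rate 3
# and room for k=5 customers, rho = 2/3 and the blocking probability is
# (2/3)**5 * (1 - 2/3) / (1 - (2/3)**6) ≈ 0.0481, matching the M/M/1/K formula.
p_block = eval_blocking(lamb=2.0, mu=3.0, k=5)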
def report_insurance_event(event_dict, autoID) :
"""
Send insurance event to insurance cloud endpoint
params:
event_dict: event_type, speed, gps-coordinates, time
        autoID: object that contains drivername, driverID, vehicle_model and vehicleID
    returns:
        None; the event data is converted to the insurance cloud's JSON format and POSTed to its endpoint
"""
data_json = json.dumps(insurance_event_data(event_dict, autoID))
headers = {'Content-type': 'application/json'}
url = INSURANCE_URL + "add_event"
response = requests.post(url, data=data_json, headers=headers)
if constants.DEBUG:
print("*********** Insurance Reporting " + event_dict["EVENT_TYPE"] )
print(response.status_code) | 5,357,777 |
def get_dependent_columns(covar):
"""
Get the list of dependent columns
:param covar: The covariance matrix
:return: Dependent columns
"""
ind_columns = (np.where(~covar.any(axis=1))[0]).tolist()
dep_columns_z = []
for i in range(0, covar.shape[0]):
if i not in ind_columns:
dep_columns_z.append(i)
return exclude_linear_combination_variables(covar, dep_columns_z) | 5,357,778 |
def ProcessMSA():
"""Convert the MSA for use with PSI-Blast"""
#Check the inputs
if os.path.isfile(args.wildtype) == False:
print "ProcessMSA Error: The path to the file with the wild-type sequence is missing"
quit()
if os.path.isfile(args.msa) == False:
print "ProcessMSA Error: Cannot open the msa file"
quit()
if args.maxhits == None:
print "ProcessMSA: The maximum number of hits will be uncapped"
MAXHITS = -1
else:
try:
MAXHITS = int(args.maxhits)
if MAXHITS < 1:
print "ProcessMSA Error: The maximum number of hits will be uncapped due to a negative or zero value given"
MAXHITS = -1
except ValueError:
print "ProcessMSA Error: The maximum number of hits will be uncapped due to an error on the command line argument given"
MAXHITS = -1
#Get the Wild-Type amino acid sequence
WTNAME, WTSEQ, WTLEN = WildType()
#
#Step one: Import msa alignment from MUSCLE and make it one line per sequence
Alignment = ""
Output = ""
with open(args.msa, 'r') as infile: #Open the file with the wild-type protein sequence
for line in infile:
#Check to see if we have a header
if line[0] == ">":
if len(Output) > 0: #Ignores empty output
Alignment = Alignment + Output + "\n" #Add the current output to the growing alignment varible
Output = "" #Empty the current alignment
Output = Output + line.rstrip('\n') + "," #Assemble the first line of the new sequence
else:
Output = Output + line.rstrip('\n') #Keep assembling the line
f = open('msatemp.csv', 'w')
f.write(Alignment)
f.close()
#
#Step two: Import MSA into a lookup table
MSATable = {}
Output = ""
with open('msatemp.csv', 'r') as infile: #Open the previous file
for line in infile:
NUMCOMMAS = line.count(',') #Count the number of commas (the fix for multiple commas in name)
split = line.split(",")
if len(line) > 10: #Avoids empty lines from the file
MSATable.update({split[0] : split[NUMCOMMAS].rstrip("\n")}) #Add a new entry to the array
Output = Output + split[NUMCOMMAS] #Write the sequence (the highest numbered split entry)
f = open('msatemp2.csv', 'w')
f.write(Output)
f.close()
#
#Step three: Mark the insertions with the letter Z (fixed from X as X is also used in the blast hits)
ZedOut = ""
WildtypeMSA = MSATable[WTNAME] + "\n" #Get the wild-type MSA sequence (with insertions)
WTMSALEN = len(WildtypeMSA)
with open('msatemp2.csv', 'r') as infile:
for line in infile:
for i in xrange(0, WTMSALEN):
if WildtypeMSA[i] == "-":
ZedOut = ZedOut + "Z"
else:
ZedOut = ZedOut + line[i]
f = open('msatemp3.csv', 'w')
f.write(ZedOut)
f.close()
#
#Step four: Delete the insertions
Output = ""
with open('msatemp3.csv', 'r') as infile:
for line in infile:
Len = len(line)
for i in xrange(0, Len):
if line[i] != "Z":
Output = Output + line[i]
f = open('msatemp4.csv', 'w')
f.write(Output)
f.close()
#
#Step five: Put wild-type on top, re-order the sequences by completeness, and cap the hits for psi-blast
PSITable = []
with open('msatemp4.csv', 'r') as infile:
for line in infile:
#Set the WT to the top and then add the rest into a list with their counts of dashes
if line == WTSEQ + "\n":
PSITable.insert(0, {'sequence' : line, 'counts' : line.count('-'), 'wt' : True}) #Move the wild-type to the top
else:
PSITable.append({'sequence' : line, 'counts' : line.count('-'), 'wt' : False})
#Sort the table into a new list by counts and wt
PSITable.sort(key=lambda k : (k['counts'], -k['wt']))
#Iterate the list to a writeable string
Output = ""
Count = 1
for x in PSITable:
if MAXHITS == -1: #Unlimited number of output sequences
Output = Output + x['sequence']
else:
if Count <= MAXHITS: #Add to the output if below or equal to the max given
Output = Output + x['sequence']
Count = Count + 1
f = open('MSAForSplitting.csv', 'w')
f.write(Output)
f.close()
return | 5,357,779 |
def results_to_answers(guess_hints, answers):
"""Provide remaining valid answers matching a list of guesses and
corresponding hints
"""
gh_stack = guess_hints.copy()
new_ans = answers.copy()
while len(gh_stack) > 0:
gh = gh_stack.pop()
guess = gh[0]
hint = gh[1]
new_ans = answers_guess_hint_to_answers(new_ans, guess, hint)
return new_ans | 5,357,780 |
def plt_figure(title, xlabel, ylabel, x_list, y_list, x_lim, y_lim):
"""
    - Draw a line chart from the given parameters.
"""
plt.figure()
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
if x_lim:
plt.xlim(x_lim)
if y_lim:
plt.ylim(y_lim)
plt.plot(x_list, y_list) | 5,357,781 |
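# Usage sketch for plt_figure above, assuming matplotlib.pyplot is imported as
# plt in this module (the helper itself relies on that). The data and labels
# are illustrative; the helper only draws, so the caller still shows or saves.
plt_figure(title='Training loss', xlabel='epoch', ylabel='loss',
           x_list=[1, 2, 3, 4], y_list=[0.9, 0.5, 0.3, 0.25],
           x_lim=None, y_lim=(0, 1))
plt.show()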
def traverse_backward(outputs, fn):
"""Backward traversal function through the graph.
Traverses the graph going from outputs to inputs. The provided function is
applied once to each module reached this way. This function is used to
implement other functionality that requires traversing the graph. ``fn``
typically has side effects, e.g., see :func:`is_specified` and
:func:`get_unassigned_hyperparameters`. See also: :func:`traverse_forward`.
Args:
outputs (dict[str, deep_architect.core.Output]): Dictionary of named of
outputs to start the traversal at.
fn ((deep_architect.core.Module) -> (bool)): Function to apply to each
module. Returns ``True`` if the traversal is to be stopped.
"""
memo = set()
output_lst = sorted_values_by_key(outputs)
ms = extract_unique_modules(output_lst)
for m in ms:
is_over = fn(m)
if is_over:
break
else:
for ix in itervalues(m.inputs):
if ix.is_connected():
m_prev = ix.get_connected_output().get_module()
if m_prev not in memo:
memo.add(m_prev)
ms.append(m_prev) | 5,357,782 |
def get_archive_map(data: DataFrame, row_col: Optional[str] = "ROW") -> Series:
"""
Get a series mapping object names to archive names
:param data: Dataset with archive names as ARCHIVE column and object names
in index
:type data: DataFrame
    :param row_col: column with the row index, defaults to "ROW". Set to None if
not applicable
:type row_col: str, optional
:return: Series mapping object names to archive names
:rtype: Series
"""
archive_map = data.ARCHIVE.drop_duplicates()
if row_col is not None:
archive_map = archive_map.droplevel(row_col)
return archive_map | 5,357,783 |
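# Usage sketch for get_archive_map above with a minimal, made-up dataset: the
# object names live in the index (with a ROW level) and ARCHIVE is a column.
import pandas as pd

_demo = pd.DataFrame(
    {'ARCHIVE': ['arch_a.fits', 'arch_a.fits', 'arch_b.fits']},
    index=pd.MultiIndex.from_tuples(
        [('obj1', 0), ('obj1', 1), ('obj2', 0)], names=['OBJECT', 'ROW']),
)
archive_map = get_archive_map(_demo)
# -> Series mapping 'obj1' to 'arch_a.fits' and 'obj2' to 'arch_b.fits'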
def gain_ratio(x_mat: ndarray, y_row: ndarray, prop: int, prop_values: Iterable, gain_value: float = None) -> float:
"""
    Compute the information gain ratio obtained when attribute `prop` is used to split the sample
    set. A larger value means a larger purity improvement from splitting on `prop`. Note that this
    criterion is biased towards attributes with fewer possible values.
    :param x_mat: Feature matrix; the m rows are samples and the n columns are features
    :param y_row: Output vector; a one-dimensional row vector that must match x_mat
    :param prop: The attribute (column index) used for the split
    :param prop_values: The possible values of the attribute
    :param gain_value: The information gain. It can be supplied to avoid recomputing it.
    :return: The information gain ratio
"""
prop_x = x_mat[:, prop]
prop_y_num = []
for v in prop_values:
prop_y_num.append(len(y_row[prop_x == v]))
m = y_row.shape[0]
intrinsic_value = 0
for num in prop_y_num:
tmp = num / m
intrinsic_value = intrinsic_value - tmp * (0 if math.isclose(tmp, 0) else math.log2(tmp))
if gain_value is None:
gain_value = gain(x_mat, y_row, prop, prop_values)
return gain_value / intrinsic_value | 5,357,784 |
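# Usage sketch for gain_ratio above. Supplying gain_value explicitly avoids the
# external gain() helper; the toy data are illustrative. Splitting on column 0
# separates the classes perfectly, so the gain is 1.0 bit, the intrinsic value
# is 1.0 and the returned ratio is 1.0.
import numpy as np

toy_x = np.array([[0, 1], [0, 0], [1, 1], [1, 0]])
toy_y = np.array([1, 1, 0, 0])
ratio = gain_ratio(toy_x, toy_y, prop=0, prop_values=[0, 1], gain_value=1.0)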
def _LengthError(e: ByteList):
"""Check if the length of the EDID is a multiple of 128.
Args:
e: The list form of the EDID to be checked.
Returns:
A list of error.Error objects, or None.
"""
if not len(e) % 128:
return None
else:
return [
error.Error(
"Overall EDID",
"Invalid length",
"Length % 128 = 0",
"Length %% 128 = %d" % (len(e) % 128),
)
] | 5,357,785 |
def generate_states(
start: int = 0,
stop: int = 14,
n_states: int = 100,
parity: Union[str, int] = "both"
):
"""
Generate correct string for input to `kshell_ui.py` when asked for
which states to calculate. Copy the string generated by this
function and paste it into `kshell_ui.py` when it prompts for
states.
Parameters
----------
start : int
The lowest spin value.
stop : int
The largest spin value.
n_states : int
The number of states per spin value.
parity : Union[str, int]
The parity of the states. Allowed values are: 1, -1, 'both',
'positive', 'negative', 'pos', 'neg', '+', '-'.
Examples
--------
``` python
>>> import kshell_utilities as ksutil
>>> ksutil.generate_states(start=0, stop=3, n_states=100, parity="both")
0+100, 0.5+100, 1+100, 1.5+100, 2+100, 2.5+100, 3+100, 0-100, 0.5-100, 1-100, 1.5-100, 2-100, 2.5-100, 3-100,
```
"""
allowed_positive_parity_inputs = ["positive", "pos", "+", "1", "+1", 1, "both"]
allowed_negative_parity_inputs = ["negative", "neg", "-", "-1", -1, "both"]
def correct_syntax(lst):
for elem in lst:
print(elem, end=", ")
if parity in allowed_positive_parity_inputs:
positive = [f"{i:g}{'+'}{n_states}" for i in np.arange(start, stop+0.5, 0.5)]
correct_syntax(positive)
if parity in allowed_negative_parity_inputs:
negative = [f"{i:g}{'-'}{n_states}" for i in np.arange(start, stop+0.5, 0.5)]
correct_syntax(negative) | 5,357,786 |
def __single_auc_score__(feature_i,
clf,
cv_indices,
X,
y,
sample_weight=None):
"""Method determining the 'area under curve' for a single test set.
This function is intended for internal use.
Parameters
----------
feature_i: int
Index of the tested feature.
clf: object
Classifier that should be used for the classification.
It needs a fit and a predict_proba function.
cv_indices: list of tuples
Indices for all the cross validation steps. They are explicit
pass, so all test sets use the same splitting.
X : numpy.float32array, shape=(n_samples, n_obs)
Values describing the samples.
y : numpy.float32array, shape=(n_samples)
Array of the true labels.
sample_weight : None or numpy.float32array, shape=(n_samples)
If weights are used this has to contain the sample weights.
None in the case of no weights.
Returns
-------
feature_i: int
        Index of the tested feature. It is needed as a return value for
asynchronous parallel processing
auc_score: float
Returns calculated auc score.
"""
y_pred = np.zeros_like(y, dtype=float)
for i, [train_idx, test_idx] in enumerate(cv_indices):
X_train = X[train_idx]
X_test = X[test_idx]
y_train = y[train_idx]
if sample_weight is None:
sample_weight_train = None
sample_weight_test = None
else:
sample_weight_train = sample_weight[train_idx]
sample_weight_test = sample_weight[test_idx]
clf = clf.fit(X=X_train,
y=y_train,
sample_weight=sample_weight_train)
y_pred[test_idx] = clf.predict_proba(X_test)[:, 1]
    # y and y_pred cover every sample across the folds, so use the full weights.
    auc_score = roc_auc_score(y, y_pred, sample_weight=sample_weight)
return feature_i, auc_score | 5,357,787 |
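# Usage sketch for __single_auc_score__ above, built on scikit-learn: KFold
# supplies the explicit cv_indices and LogisticRegression stands in for any
# classifier with fit/predict_proba. The synthetic data are illustrative.
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import KFold

rng = np.random.RandomState(0)
X_demo = rng.normal(size=(200, 5))
y_demo = (X_demo[:, 0] + 0.1 * rng.normal(size=200) > 0).astype(int)
cv = list(KFold(n_splits=5, shuffle=True, random_state=0).split(X_demo))
feat_idx, auc = __single_auc_score__(feature_i=0,
                                     clf=LogisticRegression(),
                                     cv_indices=cv,
                                     X=X_demo,
                                     y=y_demo)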
def IntCurveSurface_ThePolygonToolOfHInter_Bounding(*args):
"""
:param thePolygon:
:type thePolygon: IntCurveSurface_ThePolygonOfHInter &
:rtype: Bnd_Box
"""
return _IntCurveSurface.IntCurveSurface_ThePolygonToolOfHInter_Bounding(*args) | 5,357,788 |
def identity_show(client, resource_group_name, account_name):
"""
Show the identity for Azure Cognitive Services account.
"""
sa = client.get(resource_group_name, account_name)
return sa.identity if sa.identity else {} | 5,357,789 |
def parsing_input(program_parameters):
"""
Parses apart the command line or qsub submission file to get all user input parameters
for analyzing the data. Function also prints all user parameters to
command line, so a user can monitor the inputs. Also, default settings are
set in this function and are overwritten if a user provides the parameter
instead.
    :param program_parameters: String of user parameters (from the command line or
        qsub submission file) to be parsed
    :return: Returns a dictionary of all parameters for later parts of the
        program (lots of variables)
"""
    # Default parameters (will be overridden by user input)
    # Variant-level default parameters
quality_score_min= 20
# Default parameter for indel distance
indel_exclusion_region_length = 1
# Sample level default parameters
min_total_read_count=20
# Meta-Analysis Cutoff Values
meta_BH_adj_p_value_cutoff = 0.05
meta_sample_p_value_cutoff = 0.05
# Multi-Dimensional P-Value Cutoff Value
multi_dim_adjust_pvalue_cutoff = 0.05
# Currenting working directory default parameters (same directory as program)
working_directory= os.getcwd()
input_file_location = working_directory+'\\'
# Global Output_File_Location
output_file_location = working_directory+'/' #for UNIX environment this symbol is required and it works fine in PC submission
# binomial probability value (50/50 Test)
binomial_probability_value = 0.5
###LEGACY VARIABLE KEPT FOR LATER DEVELOPMENT######
# Variables originally created to be modified, but later
# in development realized obsolete
min_numb_of_samples = 1
numb_ref_alleles_allowed = 1
numb_alt_alleles_allowed = 1
parsed_parameters=program_parameters.split("--")
# print (parsed_parameters)
for x in range(1, len(parsed_parameters)):
inputs = parsed_parameters[x].split(" ")
if inputs[0] == "File_Name":
file_name = inputs[1]
elif inputs[0] == "Indel_Exclusion_Region_Length":
indel_exclusion_region_length = inputs[1]
#Testing user input (verify numeric value)
result = test_Number_Input(indel_exclusion_region_length)
#If the result passes do this or if the result fails do something else
if result =='Pass':
pass
else:
print ("")
print ("ERROR Alert!")
print ("Please check Indel_Exclusion_Region_Length- incorrect input")
print ("Incorrect input was: ", indel_exclusion_region_length)
sys.exit()
elif inputs[0] == "Minimum_Number_of_Samples_for_ASE":
min_numb_of_samples = inputs[1]
#Testing user input (verify numeric value)
result = test_Number_Input(min_numb_of_samples)
#If the result passes do this or if the result fails do something else
if result =='Pass':
pass
else:
print ("")
print ("ERROR Alert!")
print ("Please check Minimum_Number_of_Samples_for_ASE- incorrect input")
print ("Incorrect input was: ", min_numb_of_samples)
sys.exit()
elif inputs[0] == "Number_of_Reference_Alleles_Allowed":
numb_ref_alleles_allowed = inputs[1]
#Testing user input (verify numeric value)
result = test_Number_Input(numb_ref_alleles_allowed)
#If the result passes do this or if the result fails do something else
if result =='Pass':
pass
else:
print ("")
print ("ERROR Alert!")
print ("Please check Number_of_Reference_Alleles_Allowed- incorrect input")
print ("Incorrect input was: ", numb_ref_alleles_allowed)
sys.exit()
elif inputs[0] == "Number_of_Alternative_Alleles_Allowed":
numb_alt_alleles_allowed = inputs[1]
#Testing user input (verify numeric value)
result = test_Number_Input(numb_alt_alleles_allowed)
#If the result passes do this or if the result fails do something else
if result =='Pass':
pass
else:
print ("")
print ("ERROR Alert!")
print ("Please check Number_of_Alternative_Alleles_Allowed- incorrect input")
print ("Incorrect input was: ", numb_alt_alleles_allowed)
sys.exit()
elif inputs[0] == "Quality_Score_Minimum_for_Variants":
quality_score_min = inputs[1]
#Testing user input (verify numeric value)
result = test_Number_Input(quality_score_min)
#If the result passes do this or if the result fails do something else
if result =='Pass':
pass
else:
print ("")
print ("ERROR Alert!")
print ("Please check Quality_Score_Minimum_for_Variants- incorrect input")
print ("Incorrect input was: ", quality_score_min)
sys.exit()
elif inputs[0] == "Minimum_Read_Counts":
min_total_read_count = inputs[1]
#Testing user input (verify numeric value)
result = test_Number_Input(min_total_read_count)
#If the result passes do this or if the result fails do something else
if result =='Pass':
pass
else:
print ("")
print ("ERROR Alert!")
print ("Please check Minimum_Read_Counts- incorrect input")
print ("Incorrect input was: ", min_total_read_count)
sys.exit()
elif inputs[0] == "Meta_BH_adj_p_value_cutoff":
meta_BH_adj_p_value_cutoff = inputs[1]
#Testing user input (verify numeric value)
result = test_Number_Input(meta_BH_adj_p_value_cutoff)
#If the result passes do this or if the result fails do something else
if result =='Pass':
pass
else:
print ("")
print ("ERROR Alert!")
print ("Please check META_FDR_p_value- incorrect input")
print ("Incorrect input was: ", meta_BH_adj_p_value_cutoff)
sys.exit()
elif inputs[0] == "Meta_sample_p_value_cutoff":
meta_sample_p_value_cutoff = inputs[1]
#Testing user input (verify numeric value)
result = test_Number_Input(meta_sample_p_value_cutoff)
#If the result passes do this or if the result fails do something else
if result =='Pass':
pass
else:
print ("")
print ("ERROR Alert!")
print ("Please check META_binomial_p_value- incorrect input")
print ("Incorrect input was: ", meta_sample_p_value_cutoff)
sys.exit()
elif inputs[0] == "Multi_Dim_adjust_pvalue_cutoff":
multi_dim_adjust_pvalue_cutoff = inputs[1]
#Testing user input (verify numeric value)
result = test_Number_Input(multi_dim_adjust_pvalue_cutoff)
#If the result passes do this or if the result fails do something else
if result =='Pass':
pass
else:
print ("")
print ("ERROR Alert!")
print ("Please check Multi_Dim_adjust_pvalue_cutoff- incorrect input")
print ("Incorrect input was: ", multi_dim_adjust_pvalue_cutoff)
sys.exit()
elif inputs[0] == "Binomial_Probability_Value":
binomial_probability_value = inputs[1]
#Testing user input (verify numeric value)
result = test_Number_Input(binomial_probability_value)
#If the result passes do this or if the result fails do something else
if result =='Pass':
pass
else:
print ("")
print ("ERROR Alert!")
print ("Please check Binomial_Probability_Value- incorrect input")
print ("Incorrect input was: ", binomial_probability_value)
sys.exit()
elif inputs[0] == "Output_File_Location":
output_file_location = inputs[1]
# prints the help menu prompt
elif inputs[0] == "help" or inputs[0]=="h" or inputs[0]=="Help":
printing_help_menu=help_menu_prompt()
else:
print ("")
print ("ERROR Alert!")
print ("Please double check your input parameters, something was not quite right")
print ("Type: --help to see a list of options and acceptable input for the program")
sys.exit()
# Printing user settings to terminal in case program crashes out before completion
print ("")
print ("")
print ("Exact User Parameter Settings")
print ("")
print ("The input file is: ", file_name)
print ("The output directory for analysis is: ", output_file_location)
print ("")
print ("")
    print ("The minimum quality score (phred score) for a variant is: ", quality_score_min)
print ("The indel exclusion region length from identified indels is: ", indel_exclusion_region_length)
# print ("The minimum number of samples to count a variant for ASE is: ", min_numb_of_samples)
print ("The number of allowable reference alleles is (currently program is limited to one): ", numb_ref_alleles_allowed)
print ("The number of allowable alternative alleles is (currently program is limited to one): ", numb_alt_alleles_allowed)
print ("The minimum number of total read counts for a sample per variant is: ", min_total_read_count)
print ("")
print ("")
print ("The binomial probability value for ASE testing is: ", binomial_probability_value)
print ("")
print ("")
print ("Meta-Analysis of Data")
print ("The Meta BH adjusted p-value cutoff is: ", meta_BH_adj_p_value_cutoff)
print ("The p-value cutoff used for estimated tallying of samples is: ", meta_sample_p_value_cutoff)
print ("")
print ("Multi-Dimensional P-Value Adjustment")
print ("The p-value cutoff for testing is: ", multi_dim_adjust_pvalue_cutoff)
print ("")
print ("")
# Returns a dictionary of all the variables
return{'file_name':file_name, 'indel_exclusion_region_length':indel_exclusion_region_length,
'min_numb_of_samples':min_numb_of_samples, 'numb_ref_alleles_allowed':numb_ref_alleles_allowed,
'numb_alt_alleles_allowed':numb_alt_alleles_allowed, 'quality_score_min':quality_score_min,
'min_total_read_count':min_total_read_count, 'binomial_probability_value': binomial_probability_value,
'meta_BH_adj_p_value_cutoff':meta_BH_adj_p_value_cutoff, 'meta_sample_p_value_cutoff': meta_sample_p_value_cutoff,
'multi_dim_adjust_pvalue_cutoff': multi_dim_adjust_pvalue_cutoff, 'output_file_location':output_file_location} | 5,357,790 |
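# Usage sketch for parsing_input above: the argument is a single string of
# "--Key value" pairs taken from the command line or a qsub submission file.
# The file name and cutoffs are illustrative, and the sketch assumes the
# module's test_Number_Input helper accepts these numeric strings.
example_parameters = ("--File_Name variants.vcf "
                      "--Minimum_Read_Counts 30 "
                      "--Quality_Score_Minimum_for_Variants 30 "
                      "--Binomial_Probability_Value 0.5 ")
settings = parsing_input(example_parameters)
# settings['file_name'] == 'variants.vcf', settings['min_total_read_count'] == '30'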
def normalize_country_code(country_code):
""" Normalize country codes a bit by making capitalization consistent and
removing trailing comments (and other words). """
if not country_code:
return country_code
country_code = re.match(r'^(\w+)', country_code).group(1)
return country_code.upper() | 5,357,791 |
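# Small illustration of normalize_country_code above: the leading word is kept
# and upper-cased, trailing comments are dropped, and falsy input is returned
# unchanged.
assert normalize_country_code('us (work)') == 'US'
assert normalize_country_code('de') == 'DE'
assert normalize_country_code('') == ''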
def get_angle(p1, p2):
"""Get the angle between two points."""
return math.atan2(p2[1] - p1[1], p2[0] - p1[0]) | 5,357,792 |
def test_thanks(client):
"""Test thanks page."""
rv = client.get('/thanks/test10.jpg&&10')
assert b'Thanks' in rv.data | 5,357,793 |
def list_accessors(gltf):
"""
    Enumerate the accessors.
    :param gltf: glTF object
    :return: generator yielding accessor indices
"""
for skin in gltf['skins']:
yield skin['inverseBindMatrices']
for mesh in gltf['meshes']:
for primitive in mesh['primitives']:
yield primitive['indices']
for attr_value in primitive['attributes'].values():
yield attr_value
if 'targets' in primitive:
for target in primitive['targets']:
for target_value in target.values():
yield target_value | 5,357,794 |
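# Usage sketch for list_accessors above with a minimal, hand-written glTF dict;
# real documents come from json.load on a .gltf file.
gltf_doc = {
    'skins': [{'inverseBindMatrices': 0}],
    'meshes': [{'primitives': [{'indices': 1, 'attributes': {'POSITION': 2}}]}],
}
accessor_indices = list(list_accessors(gltf_doc))
# -> [0, 1, 2]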
def per_application():
"""
:return:
a seeder function that always returns 1, ensuring at most one delegate is ever spawned
for the entire application.
"""
return lambda msg: 1 | 5,357,795 |
def get_job_information(url):
""" Uses bs4 to grab the information from each job container based on the url.
Parameters
----------
url : str
Career builder url of any job
Returns
------
job_data : dict
Contains Job Name, Company Name, Job Location, Description, Skills and apply link.
"""
website = requests.get(url).text
job_soup = BeautifulSoup(website, 'html.parser')
job_name = "N/A"
try:
job_name = job_soup.select('h2.h3')[0].getText()
except Exception as err:
        print(f"The job title could not be selected properly")
print(err)
print(f'Skipping {url}...')
company_name = "N/A"
try:
company_name = job_soup.select('.data-details > span:nth-child(1)')[0].getText()
except Exception as err:
print(f"The company name could not be selected properly")
print(err)
print(f'Skipping {url}...')
job_location = "N/A"
try:
job_location = job_soup.select('.data-details > span:nth-child(2)')[0].getText()
except Exception as err:
print(f"The location could not be selected properly")
print(err)
print(f'Skipping {url}...')
job_description = job_soup.select('#jdp_description > div.col-2 > div.col.big.col-mobile-full > p')
job_description_2 = job_soup.select('#jdp_description > div:nth-child(1) > div:nth-child(1)')
desc = [ ]
for idx, paragraph in enumerate(job_description):
desc.append(job_description[idx].text)
if len(desc) == 0:
for idx, paragraph in enumerate(job_description_2):
desc.append(job_description_2[idx].text)
job_skills = [ ]
skills_container = job_soup.findAll("div", {"class": "check-bubble"})
for idx, skill in enumerate(skills_container):
job_skills.append(skills_container[idx].text)
job_data = {'Job Title': job_name,
'Company': company_name,
'Location': job_location,
'Description': desc,
'Skills': job_skills,
'Application Url': url}
return job_data | 5,357,796 |
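# Usage sketch for get_job_information above; the URL is a placeholder for a
# real CareerBuilder job posting and a network connection is required.
job = get_job_information('https://www.careerbuilder.com/job/<job-id>')
# job['Job Title'], job['Company'], job['Skills'], ...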
def idiv(self, other):
"""Compute the element-wise division.
Parameters
----------
other : Union[dragon.Tensor, number]
The value to divide.
Returns
-------
dragon.Tensor
The self.
See Also
--------
`dragon.math.div(...)`_
"""
return _apply_binary_op([self, other], 'Div', [self]) | 5,357,797 |
def getAudioMetadata(fileRef):
"""Extract metadata for audio file"""
args = [config.mediaInfoExe]
args.append("--Output=EBUCore")
args.append(fileRef)
# Command line as string (used for logging purposes only)
cmdStr = " ".join(args)
status, out, err = shared.launchSubProcess(args)
# Configure XML parser to get rid of blank lines in MediaInfo output
parser = etree.XMLParser(remove_blank_text=True)
# Parse string to element
outElt = etree.XML(out.encode('utf-8'), parser=parser)
# Main results to dictionary
dictOut = {}
dictOut["cmdStr"] = cmdStr
dictOut["status"] = status
dictOut["outElt"] = outElt
dictOut["stderr"] = err
return dictOut | 5,357,798 |
def simulation_test(**kwargs):
"""Decorate a unit test and mark it as a simulation test.
The arguments provided to this decorator will be passed to
:py:meth:`~reviewbot.tools.testing.testcases.BaseToolTestCase
.setup_simulation_test`.
Args:
**kwargs (dict):
Keyword arguments to pass during setup.
Returns:
callable:
The new unit test function.
"""
def _dec(func):
func.simulation_setup_kwargs = kwargs
return func
return _dec | 5,357,799 |
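# Usage sketch for simulation_test above: the decorator records its kwargs on
# the test function so BaseToolTestCase.setup_simulation_test (per the
# docstring) can consume them later. The payload shown is hypothetical.
@simulation_test(output_payload={'warnings': []})
def example_test():
    pass

assert example_test.simulation_setup_kwargs == {'output_payload': {'warnings': []}}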