def sphdist(ra1, dec1, ra2, dec2):
"""measures the spherical distance between 2 points
Inputs:
(ra1, dec1) in degrees
(ra2, dec2) in degrees
Outputs:
returns a distance in degrees
"""
dec1_r = deg2rad(dec1)
dec2_r = deg2rad(dec2)
    return 2. * rad2deg( arcsin( sqrt( ( sin((dec1_r - dec2_r) / 2)) ** 2 + cos(dec1_r) * cos(dec2_r) * ( sin((deg2rad(ra1 - ra2)) / 2)) ** 2)))
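# Hedged usage sketch (not part of the original snippet): sphdist() above relies
# on numpy names being available at module level, so import them explicitly here.
from numpy import arcsin, cos, deg2rad, rad2deg, sin, sqrt

# Two points at the same right ascension, one degree apart in declination.
print(sphdist(10.0, 20.0, 10.0, 21.0))  # ~1.0 (degrees)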
def send_conn_stats(sfe, prefix):
"""
calculates iSCSI connection stats at both cluster
and node levels and submits them to Graphite.
    Calls ListISCSISessions
"""
result = sfe.list_iscsisessions().to_json()['sessions']
tgts = []
accts = []
for i in range(len(result)):
tgts.append(result[i]['targetIP'].split(':')[0])
accts.append(result[i]['initiatorIP'].split(':')[0])
if to_graphite:
graphyte.send(prefix + '.iscsiActiveSessionCount', len(result))
graphyte.send(prefix + '.iscsiTargetCount', len(set(tgts)))
else:
LOG.warning(prefix + '.iscsiActiveSessionCount ' + str(len(result)))
        LOG.warning(prefix + '.iscsiTargetCount ' + str(len(set(tgts))))
def parse_kwargs(kwargs, a_list):
"""
extract values from kwargs or set default
"""
if a_list is not None:
num_colors = len(a_list)
default_colors = generate_colors(num_colors)
else:
num_colors = 1
default_colors = 'k'
logscale = kwargs.get('logscale', [False, False])
Range = kwargs.get('Range', [[], []])
colors = kwargs.get('colors', default_colors)
figure_name = kwargs.get('figure_name', None)
show = kwargs.get('show', True)
dist = kwargs.get('dist', None)
values = [logscale, Range, colors, figure_name, show, dist]
    return values
def talib_WCLPRICE(DataFrame):
"""WCLPRICE - Weighted Close Price 加权收盘价"""
res = talib.WCLPRICE(DataFrame.high.values, DataFrame.low.values,
DataFrame.close.values)
    return pd.DataFrame({'WCLPRICE': res}, index=DataFrame.index)
def is_pi_parallel(ring1_center: np.ndarray,
ring1_normal: np.ndarray,
ring2_center: np.ndarray,
ring2_normal: np.ndarray,
dist_cutoff: float = 8.0,
angle_cutoff: float = 30.0) -> bool:
"""Check if two aromatic rings form a parallel pi-pi contact.
Parameters
----------
ring1_center, ring2_center: np.ndarray
Positions of centers of the two rings. Can be computed with the
compute_ring_center function.
ring1_normal, ring2_normal: np.ndarray
Normals of the two rings. Can be computed with the compute_ring_normal
function.
dist_cutoff: float
        Distance cutoff. Max allowed distance between the ring centers (Angstroms).
angle_cutoff: float
Angle cutoff. Max allowed deviation from the ideal (0deg) angle between
the rings (in degrees).
Returns
-------
bool
        True if the two aromatic rings form a parallel pi-pi contact.
"""
dist = np.linalg.norm(ring1_center - ring2_center)
angle = angle_between(ring1_normal, ring2_normal) * 180 / np.pi
if ((angle < angle_cutoff or angle > 180.0 - angle_cutoff) and
dist < dist_cutoff):
return True
    return False
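# Hedged usage sketch: the original module supplies its own angle_between();
# a minimal stand-in is defined here only so the example is self-contained.
import numpy as np

def angle_between(v1, v2):
    """Angle in radians between two vectors."""
    cos_theta = np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))
    return np.arccos(np.clip(cos_theta, -1.0, 1.0))

ring1_center = np.array([0.0, 0.0, 0.0])
ring2_center = np.array([0.0, 0.0, 3.5])   # 3.5 A apart, well under dist_cutoff
normal = np.array([0.0, 0.0, 1.0])         # identical normals -> 0 deg angle
print(is_pi_parallel(ring1_center, normal, ring2_center, normal))  # True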
def cipd_dep_impl(repository_ctx):
"""A rule that generates a CIPD dependency.
Args:
repository_ctx: A RepositoryContext.
"""
ensure_path = ".ensure"
repository_ctx.template(
ensure_path,
Label("@rules_cipd//cipd/internal:ensure.tpl"),
{
"{PATH}": repository_ctx.attr.path,
"{TAG}": repository_ctx.attr.id,
},
)
repository_ctx.symlink(
repository_ctx.attr.build_file,
"BUILD.bazel",
)
ensure_result = repository_ctx.execute([
repository_ctx.path(repository_ctx.attr._cipd_client),
"ensure",
"-root",
".",
"-ensure-file",
ensure_path,
])
if ensure_result.return_code:
        fail(ensure_result.stderr)
def _fetch_alleninf_coords(*args, **kwargs):
"""
Gets updated MNI coordinates for AHBA samples, as shipped with `alleninf`
Returns
-------
coords : :class:`pandas.DataFrame`
Updated MNI coordinates for all AHBA samples
References
----------
Updated MNI coordinates taken from https://github.com/chrisfilo/alleninf,
which is licensed under the BSD-3 (reproduced here):
Copyright (c) 2018, Krzysztof Gorgolewski
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
coords = resource_filename('abagen', 'data/corrected_mni_coordinates.csv')
coords = pd.read_csv(coords).rename(dict(corrected_mni_x='mni_x',
corrected_mni_y='mni_y',
corrected_mni_z='mni_z'),
axis=1)
    return coords.set_index('well_id')
def get_classes_for_mol_network(can: canopus.Canopus,
hierarchy: List[str],
npc_hierarchy: List[str],
class_p_cutoff: float,
max_class_depth: Union[int, None]) -> \
DefaultDict[str, List[Union[str, Dict[str, List[Tuple[
Union[str, float]]]]]]]:
"""Loop through mol network and gather CF and NPC classes
:param can: Canopus object of canopus results with gnps mol network data
:param hierarchy: the CF class level names to be included in output in
order of hierarchy
:param npc_hierarchy: the NPC class level names to be included in output in
order of hierarchy
:param class_p_cutoff: probability cutoff for including a class
:param max_class_depth: max class depth for finding CF class
:return: classes output - dict of lists of {componentindex: [cluster index,
formula, {CF_level: [(class, prob)]}, {NPC_level: [(class, prob)]}]}
CF classes are found by looking for the class at deepest depth (or
max_class_depth) and then ordering these deepest classes based on priority.
Then, the classes are traced back to higher hierarchy and sorted in output,
again based on priority of deepest classes.
"""
results = defaultdict(list)
for node_id, node in can.gnps.nodes.items():
# get canopus compound obj
compound = can.sirius.compounds.get(node_id)
if compound:
cf_classes_dict = get_cf_classes(can, compound, hierarchy,
class_p_cutoff, max_class_depth)
npc_classes_dict = get_npc_classes(can, compound, npc_hierarchy)
formula = compound.formula
comp_id = node.componentId
if comp_id == '-1': # handling of singleton -1 components
comp_id += f"_{node_id}"
results[comp_id].append(
[node_id, formula, cf_classes_dict, npc_classes_dict])
    return results
def list_books(books):
"""Creates a string that, on each line, informs about a book."""
return '\n'.join([f'+ {book.name}: {book.renew_count}: {book.return_date}'
                      for book in books])
def getHSPLNamespace():
"""
Retrieve the namespace of the HSPL XML.
@return: The namespace of the HSPL XML.
"""
    return HSPL_NAMESPACE
def _geom_points(geom):
"""GeoJSON geometry to a sequence of point tuples
"""
if geom['type'] == 'Point':
yield tuple(geom['coordinates'])
elif geom['type'] in ('MultiPoint', 'LineString'):
for position in geom['coordinates']:
yield tuple(position)
else:
raise InvalidFeatureError(
"Unsupported geometry type:{0}".format(geom['type'])) | 5,354,110 |
def cli(file, series, xaxis, output_file):
"""Plot validation metrics from a Topaz training run.
<file> is the results.txt file from standalone Topaz or the model_plot.star file from Topaz run within RELION."""
data = pd.read_csv(file, delim_whitespace=True, index_col=xaxis, na_values='-')
grouped = data.groupby('split')
if series in ['loss', 'ge_penalty', 'precision', 'auprc']:
fig, ax = plt.subplots(ncols=1, nrows=1)
grouped[series].plot(legend=True, ax=ax)
ax.set_xlabel(xaxis)
ax.set_ylabel(series)
ax.set_title(f'{series} as a function of {xaxis}')
elif series == ['tpr', 'fpr']:
fig, axs = plt.subplots(ncols=2, nrows=1, sharex=True, sharey=True, figsize=(10, 5))
fig.suptitle(f'True and false positive rates as a function of {xaxis}')
for key, ax in zip(grouped.groups.keys(), axs.flatten()):
grouped.get_group(key)[series].plot(legend=True, ax=ax)
ax.set_title(f'{key}')
ax.set_xlabel(xaxis)
axs[0].set_ylabel('True or false positive rate')
fig.tight_layout()
if output_file:
fig.figsize = (11.80, 8.85)
fig.dpi = 300
plt.savefig(output_file)
else:
        plt.show()
def test_EmpiricalCovariance_validates_mahalanobis():
"""Checks that EmpiricalCovariance validates data with mahalanobis."""
cov = EmpiricalCovariance().fit(X)
msg = f"X has 2 features, but \\w+ is expecting {X.shape[1]} features as input"
with pytest.raises(ValueError, match=msg):
        cov.mahalanobis(X[:, :2])
def wait_for_not_found(delete_func, show_func, *args, **kwargs):
"""Call the delete function, then wait for it to be 'NotFound'
:param delete_func: The delete function to call.
:param show_func: The show function to call looking for 'NotFound'.
:param ID: The ID of the object to delete/show.
:raises TimeoutException: The object did not achieve the status or ERROR in
the check_timeout period.
:returns: None
"""
try:
delete_func(*args, **kwargs)
except exceptions.NotFound:
return
start = int(time.time())
LOG.info('Waiting for object to be NotFound')
while True:
try:
show_func(*args, **kwargs)
except exceptions.NotFound:
return
if int(time.time()) - start >= CONF.load_balancer.check_timeout:
message = ('{name} did not raise NotFound in {timeout} '
'seconds.'.format(
name=show_func.__name__,
timeout=CONF.load_balancer.check_timeout))
raise exceptions.TimeoutException(message)
        time.sleep(CONF.load_balancer.check_interval)
def configure_ssl_conn():
"""Configures required settings for an SSL based OVSDB client connection
:return: None
"""
req_ssl_opts = {'ssl_key_file': cfg.CONF.OVS.ssl_key_file,
'ssl_cert_file': cfg.CONF.OVS.ssl_cert_file,
'ssl_ca_cert_file': cfg.CONF.OVS.ssl_ca_cert_file}
for ssl_opt, ssl_file in req_ssl_opts.items():
if not ssl_file:
raise ovsdb_exc.OvsdbSslRequiredOptError(ssl_opt=ssl_opt)
if not os.path.exists(ssl_file):
raise ovsdb_exc.OvsdbSslConfigNotFound(ssl_file=ssl_file)
# TODO(ihrachys): move to ovsdbapp
Stream.ssl_set_private_key_file(req_ssl_opts['ssl_key_file'])
Stream.ssl_set_certificate_file(req_ssl_opts['ssl_cert_file'])
    Stream.ssl_set_ca_cert_file(req_ssl_opts['ssl_ca_cert_file'])
def add_lead_zero(num,digit,IgnoreDataManipulation=False,RaiseDataManipulationError=False,DigitMustAtLeastTwo=False):
"""Add leading the letters '0' to inputted integer 'num' according to defined 'digit' and return as string.
Required keyword arguments:
- num (int) : Integer (can be positive, zero, or negative)
    - digit (int) : How many digits the number should have in the returned string.
Optional keyword arguments:
- IgnoreDataManipulation (bool) : Avoid raising acceptable data manipulation warning.
- RaiseDataManipulationError (bool) : Raise every data manipulation warning as error exception. (IgnoreDataManipulation must be False.)
- DigitMustAtLeastTwo (bool) : Raise warning or error if defined digit is less than 2.
Data manipulation error:
- Digit should be at least 2. (Ignore by default)
- Amount of defined digits is less than digits of number in inputted integer.
"""
if type(num) is not int or type(digit) is not int: raise TypeError('parameters \'num\', \'digit\' should be integer.')
if type(IgnoreDataManipulation) is not bool or type(RaiseDataManipulationError) is not bool or type(DigitMustAtLeastTwo) is not bool: raise TypeError('parameters \'IgnoreDataManipulation\', \'RaiseDataManipulationError\', and \'DigitMustAtLeastTwo\' should be boolean.')
if IgnoreDataManipulation: RaiseDataManipulationError=False
if digit<1: raise ValueError('Digit should be at least one.')
if digit<2 and DigitMustAtLeastTwo:
msg='Amount of digits should be at least 2.'
if not IgnoreDataManipulation and not RaiseDataManipulationError: alternative_warn(msg,ValueWarning,'add_lead_zero')
if RaiseDataManipulationError: raise ValueError(msg)
# Reuse variable 'digit'
if num>=0:
num=str(num)
IsNegative=False
else:
num=str(abs(num))
IsNegative=True
digit=digit-len(num)
if digit>0:
for x in range(0,digit):
# Reuse variable 'num'
num='0'+num
if not IsNegative: return num
else: return '-'+num
elif digit==0:
if not IsNegative: return num
else: return '-'+num
else:
msg='Defined digits amount is less than digits of number in inputted integer. It possibly means that some of used data has been manipulated incorrectly.'
if not IgnoreDataManipulation and not RaiseDataManipulationError: alternative_warn(msg,ValueWarning,'add_lead_zero')
if RaiseDataManipulationError: raise ValueError(msg)
if not IsNegative: return num
        else: return '-'+num
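# Hedged usage sketch: happy-path calls that never reach the warning branches,
# so the module's alternative_warn/ValueWarning helpers are not needed here.
print(add_lead_zero(7, 3))     # '007'
print(add_lead_zero(-42, 5))   # '-00042'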
def _attach_monitoring_policy_server(module, oneandone_conn, monitoring_policy_id, servers):
"""
Attaches servers to a monitoring policy.
"""
try:
attach_servers = []
for _server_id in servers:
server_id = get_server(oneandone_conn, _server_id)
attach_server = oneandone.client.AttachServer(
server_id=server_id
)
attach_servers.append(attach_server)
if module.check_mode:
if attach_servers:
return True
return False
monitoring_policy = oneandone_conn.attach_monitoring_policy_server(
monitoring_policy_id=monitoring_policy_id,
servers=attach_servers)
return monitoring_policy
except Exception as ex:
        module.fail_json(msg=str(ex))
def get_generator_contingency_fcas_availability_term_2(data, trader_id, trade_type, intervention) -> Union[float, None]:
"""Get generator contingency FCAS term 2"""
# Parameters
lower_slope_coefficient = get_lower_slope_coefficient(data, trader_id, trade_type)
if lower_slope_coefficient == 0:
return None
enablement_min = lookup.get_trader_quantity_band_attribute(data, trader_id, trade_type, '@EnablementMin', float)
reg_target = lookup.get_trader_solution_attribute(data, trader_id, '@L5RegTarget', float, intervention)
energy_target = lookup.get_trader_solution_attribute(data, trader_id, '@EnergyTarget', float, intervention)
    return (energy_target - reg_target - enablement_min) / lower_slope_coefficient
def format_pvalue(p_value, alpha=0.05, include_equal=True):
"""
    If the p-value is lower than alpha, replace it with "<alpha" (e.g., "<0.05"); otherwise, round it to three decimals
    :param p_value: input p-value as a float
    :param alpha: significance level
    :param include_equal: include an equal sign ('=') in the p-value (e.g., '=0.06') or not (e.g., '0.06')
    :return: p_value: processed p-value (replaced by "<0.05" or rounded to three decimals) as a str
"""
if p_value < alpha:
p_value = "<" + str(alpha)
else:
if include_equal:
p_value = '=' + str(round(p_value, 3))
else:
p_value = str(round(p_value, 3))
    return p_value
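# Hedged usage sketch of the formatting behaviour implemented above.
print(format_pvalue(0.002))                         # '<0.05'
print(format_pvalue(0.1234))                        # '=0.123'
print(format_pvalue(0.1234, include_equal=False))   # '0.123'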
def _compute_y(x, ll):
"""Computes y."""
    return np.sqrt(1 - ll ** 2 * (1 - x ** 2))
def cmd_help(cmd):
"""
Performs "help cmd", i.e, displays the syntax of individual command.
Parameters
-----------
cmd: str
The command to show more help information.
"""
print()
if cmd not in cmd_list:
print("Unrecognized command. Enter \"help [cmd]\" for function syntax, \"help\" for list of available commands")
else:
print("Description:", commands_dict[cmd], "\n")
print("Usage:")
cmd_list[cmd]("syntax") | 5,354,120 |
def create_axis(length=1.0, use_neg=True):
"""
Create axis.
:param length:
:param use_neg: If False, Only defined in Positive planes
:return: Axis object
"""
# Defining the location and colors of each vertex of the shape
vertices = [
# positions colors
-length * use_neg, 0.0, 0.0, 1.0, 0.0, 0.0,
length, 0.0, 0.0, 1.0, 0.0, 0.0,
0.0, -length * use_neg, 0.0, 0.0, 1.0, 0.0,
0.0, length, 0.0, 0.0, 1.0, 0.0,
0.0, 0.0, -length * use_neg, 0.0, 0.0, 1.0,
0.0, 0.0, length, 0.0, 0.0, 1.0]
# Defining connections among vertices
    # Each pair of indices below defines one line segment (one axis direction)
indices = [
0, 1,
2, 3,
4, 5]
    return Shape(vertices, indices)
def genmatrix(list, combinfunc, symmetric=False, diagonal=None):
"""
Takes a list and generates a 2D-matrix using the supplied combination
function to calculate the values.
PARAMETERS
list - the list of items
    combinfunc - the function that is used to calculate the value in a cell.
It has to cope with two arguments.
symmetric - Whether it will be a symmetric matrix along the diagonal.
                For example, if the list contains integers, and the
combination function is abs(x-y), then the matrix will be
symmetric.
Default: False
diagonal - The value to be put into the diagonal. For some functions,
the diagonal will stay constant. An example could be the
function "x-y". Then each diagonal cell will be "0".
If this value is set to None, then the diagonal will be
calculated.
Default: None
"""
matrix = []
row_index = 0
for item in list:
row = []
col_index = 0
for item2 in list:
if diagonal is not None and col_index == row_index:
# if this is a cell on the diagonal
row.append(diagonal)
elif symmetric and col_index < row_index:
# if the matrix is symmetric and we are "in the lower left triangle"
row.append( matrix[col_index][row_index] )
else:
# if this cell is not on the diagonal
row.append(combinfunc(item, item2))
col_index += 1
matrix.append(row)
row_index += 1
    return matrix
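# Hedged usage sketch: a symmetric distance matrix over three integers, with a
# constant diagonal so combinfunc is skipped on the diagonal cells.
dist = genmatrix([1, 4, 9], lambda x, y: abs(x - y), symmetric=True, diagonal=0)
print(dist)  # [[0, 3, 8], [3, 0, 5], [8, 5, 0]]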
def get_all_raw_codes_by_area(area: EmisPermArea) -> list:
"""
Returns a list of code names for all permissions within a logical area,
for all possible modes.
"""
return get_raw_codes_by_area(
area, EmisPermMode.CREATE | EmisPermMode.UPDATE | EmisPermMode.VIEW
    )
def cartesian_pair(df1, df2, **kwargs):
"""
Make a cross join (cartesian product) between two dataframes by using a constant temporary key.
Also sets a MultiIndex which is the cartesian product of the indices of the input dataframes.
See: https://github.com/pydata/pandas/issues/5401
:param df1 dataframe 1
    :param df2 dataframe 2
:param kwargs keyword arguments that will be passed to pd.merge()
:return cross join of df1 and df2
"""
df1['_tmpkey'] = 1
df2['_tmpkey'] = 1
res = pd.merge(df1, df2, on='_tmpkey', **kwargs).drop('_tmpkey', axis=1)
df1.drop('_tmpkey', axis=1, inplace=True)
df2.drop('_tmpkey', axis=1, inplace=True)
    return res
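# Hedged usage sketch: cross join of two tiny DataFrames (pandas assumed to be
# imported as pd, as in the function above).
import pandas as pd

left = pd.DataFrame({'a': [1, 2]})
right = pd.DataFrame({'b': ['x', 'y']})
print(cartesian_pair(left, right))  # 4 rows: every 'a' paired with every 'b'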
def mpls(ctx):
"""CRM resource MPLS address-family"""
ctx.obj["crm"].addr_family = 'mpls' | 5,354,125 |
def checa_cuenta(sock, usuario):
"""Checar cuanto hay en una cuenta"""
data = {}
with open(ARCHIVO_CUENTAS, 'r') as data_file:
data = json.load(data_file)
if usuario in data:
chat(sock, "El usuario {0} tiene {1} ponejonedas.".format(usuario, data[usuario]))
else:
chat(sock, "El usuario {0} no tiene una cuenta.".format(usuario)) | 5,354,126 |
def messageBox(title, s):
"""Отображение диалогового окна с сообщением
:param title: заголовок окна
:param s: сообщение
"""
msg = QMessageBox()
msg.setIcon(QMessageBox.Information)
msg.setText(s)
msg.setWindowTitle(title)
    msg.exec_()
def make_js_debug():
"""make debug js files"""
target_path = os.path.join(CURRENT_PATH,"web","static")
with lcd(target_path):
local("browserify main.js -d true > main_bundle.js") | 5,354,128 |
def get_cases_by_landkreise_3daysbefore():
"""
Return all Hospitals
"""
hospitals_aggregated = db.session.query(CasesPerLandkreis3DaysBefore).all()
    return jsonify(__as_feature_collection(hospitals_aggregated)), 200
def shape_list(x, out_type=tf.int32):
"""Deal with dynamic shape in tensorflow cleanly."""
static = x.shape.as_list()
dynamic = tf.shape(x, out_type=out_type)
    return [dynamic[i] if s is None else s for i, s in enumerate(static)]
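# Hedged usage sketch (TensorFlow 2 assumed): with a fully static shape the
# result is plain Python ints; None dimensions would come back as scalar tensors.
import tensorflow as tf

x = tf.zeros([8, 16])
print(shape_list(x))  # [8, 16]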
def coerce_file(fn):
"""Coerce content of given file to something useful for setup(), turn :
.py into mock object with description and version fields,
.md into rst text. Remove images with "nopypi" alt text along the way.
:url: https://github.com/Kraymer/setupgoon
"""
import ast
import os
import re
import subprocess # noqa
text = open(os.path.join(os.path.dirname(__file__), fn)).read()
if fn.endswith('.py'): # extract version, docstring etc out of python file
mock = type('mock', (object,), {})()
for attr in ('version', 'author', 'author_email', 'license'):
regex = r'^__%s__\s*=\s*[\'"]([^\'"]*)[\'"]$' % attr
m = re.search(regex, text, re.MULTILINE)
setattr(mock, attr, m.group(1) if m else None)
mock.docstring = ast.get_docstring(ast.parse(text))
return mock
if fn.endswith('md') and 'upload' in sys.argv: # convert md to rest on pypi package upload
text = '\n'.join([l for l in text.split('\n') if '![nopypi' not in l])
p = subprocess.Popen(['pandoc', '-t', 'rst'], stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
text, stderr = p.communicate(text)
    return text
def load_config(
config_file: str, print_warnings: bool = False
) -> InfestorConfiguration:
"""
Loads an infestor configuration from file and validates it.
"""
try:
with open(config_file, "r") as ifp:
raw_config = json.load(ifp)
except:
raise ConfigurationError(f"Could not read configuration: {config_file}")
configuration, warnings, errors = parse_config(raw_config)
if print_warnings:
warning_items = "\n".join([f"- {warning}" for warning in warnings])
if warnings:
print(
f"Warnings when loading configuration file ({config_file}):\n{warning_items}"
)
if errors:
error_items = "\n".join([f"- {error}" for error in errors])
error_message = (
f"Errors loading configuration file ({config_file}):\n{error_items}"
)
raise ConfigurationError(error_message)
    return cast(InfestorConfiguration, configuration)
def what_do_you_mean_response(ctx: Context) -> REPLY_TYPE:
"""Generate response when we are asked about subject of the dialog
Returns:
template phrase based on previous skill or intent or topic
confidence (can be 0.0, DONTKNOW_CONF, UNIVERSAL_RESPONSE_CONF, SUPER_CONF)
human attributes (empty),
bot attributes (empty),
attributes (empty or MUST_CONTINUE)
"""
dialog = ctx.misc["agent"]["dialog"]
attr = {}
try:
what_do_you_mean_intent = get_what_do_you_mean_intent(dialog["human_utterances"][-1])
if not (what_we_talk_about(dialog["human_utterances"][-1]) or what_do_you_mean_intent):
reply, confidence = "", 0
elif len(dialog.get("human_utterances", [])) < 2:
reply, confidence = DONTKNOW_PHRASE, DONTKNOW_CONF
else:
reply = get_bot_based_on_skill_reply(dialog.get("bot_utterances", []))
if reply is None:
reply = get_bot_based_on_topic_or_intent_reply(
dialog["human_utterances"][-2] if len(dialog["human_utterances"]) > 1 else []
)
if reply is None:
reply, confidence = DONTKNOW_PHRASE, DONTKNOW_CONF
else:
if what_we_talk_about(dialog["human_utterances"][-1]):
confidence = SUPER_CONF
attr = {"can_continue": MUST_CONTINUE}
else:
# what_do_you_mean_intent but not regexp
confidence = UNIVERSAL_RESPONSE_CONF
except Exception as e:
logger.exception("exception in grounding skill")
logger.info(str(e))
sentry_sdk.capture_exception(e)
reply = ""
confidence = 0
    return reply, confidence, {}, {}, attr
def procrustes(X,Y):
"""Finds the optimal affine transformation T to minimize ||x-Ty||_F
Parameters
----------
x - reference, shape(x)=nxd where n is number of samples and d is dimension
y - to be aligned, shape(x)=nxd
Returns
-------
Z - the transformed y
TODO: return T - the transformation
TODO: make scaling, reflection, centering optional
TODO: allow different dimension
"""
assert(X.shape == Y.shape)
# Center
muX = np.mean(X,axis=0)
muY = np.mean(Y,axis=0)
X0 = X-muX
Y0 = Y-muY
# Scale
varX = np.var(X0,axis=0)
varY = np.var(Y0,axis=0)
#Rotate
l,d,m = linalg.svd(X0.T.dot(Y0))
Z = np.sqrt(np.sum(varX)/np.sum(varY))*Y0.dot(m).dot(l.T)+muX
    return Z
def get_market_tops(symbols=None, **kwargs):
"""
MOVED to iexfinance.iexdata.get_tops
"""
import warnings
warnings.warn(WNG_MSG % ("get_market_tops", "iexdata.get_tops"))
    return TOPS(symbols, **kwargs).fetch()
def gen_protrusion_index(psaia_dir, psaia_config_file, file_list_file):
"""Generate protrusion index for file list of PDB structures."""
logging.info("PSAIA'ing {:}".format(file_list_file))
    _psaia(psaia_dir, psaia_config_file, file_list_file)
def get_query_results(query_execution_id):
"""Retrieve result set from Athena query"""
athena_client = SESSION.client('athena')
result_set = []
query = athena_client.get_query_execution(QueryExecutionId=query_execution_id)
logger.debug(query)
query_state = query['QueryExecution']['Status']['State']
logger.debug(query_state)
if query_state in ['FAILED', 'CANCELLED']:
raise QueryFailed("Query failed to execute")
if query_state in ['QUEUED', 'RUNNING']:
raise QueryStillRunning("Query still running")
try:
results = athena_client.get_query_results(QueryExecutionId=query_execution_id)
logger.debug(results)
for result in results["ResultSet"]["Rows"][1:]:
result_set.append(result["Data"])
logger.debug(result_set)
except ClientError as cle:
logger.debug(cle)
if not result_set:
raise NoResults("Athena ResultSet {result_set}".format(result_set=result_set))
    return result_set
def reduce_output_path(path=None, pdb_name=None):
"""Defines location of Reduce output files relative to input files."""
if not path:
if not pdb_name:
raise NameError(
"Cannot save an output for a temporary file without a PDB"
"code specified")
pdb_name = pdb_name.lower()
output_path = Path(global_settings['structural_database']['path'],
pdb_name[1:3].lower(), pdb_name[:4].lower(),
'reduce', pdb_name + '_reduced.mmol')
else:
input_path = Path(path)
if len(input_path.parents) > 1:
output_path = input_path.parents[1] / 'reduce' / \
(input_path.stem + '_reduced' + input_path.suffix)
else:
output_path = input_path.parent / \
(input_path.stem + '_reduced' + input_path.suffix)
    return output_path
def get_recommended_meals():
"""[summary]
Returns:
[type]: [description]
"""
url = "https://themealdb.p.rapidapi.com/randomselection.php"
headers = {
"x-rapidapi-host": "themealdb.p.rapidapi.com",
"x-rapidapi-key": os.getenv("RAPIDAPI"),
}
response = requests.request("GET", url, headers=headers).json()
list_of_food = []
list_of_image = []
for food in response["meals"]:
list_of_food.append(food["strMeal"])
for image in response["meals"]:
list_of_image.append(image["strMealThumb"])
    return list_of_food, list_of_image
def link_discord(request: HttpRequest):
"""Page to prompt user to link their discord account to their user account."""
skip_confirmation = request.GET.get("skip-confirm")
if skip_confirmation and skip_confirmation == "true":
return redirect("discord_register")
    return render(request, "link_discord.html")
def rpc(f=None, **kwargs):
"""Marks a method as RPC."""
if f is not None:
if isinstance(f, six.string_types):
if 'name' in kwargs:
raise ValueError('name option duplicated')
kwargs['name'] = f
else:
return rpc(**kwargs)(f)
    return functools.partial(_rpc, **kwargs)
def fig_fits_h(fig, y):
"""Lista ut of figuren *fig* far plats pa hojden pa skarmen vid
position *x*, *y*
"""
_, h = _get_max_width()
win_h = fig.window.winfo_height()
result = (y + win_h) < h
    return result
def find_executable(name):
"""
Find executable by ``name`` by inspecting PATH environment variable, return
``None`` if nothing found.
"""
for dir in os.environ.get('PATH', '').split(os.pathsep):
if not dir:
continue
fn = os.path.abspath(os.path.join(dir, name))
if os.path.exists(fn):
            return os.path.abspath(fn)
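# Hedged usage sketch: the function above already depends on `os` being imported
# in its module; the result is system-dependent and may be None.
print(find_executable("python3"))  # e.g. '/usr/bin/python3', or None if absent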
def index():
"""
Handler for the root url. Loads all movies and renders the first page.
"""
if path_set():
load_movies()
    return flask.render_template('main.html')
def __hitScore__(srcMZ, targetMZ, srcRT, targetRT, parameters):
# type: (float, float, float, float, LFParameters) -> float
"""Return the hit score of the target frame for the given source
frame.
Keyword Arguments:
srcMZ -- source m/z
targetMZ -- target m/z
srcRT -- source retention time
targetRT -- target retention time
parameters -- LipidFinder's Amalgamator parameters instance
"""
mzDelta = mz_delta(srcMZ, parameters['mzFixedError'],
parameters['mzPPMError'])
mzDiff = abs(srcMZ - targetMZ)
rtDelta = rt_delta(parameters['maxRTDiffAdjFrame'])
rtDiff = abs(srcRT - targetRT)
return sqrt(min(mzDiff / mzDelta, 1.0) ** 2 \
                + min(rtDiff / rtDelta, 1.0) ** 2)
def compute_propeller_nonuniform_freestream(prop, upstream_wake,conditions):
""" Computes the inflow velocities in the frame of the rotating propeller
Inputs:
prop. SUAVE propeller
tip_radius - propeller radius [m]
rotation - propeller rotation direction [-]
thrust_angle - thrust angle of prop [rad]
number_radial_stations - number of propeller radial stations [-]
number_azimuthal_stations - number of propeller azimuthal stations [-]
upstream_wake.
u_velocities - Streamwise velocities from upstream wake
v_velocities - Spanwise velocities from upstream wake
w_velocities - Downwash velocities from upstream wake
VD - Vortex distribution from upstream wake
conditions.
frames
Outputs:
Va Axial velocities at propeller [m/s]
Vt Tangential velocities at propeller [m/s]
Vr Radial velocities at propeller [m/s]
"""
# unpack propeller parameters
Vv = conditions.frames.inertial.velocity_vector
R = prop.tip_radius
rotation = prop.rotation
c = prop.chord_distribution
Na = prop.number_azimuthal_stations
Nr = len(c)
ua_wing = upstream_wake.u_velocities
uv_wing = upstream_wake.v_velocities
uw_wing = upstream_wake.w_velocities
VD = upstream_wake.VD
# Velocity in the Body frame
T_body2inertial = conditions.frames.body.transform_to_inertial
T_inertial2body = orientation_transpose(T_body2inertial)
V_body = orientation_product(T_inertial2body,Vv)
body2thrust = prop.body_to_prop_vel()
T_body2thrust = orientation_transpose(np.ones_like(T_body2inertial[:])*body2thrust)
V_thrust = orientation_product(T_body2thrust,V_body)
# azimuth distribution
psi = np.linspace(0,2*np.pi,Na+1)[:-1]
psi_2d = np.tile(np.atleast_2d(psi),(Nr,1))
    # 2-dimensional radial distribution, non-dimensionalized
chi = prop.radius_distribution /R
# Reframe the wing induced velocities:
y_center = prop.origin[0][1]
# New points to interpolate data: (corresponding to r,phi locations on propeller disc)
points = np.array([[VD.YC[i], VD.ZC[i]] for i in range(len(VD.YC))])
ycoords = np.reshape((R*chi*np.cos(psi_2d).T).T,(Nr*Na,))
zcoords = prop.origin[0][2] + np.reshape((R*chi*np.sin(psi_2d).T).T,(Nr*Na,))
xi = np.array([[y_center+ycoords[i],zcoords[i]] for i in range(len(ycoords))])
ua_w = sp.interpolate.griddata(points,ua_wing,xi,method='linear')
uv_w = sp.interpolate.griddata(points,uv_wing,xi,method='linear')
uw_w = sp.interpolate.griddata(points,uw_wing,xi,method='linear')
ua_wing = np.reshape(ua_w,(Nr,Na))
uw_wing = np.reshape(uw_w,(Nr,Na))
uv_wing = np.reshape(uv_w,(Nr,Na))
if rotation == [1]:
Vt_2d = V_thrust[:,0]*( -np.array(uw_wing)*np.cos(psi_2d) + np.array(uv_wing)*np.sin(psi_2d) ) # velocity tangential to the disk plane, positive toward the trailing edge eqn 6.34 pg 165
Vr_2d = V_thrust[:,0]*( -np.array(uw_wing)*np.sin(psi_2d) - np.array(uv_wing)*np.cos(psi_2d) ) # radial velocity , positive outward
Va_2d = V_thrust[:,0]* np.array(ua_wing) # velocity perpendicular to the disk plane, positive downward eqn 6.36 pg 166
else:
Vt_2d = V_thrust[:,0]*( np.array(uw_wing)*np.cos(psi_2d) - np.array(uv_wing)*np.sin(psi_2d) ) # velocity tangential to the disk plane, positive toward the trailing edge
Vr_2d = V_thrust[:,0]*( -np.array(uw_wing)*np.sin(psi_2d) - np.array(uv_wing)*np.cos(psi_2d) ) # radial velocity , positive outward
Va_2d = V_thrust[:,0]* np.array(ua_wing) # velocity perpendicular to the disk plane, positive downward
# Append velocities to propeller
prop.tangential_velocities_2d = Vt_2d
prop.radial_velocities_2d = Vr_2d
prop.axial_velocities_2d = Va_2d
    return prop
def _expect_const(obj):
"""Return a Constant, or raise TypeError."""
if obj in (0, "0"):
return ZERO
if obj in (1, "1"):
return ONE
if obj in ("x", "X"):
return LOGICAL
if obj == "?":
return ILLOGICAL
if isinstance(obj, Constant):
return obj
raise TypeError("Expected obj to be a Constant") | 5,354,147 |
def _eval_input_receiver_fn(tf_transform_output, schema, label_key):
"""Build everything needed for the tf-model-analysis to run the model.
Args:
tf_transform_output: A TFTransformOutput.
schema: the schema of the input data.
label_key: the name of the transformed label
Returns:
EvalInputReceiver function, which contains:
- Tensorflow graph which parses raw untransformed features, applies the
tf-transform preprocessing operators.
- Set of raw, untransformed features.
- Label against which predictions will be compared.
"""
# Notice that the inputs are raw features, not transformed features here.
raw_feature_spec = _get_raw_feature_spec(schema)
raw_input_fn = tf.estimator.export.build_parsing_serving_input_receiver_fn(
raw_feature_spec, default_batch_size=None)
serving_input_receiver = raw_input_fn()
features = serving_input_receiver.features.copy()
transformed_features = tf_transform_output.transform_raw_features(features)
# NOTE: Model is driven by transformed features (since training works on the
# materialized output of TFT, but slicing will happen on raw features.
features.update(transformed_features)
return tfma.export.EvalInputReceiver(
features=features,
receiver_tensors=serving_input_receiver.receiver_tensors,
        labels=transformed_features[label_key])
def get_lpar_names(adp):
"""Get a list of the LPAR names.
:param adp: A pypowervm.adapter.Adapter instance for the PowerVM API.
:return: A list of string names of the PowerVM Logical Partitions.
"""
    return [x.name for x in pvm_lpar.LPAR.search(adp, is_mgmt_partition=False)]
def init_mlp(in_dim, out_dim, hidden_dim, num_layers, non_linearity=None, bias=True):
"""Initializes a MultilayerPerceptron.
Args:
in_dim: int
out_dim: int
hidden_dim: int
num_layers: int
non_linearity: differentiable function (tanh by default)
bias (bool)
Returns: a MultilayerPerceptron with the architecture
x -> Linear(in_dim, hidden_dim) -> non_linearity ->
...
Linear(hidden_dim, hidden_dim) -> non_linearity ->
Linear(hidden_dim, out_dim) -> y
where num_layers = 0 corresponds to
x -> Linear(in_dim, out_dim) -> y
"""
if non_linearity is None:
non_linearity = nn.Tanh()
dims = [in_dim] + [hidden_dim for _ in range(num_layers)] + [out_dim]
    return MultilayerPerceptron(dims, non_linearity, bias)
def c_grad_curry_regularized(data, target):
"""A closure constructor with regularization term for functional."""
def loss(layerweight):
model = (lambda x: layerweight @ x.t())
reg = 1e-3 * (layerweight**2).sum()/2
return criterion(model(data).t(), target) + reg
    return loss
def test_texture_constructor_hit_box_algo():
"""
Test the different hitbox algorithms
"""
Texture(name="default")
Texture(name="simple", hit_box_algorithm="Simple")
Texture(name="detailed", hit_box_algorithm="Detailed")
Texture(name="allowsnonehitbox", hit_box_algorithm=None)
Texture(name="old_behavior_preserved", hit_box_algorithm="None")
with pytest.raises(ValueError):
Texture(name="random", hit_box_algorithm="definitely invalid")
    arcade.cleanup_texture_cache()
def add_lus_from_json(your_lexicon_folder,
fn_en,
json_path,
skos,
verbose=0):
"""
:param verbose:
:param your_lexicon_folder:
:param fn_en:
:param json_path:
:return:
"""
json_lus = json.load(open(json_path))
status = []
for lu in json_lus['lus']:
the_timestamp = lu['timestamp']
if lu['timestamp'] is not None:
year, month, day = lu['timestamp']
the_timestamp = datetime(year=year, month=month, day=day)
succes = add_lu(your_lexicon_folder,
fn_en,
lu_name=lu['lu_name'],
lexemes=lu['lexemes'],
definition=lu['definition'],
status=lu['status'],
pos=lu['POS'],
frame=lu['frame'],
agent=lu['agent'],
provenance=lu['provenance'],
lu_type=lu['lu_type'],
incorporated_fe=lu['incorporated_fe'],
timestamp=the_timestamp,
skos=skos,
skos_predicate_to_external_references=lu['skos_predicate_to_external_references'],
verbose=verbose)
if succes:
status.append('added')
else:
status.append('failed to add')
if verbose:
print(f'{len(status)} LUs were provided to be added.')
        print(f'the process resulted in: {Counter(status)}')
def convert_examples_to_features_yake(examples, label_list, max_seq_length,
tokenizer, output_mode,
cls_token_at_end=False, pad_on_left=False,
cls_token='[CLS]', sep_token='[SEP]', noi_token='[NOI]', pad_token=0,
sequence_a_segment_id=0,
cls_token_segment_id=1, pad_token_segment_id=0,
mask_padding_with_zero=True, args=None):
""" Loads a data file into a list of `InputBatch`s
`cls_token_at_end` define the location of the CLS token:
- False (Default, BERT/XLM pattern): [CLS] + A + [SEP] + B + [SEP]
- True (XLNet/GPT pattern): A + [SEP] + B + [SEP] + [CLS]
`cls_token_segment_id` define the segment id associated to the CLS token (0 for BERT, 2 for XLNet)
"""
noi_token_id = tokenizer.convert_tokens_to_ids(noi_token)
num_exm = len(examples)
idf_dict = {}
for (ex_index, example) in enumerate(examples):
if ex_index % 100000 == 0:
logger.info("Writing idf example %d of %d" % (ex_index, len(examples)))
if args.model_name_or_path == 'bert-base-uncased' or args.model_name_or_path == 'bert-large-uncased':
tokens_a = tokenizer.tokenize(example.text_a)
elif args.model_name_or_path == 'bert-base-cased':
tokens_a = example.text_a.split()
tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
for t in tokens:
idf_dict[t] = idf_dict.get(t, 0) + 1
for t in idf_dict.keys():
idf_dict[t] = idf_dict[t] / num_exm
stop_words = set(stopwords.words('english') )
for t in stop_words:
if t in idf_dict:
idf_dict[t] *= 0.001
inp = " ".join(idf_dict.keys())
spacy_nlp = spacy.load('en_core_web_sm')
inp_results = [(token.text, token.tag_) for token in spacy_nlp(inp)]
allowed_tags = ['VB','NN','JJ','RB'] # UH for "yes", "no", etc.
ignored_words = ['was','were','be','is','are','am',"'s","'re"] + ['do','did','done','does'] # verb of no info
for word, tag in inp_results:
if word in idf_dict.keys():
if len(tag)>=2 and tag[:2] in allowed_tags and (word not in ignored_words):
if tag[:2] in ['VB','NN']:
idf_dict[word] *= 4
else:
idf_dict[word] *= 2
features = []
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
logger.info("Writing example %d of %d" % (ex_index, len(examples)))
tokens_a = tokenizer.tokenize(example.text_a)
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[:(max_seq_length - 2)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = tokens_a + [sep_token]
segment_ids = [sequence_a_segment_id] * len(tokens)
if cls_token_at_end:
tokens = tokens + [cls_token]
segment_ids = segment_ids + [cls_token_segment_id]
else:
tokens = [cls_token] + tokens
segment_ids = [cls_token_segment_id] + segment_ids
input_ids = tokenizer.convert_tokens_to_ids(tokens)
tf = Counter(tokens)
tokens_len = float(len(tokens))
# score: higher will be more likely to be keeped
prob_list = np.array([idf_dict[t] * tf[t] / tokens_len for t in tokens])
# prob_list = np.array([idf_dict[t] for t in tokens])
# add yake
key_word_len = 100
kw_extractor = yake.KeywordExtractor()
keywords = kw_extractor.extract_keywords(" ".join(tokens))
key_word_len = len(keywords)
for i, t in enumerate(tokens):
if t in keywords:
prob_list[i] *= 100
# Repeat words
for i, t in enumerate(tokens):
if t in tokens[:i]:
prob_list[i] /= 10
prob_list = max(prob_list) - prob_list
N = len(tokens)
lm_label_ids = [noi_token_id] * max_seq_length
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
# Zero-pad up to the sequence length.
padding_length = max_seq_length - len(input_ids)
if pad_on_left:
input_ids = ([pad_token] * padding_length) + input_ids
input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
else:
input_ids = input_ids + ([pad_token] * padding_length)
input_mask = input_mask + ([0 if mask_padding_with_zero else 1] * padding_length)
segment_ids = segment_ids + ([pad_token_segment_id] * padding_length)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
if ex_index < 5:
logger.info("*** Example ***")
logger.info("guid: %s" % (example.guid))
logger.info("tokens: %s" % " ".join(
[str(x) for x in tokens]))
logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
logger.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
logger.info("lm_label_ids: %s" % " ".join([str(x) for x in lm_label_ids]))
# logger.info("label: %s (id = %d)" % (example.label, label_id))
features.append(
InputFeatures(input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
lm_label_ids=lm_label_ids))
while N > 1:
mask_pos = np.array(house_robber(prob_list))
unmask_pos = np.setdiff1d(np.arange(N), mask_pos)
tokens = [t for i,t in enumerate(tokens) if i in unmask_pos]
N = len(tokens)
# mask_lm_label_ids = input_ids
lm_label_ids = [pad_token] * max_seq_length
j=0
i = 1
while i < len(prob_list):
if i in mask_pos:
lm_label_ids[j] = input_ids[i]
i += 2
else:
lm_label_ids[j] = noi_token_id
i += 1
j += 1
# print(i,j)
while j < len(unmask_pos):
lm_label_ids[j] = noi_token_id # no input for last token of new sequence
j+= 1
prob_list = prob_list[unmask_pos]
# Zero-pad up to the sequence length.
padding_length = max_seq_length - len(unmask_pos)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
segment_ids = [sequence_a_segment_id] * len(tokens)
if pad_on_left:
input_ids = ([pad_token] * padding_length) + input_ids
input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
else:
input_ids = input_ids + ([pad_token] * padding_length)
input_mask = input_mask + ([0 if mask_padding_with_zero else 1] * padding_length)
segment_ids = segment_ids + ([pad_token_segment_id] * padding_length)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
if ex_index < 5:
logger.info("*** Example ***")
logger.info("guid: %s" % (example.guid))
logger.info("tokens: %s" % " ".join(
[str(x) for x in tokens]))
logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
logger.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
logger.info("lm_label_ids: %s" % " ".join([str(x) for x in lm_label_ids]))
features.append(
InputFeatures(input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
lm_label_ids=lm_label_ids))
    return features
def test_measure_surface_properties_2d():
"""This tests that measure_surface_properties
raises a ValueError when a 2D label image is passed
"""
label_image, label_indices = make_test_label_image_2d()
with pytest.raises(ValueError):
        _ = measure_surface_properties_from_labels(label_image)
def sq_to_hr(bins, rho, S_k, k, axis=1):
"""
Takes the structure factor s(q) and computes the real space
total correlation function h(r)
"""
# setup scales
dr = np.pi / (k[0] * bins)
radius = dr * np.arange(1, bins + 1, dtype=np.float)
# Rearrange to find total correlation function from structure factor
H_k = (S_k - 1.) / rho
# # Transform back to real space
iFT = idst(H_k * k[:bins], type=1, axis=axis)
normalisation = bins * k[0] / (4 * np.pi**2 * radius) / (bins + 1)
h_r = normalisation * iFT
    return h_r, radius
def scale17(data, factor):
"""Solution to exercise C-1.17.
Had we implemented the scale function (page 25) as follows, does it work
properly?
def scale(data, factor):
for val in data:
val *= factor
Explain why or why not.
--------------------------------------------------------------------------
Solution:
--------------------------------------------------------------------------
No, it doesn't work. Per the text, page 21:
"It is worth noting that val is treated as a standard identifier. If the
element of the original data happens to be mutable, the val identifier can
be used to invoke its methods. But a reassignment of identifier val to a
    new value has no effect on the original data, nor on the next iteration of
the loop."
The code above fails because it tries to assign a new value to the "val"
identifier. This merely breaks the alias without changing the list.
"""
for val in data:
val *= factor
    return data
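# Hedged illustration of the point above: integers are immutable, so `val *= factor`
# rebinds the loop variable and the original list is left untouched.
data = [1, 2, 3]
scale17(data, 10)
print(data)  # [1, 2, 3]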
def initialize_database() -> sqlite3.Connection:
"""Create a sqlite3 database stored in memory with two tables to hold
users, records and history. Returns the connection to the created database."""
with sqlite3.connect("bank_buds.db") as conn:
conn.execute("""CREATE TABLE IF NOT EXISTS user(
customer_id TEXT NOT NULL,
firstName TEXT NOT NULL,
lastName TEXT NOT NULL,
userName TEXT NOT NULL,
userPass TEXT NOT NULL,
balance INTEGER NOT NULL)""")
conn.execute("""CREATE TABLE IF NOT EXISTS user_record(
rec_id TEXT REFERENCES user NOT NULL,
wins INTEGER NOT NULL,
losses INTEGER NOT NULL)""")
conn.execute("""CREATE TABLE IF NOT EXISTS challenge_history(
challenge_id INTEGER NOT NULL,
challenge_starter TEXT REFERENCES user NOT NULL,
challenge_opponent TEXT REFERENCES user NOT NULL,
challenge_winner TEXT REFERENCES user NOT NULL,
challenge_loser TEXT REFERENCES user NOT NULL,
is_active INTEGER NOT NULL,
goal INTEGER NOT NULL)""")
        return conn
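# Hedged usage sketch with hypothetical values: insert one user row and read it back.
conn = initialize_database()
conn.execute(
    "INSERT INTO user VALUES (?, ?, ?, ?, ?, ?)",
    ("c1", "Ada", "Lovelace", "ada", "hunter2", 100),
)
print(conn.execute("SELECT userName, balance FROM user").fetchall())  # [('ada', 100)]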
def stateless_multinomial(logits,
num_samples,
seed,
output_dtype=dtypes.int64,
name=None):
"""Draws deterministic pseudorandom samples from a multinomial distribution.
This is a stateless version of `tf.random.categorical`: if run twice with the
same seeds, it will produce the same pseudorandom numbers. The output is
consistent across multiple runs on the same hardware (and between CPU
and GPU), but may change between versions of TensorFlow or on non-CPU/GPU
hardware.
Example:
```python
# samples has shape [1, 5], where each value is either 0 or 1 with equal
# probability.
samples = tf.random.stateless_categorical(
tf.math.log([[0.5, 0.5]]), 5, seed=[7, 17])
```
Args:
logits: 2-D Tensor with shape `[batch_size, num_classes]`. Each slice
`[i, :]` represents the unnormalized log-probabilities for all classes.
num_samples: 0-D. Number of independent samples to draw for each row slice.
seed: A shape [2] integer Tensor of seeds to the random number generator.
output_dtype: integer type to use for the output. Defaults to int64.
name: Optional name for the operation.
Returns:
The drawn samples of shape `[batch_size, num_samples]`.
"""
with ops.name_scope(name, "stateless_multinomial", [logits, seed]):
return stateless_multinomial_categorical_impl(logits, num_samples,
                                                      output_dtype, seed)
def integration(c, opts=None, pty=True):
"""
Run the integration test suite. May be slow!
"""
# Abort if no default shell on this system - implies some unusual dev
# environment. Certain entirely-standalone tests will fail w/o it, even if
# tests honoring config overrides (like the unit-test suite) don't.
shell = c.config.global_defaults()["run"]["shell"]
if not c.run("which {}".format(shell), hide=True, warn=True):
err = "No {} on this system - cannot run integration tests! Try a container?" # noqa
raise Exit(err.format(shell))
opts = opts or ""
opts += " integration/"
    test(c, opts=opts, pty=pty)
def calc_kss(tag,vj):
"""
calculate Kolmogorov-Smirnov statistics as in CMap; Lamb J, Science, 2006
Parameters
----------
tag: tuple
tuple of up-/down-gene lists; (up,down)
sorted with the values in the descending order
vj: dict
dictionary corresponding to V(j) in CMap; Lamb J, Science, 2006
key, gene; val, rank
"""
a_up,b_up = _ab(tag[0],vj)
a_dn,b_dn = _ab(tag[1],vj)
if a_up > b_up:
ks_up = a_up
else:
ks_up = -1*b_up
if a_dn > b_dn:
ks_dn = a_dn
else:
ks_dn = -1*b_dn
if ks_up*ks_dn > 0:
ks = 0
else:
ks = ks_up - ks_dn
n = len(vj)
tu = len(tag[0])
td = len(tag[1])
kssmax = _kss_max(n,tu,td)
    return ks/kssmax
def encode(elem):
"""This is the general function to call when you wish to encode an
element and all its children and sub-children.
Encode in this context means to convert from pymm elements to
xml.etree.ElementTree elements.
Typically this is called by pymm.write()
"""
converter = ConversionHandler()
    return converter.convert_element_hierarchy(elem, 'encode')
def get_saml_provider_output(arn: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetSAMLProviderResult]:
"""
Resource Type definition for AWS::IAM::SAMLProvider
:param str arn: Amazon Resource Name (ARN) of the SAML provider
"""
    ...
def setup_driver() -> None:
"""
Initialize webdriver.
"""
global driver
driver = webdriver.Firefox()
# This should help on waiting for element.
    driver.implicitly_wait(WAIT_S)
def register_blueprints(app: "Flask") -> "Flask":
"""A function to register flask blueprint.
To register blueprints add them like the example
Example usage:
from app.blueprints import blueprint
app.register_blueprint(blueprint)
Args:
app (Flask): Flask Application instance
Returns:
Flask: Flask Application instance
"""
app.register_blueprint(api_v1_bp)
    return app
def build_history_class(
cls: declarative.DeclarativeMeta,
prop: T_PROPS,
schema: str = None) -> nine.Type[TemporalProperty]:
"""build a sqlalchemy model for given prop"""
class_name = "%s%s_%s" % (cls.__name__, 'History', prop.key)
table = build_history_table(cls, prop, schema)
base_classes = (
TemporalProperty,
declarative.declarative_base(metadata=table.metadata),
)
class_attrs = {
'__table__': table,
'entity': orm.relationship(
lambda: cls,
backref=orm.backref('%s_history' % prop.key, lazy='dynamic'),
),
}
if isinstance(prop, orm.RelationshipProperty):
class_attrs[prop.key] = orm.relationship(prop.argument, lazy='noload')
model = type(class_name, base_classes, class_attrs)
    return model
def get_netcdfFluxesKxKy(self):
""" Read the fluxes versus (kx,ky) from the netcdf file of the simulation. """
# Initiate the attributes: Save the fluxes per (kx,ky)
self.pflx_kxky = np.empty((self.dim_time, self.dim_kx, self.dim_ky)); self.pflx_kxky[:,:,:] = np.NaN
self.qflx_kxky = np.empty((self.dim_time, self.dim_kx, self.dim_ky)); self.qflx_kxky[:,:,:] = np.NaN
self.vflx_kxky = np.empty((self.dim_time, self.dim_kx, self.dim_ky)); self.vflx_kxky[:,:,:] = np.NaN
# Read the data for each input file
for input_file in self.input_files:
# Show the reading progress
if self.Progress: i = self.input_files.index(input_file); length = len(self.input_files)
if self.Progress: self.Progress.move(i/length*100,"Reading the fluxes ("+str(i)+"/"+str(length)+")")
# Read the netcdf data to get the fluxes
netcdf_data = read_netcdf(input_file, ["fluxes"])
dim_time = self.dim_timePerFile[input_file]
for kx in self.vec_kxPerFile[input_file]:
for ky in self.vec_kyPerFile[input_file]:
i_kx = self.vec_kxPerFile[input_file].index(kx)
i_ky = self.vec_kyPerFile[input_file].index(ky)
index_kx = self.vec_kx.index(kx)
index_ky = self.vec_ky.index(ky)
self.pflx_kxky[0:dim_time, index_kx, index_ky] = netcdf_data['pflx_kxky'][:, i_kx, i_ky]
self.qflx_kxky[0:dim_time, index_kx, index_ky] = netcdf_data['qflx_kxky'][:, i_kx, i_ky]
self.vflx_kxky[0:dim_time, index_kx, index_ky] = netcdf_data['vflx_kxky'][:, i_kx, i_ky]
del netcdf_data
    return
def geo_distance(left, right):
"""
Compute distance between two geo spatial data
Parameters
----------
left : geometry or geography
right : geometry or geography
Returns
-------
distance : double scalar
"""
op = ops.GeoDistance(left, right)
    return op.to_expr()
def svn_dirent_local_style(*args):
"""svn_dirent_local_style(char dirent, apr_pool_t pool) -> char"""
    return _core.svn_dirent_local_style(*args)
def make_generator_model():
"""
The Generator
The generator uses `tf.keras.layers.Conv2DTranspose` (upsampling)
tf.keras.layers.to produce an image from a seed (random noise).
Start with a `Dense` layer that takes this seed as input,
then upsample several times until you reach the desired image size of 28x28x1.
Notice the `tf.keras.layers.LeakyReLU` activation for each layer, except the output layer which uses tanh.
:return:
"""
model = tf.keras.Sequential()
model.add(tf.keras.layers.Dense(7 * 7 * 256, use_bias=False, input_shape=(100,)))
model.add(tf.keras.layers.BatchNormalization())
model.add(tf.keras.layers.LeakyReLU())
model.add(tf.keras.layers.Reshape((7, 7, 256)))
assert model.output_shape == (None, 7, 7, 256) # Note: None is the batch size
model.add(
tf.keras.layers.Conv2DTranspose(
128, (5, 5), strides=(1, 1), padding="same", use_bias=False
)
)
assert model.output_shape == (None, 7, 7, 128)
model.add(tf.keras.layers.BatchNormalization())
model.add(tf.keras.layers.LeakyReLU())
model.add(
tf.keras.layers.Conv2DTranspose(
64, (5, 5), strides=(2, 2), padding="same", use_bias=False
)
)
assert model.output_shape == (None, 14, 14, 64)
model.add(tf.keras.layers.BatchNormalization())
model.add(tf.keras.layers.LeakyReLU())
model.add(
tf.keras.layers.Conv2DTranspose(
1, (5, 5), strides=(2, 2), padding="same", use_bias=False, activation="tanh"
)
)
assert model.output_shape == (None, 28, 28, 1)
    return model
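# Hedged usage sketch (TensorFlow 2 assumed): generate a single 28x28x1 image
# from a 100-dimensional noise vector.
import tensorflow as tf

generator = make_generator_model()
noise = tf.random.normal([1, 100])
image = generator(noise, training=False)
print(image.shape)  # (1, 28, 28, 1)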
def get_project_postal_code() -> str:
"""
Returns:
str: value
""" | 5,354,171 |
def find_simple_cycles(dg):
""" Find all simple cycles given a networkx graph.
Args:
dg (obj): a networkx directed graph
Returns:
simple_cycles (list of lists): a list of simple cycles ordered by number of segments.
"""
simple_cycles = [c for c in nx.simple_cycles(dg) if len(c) > 2]
#simple_cycles.sort(key=lambda cycle: len(cycle), reverse=True) # sort by number of segments
    return simple_cycles
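# Hedged usage sketch: the 2-node cycle 3<->4 is filtered out by the len(c) > 2
# check, leaving only the 3-node cycle (node order within the cycle may vary).
import networkx as nx

dg = nx.DiGraph([(1, 2), (2, 3), (3, 1), (3, 4), (4, 3)])
print(find_simple_cycles(dg))  # e.g. [[1, 2, 3]]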
def augment_edge(edge_index: np.ndarray, nodes: np.ndarray,
edge_weight: np.ndarray = None, *,
nbrs_to_link: Optional[np.ndarray] = None,
common_nbrs: Optional[np.ndarray] = None,
fill_weight: float = 1.0) -> tuple:
"""Augment a set of edges by connecting nodes to
element in ``nbrs_to_link``.
Parameters
----------
edge_index: shape [M, 2] or [2, M]
edge indices of a Scipy sparse adjacency matrix.
nodes: the nodes that will be linked to the graph.
list or np.array: the nodes connected to `nbrs_to_link`
int: new added nodes connected to ``nbrs_to_link``,
node ids [num_nodes, ..., num_nodes+nodes-1].
edge_weight: shape [M,]
edge weights of a Scipy sparse adjacency matrix.
nbrs_to_link: a list of N elements,
where N is the length of 'nodes'.
the specified neighbor(s) for each added node.
if `None`, it will be set to `[0, ..., N-1]`.
common_nbrs: shape [None,].
specified common neighbors for each added node.
fill_weight: edge weight for the augmented edges.
NOTE:
-----
Both ``nbrs_to_link`` and ``common_nbrs`` should NOT be specified together.
See Also
--------
graphgallery.functional.augment_adj
"""
if nbrs_to_link is not None and common_nbrs is not None:
raise RuntimeError("Only one of them should be specified.")
edge_index = asedge(edge_index, shape="col_wise")
if edge_weight is None:
edge_weight = np.ones(edge_index.shape[1], dtype=gg.floatx())
num_nodes = edge_index.max() + 1
if gg.is_intscalar(nodes):
# int, add nodes to the graph
nodes = np.arange(num_nodes, num_nodes + nodes, dtype=edge_index.dtype)
else:
# array-like, link nodes to the graph
nodes = np.asarray(nodes, dtype=edge_index.dtype)
if common_nbrs is None and nbrs_to_link is None:
nbrs_to_link = np.arange(nodes.size, dtype=edge_index.dtype)
if not nodes.size == len(nbrs_to_link):
raise ValueError("The length of 'nbrs_to_link' should equal to 'nodes'.")
if nbrs_to_link is not None:
edges_to_link = np.hstack([np.vstack([np.tile(node, get_length(nbr)), nbr])
for node, nbr in zip(nodes, nbrs_to_link)])
else:
num_repeat = len(common_nbrs)
edges_to_link = np.hstack([np.vstack([np.tile(node, num_repeat), common_nbrs])
for node in nodes])
edges_to_link = np.hstack([edges_to_link, edges_to_link[[1, 0]]])
added_edge_weight = np.zeros(edges_to_link.shape[1], dtype=edge_weight.dtype) + fill_weight
augmented_edge_index = np.hstack([edge_index, edges_to_link])
augmented_edge_weight = np.hstack([edge_weight, added_edge_weight])
return augmented_edge_index, augmented_edge_weight | 5,354,173 |
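# Usage sketch for augment_edge (hedged): assumes graphgallery (`gg`) and the helper
# functions referenced above are importable. Adds one new node (id 3) to a 3-node
# triangle graph and links it to neighbours 0 and 2 with weight 0.5.
import numpy as np

edge_index = np.array([[0, 1, 2],
                       [1, 2, 0]])          # directed edges 0->1, 1->2, 2->0, column-wise
aug_edges, aug_weights = augment_edge(edge_index, nodes=1,
                                      nbrs_to_link=[[0, 2]],
                                      fill_weight=0.5)
print(aug_edges)     # original three edges plus (3,0), (3,2) and their reversed copies
print(aug_weights)   # 1.0 for the original edges, 0.5 for the augmented ones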
def _get_str(j_data, key, default=None, range_val=None):
"""
Get data as str
:param j_data: Result of loading JSON
:param key: The value key to retrieve
:param default: Default value if not set
:param range_val: Range of values that can be set
:return:
"""
value = j_data.get(key, default)
if value is None:
sys.stderr.write('"%s" is required\n' % key)
return None
    if not isinstance(value, unicode):
        sys.stderr.write('"%s" must be a string\n' % key)
        return None
if value not in range_val:
sys.stderr.write('"%s" choose from %s\n' % (key, range_val))
return None
return value | 5,354,174 |
def status(proc):
    """Return 'alive' or 'dead' depending on the process status"""
    if proc.is_alive():
        return 'alive'
    else:
        return 'dead' | 5,354,175 |
def is_plumed_file(filename):
"""
Check if given file is in PLUMED format.
Parameters
----------
    filename : string
        path of the file to check
    Returns
    -------
    bool
        whether the file is a PLUMED output file
"""
headers = pd.read_csv(filename, sep=" ", skipinitialspace=True, nrows=0)
is_plumed = True if " ".join(headers.columns[:2]) == "#! FIELDS" else False
return is_plumed | 5,354,176 |
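# Usage sketch for is_plumed_file: a PLUMED COLVAR file starts with a "#! FIELDS ..."
# header line, which is exactly what the first-two-columns check above looks for.
with open("COLVAR", "w") as f:
    f.write("#! FIELDS time d1\n0.0 1.23\n1.0 1.31\n")
with open("plain.dat", "w") as f:
    f.write("time d1\n0.0 1.23\n")

print(is_plumed_file("COLVAR"))      # True  -> header starts with "#! FIELDS"
print(is_plumed_file("plain.dat"))   # False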
def detect_llj_xarray(da, inverse=False):
""" Identify local maxima in wind profiles.
args:
- da : xarray.DataArray with wind profile data
- inverse : to flip the array if the data is stored upside down
    returns  : xarray.Dataset with the vertical dimension removed, containing:
        - falloff  : 0, or the largest difference between the local max and the subsequent min
        - strength : 0, or the wind speed at the jet height
        - level    : the level of the jet (defaults to the last level if no jet is found)
    Note: the vertical dimension should be labeled 'level' and be on axis=1
"""
# Move <axis> to first dimension, to easily index and iterate over it.
xv = np.rollaxis(da.values, 1)
if inverse:
xv = xv[::-1, ...]
# Set initial arrays
min_elem = xv[-1].copy()
max_elem = np.zeros(min_elem.shape)
max_diff = np.zeros(min_elem.shape)
max_idx = np.ones(min_elem.shape, dtype=int) * (-1)
# Start at end of array and search backwards for larger differences.
for i, elem in reversed(list(enumerate(xv))):
min_elem = np.minimum(elem, min_elem)
new_max_identified = elem - min_elem > max_diff
max_diff = np.where(new_max_identified, elem - min_elem, max_diff)
max_elem = np.where(new_max_identified, elem, max_elem)
max_idx = np.where(new_max_identified, i, max_idx)
# Combine the results in a dataframe
    get_height = lambda i: np.where(i >= 0, da.level.values[i], da.level.values[-1])  # -1 means no jet detected
dims = da.isel(level=0).drop('level').dims
coords = da.isel(level=0).drop('level').coords
lljs = xr.Dataset(
{
'falloff': (dims, max_diff),
'strength': (dims, max_elem),
'level': (dims, get_height(max_idx)),
},
coords=coords)
print(
'Beware! Level is also filled if no jet is detected! '
'Use ds.sel(level=lljs.level).where(lljs.falloff>0) to get rid of them'
)
return lljs | 5,354,177 |
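# Usage sketch for detect_llj_xarray (hedged): a tiny DataArray with dims (time, level),
# level on axis 1 as required, and the level index increasing with height.
import numpy as np
import xarray as xr

wind = np.array([[3.0, 6.0, 9.0, 5.0, 2.0],    # nose-shaped profile: jet of 9 m/s at 300 m
                 [2.0, 3.0, 4.0, 5.0, 6.0]])   # monotonic profile: no jet
da = xr.DataArray(wind, dims=("time", "level"),
                  coords={"time": [0, 1], "level": [100, 200, 300, 400, 500]})

lljs = detect_llj_xarray(da)
print(lljs.falloff.values)    # [7. 0.]  -> jet max minus the minimum above it, 0 where no jet
print(lljs.strength.values)   # [9. 0.]
print(lljs.level.values)      # [300 500] -> mask with lljs.falloff > 0 before using the level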
def import_swissnames3d_places():
"""
import places from the SwissNAMES3D database
"""
with cd(get_project_root()):
run_python("manage.py import_swissnames3d_places") | 5,354,178 |
def test_problem_15(answer):
"""
    Test Project Euler problem 015 against the expected answer.
:return:
"""
from euler_python.easiest import p015
output = p015.problem015()
expected_output = answer['Problem 015']
assert output == expected_output | 5,354,179 |
def handle_urban(bot, ievent):
""" urban <what> .. search urban for <what> """
if len(ievent.args) > 0: what = " ".join(ievent.args)
else: ievent.missing('<search query>') ; return
try:
data = geturl2(url + urllib.quote_plus(what))
if not data: ievent.reply("word not found: %s" % what) ; return
data = json.loads(data)
if data['result_type'] == 'no_result': ievent.reply("word not found: %s" % what) ; return
res = []
for r in data['list']: res.append(r['definition'])
ievent.reply("result: ", res)
except Exception, ex: ievent.reply(str(ex)) | 5,354,180 |
def insert_trade_event(event: list):
"""
    Write a new trade event to InfluxDB.
    :param event: list of data points to store
:return:
"""
print('[INFLUXDB] writing new tradeEvent\n', event)
__WRITE_API.write(__CURRENT_BUCKET, __INFLUXDB_CURRENT_ORG, event) | 5,354,181 |
def basic_scatter(file):
"""
Renders a basic scatter graph
:param file: path to file
:return: saves image to temp/scatter.png
"""
with open(file, 'r') as csvfile:
plotting = csv.reader(csvfile, delimiter=',')
next(plotting)
for row in plotting:
variables['x'].append(int(row[0]))
variables['y'].append(int(row[1]))
plt.scatter(variables['x'], variables['y'], label='Scatter', color='b')
plt.xlabel('X')
    plt.ylabel('Y')
plt.legend()
# plt.show()
plt.savefig('temp/scatter.png') | 5,354,182 |
def gen_blinds(depth, width, height, spacing, angle, curve, movedown):
"""Generate genblinds command for genBSDF."""
nslats = int(round(height / spacing, 0))
slat_cmd = "!genblinds blindmaterial blinds "
slat_cmd += "{} {} {} {} {} {}".format(
depth, width, height, nslats, angle, curve)
slat_cmd += "| xform -rz -90 -rx -90 -t "
slat_cmd += f"{-width/2} {-height/2} {-movedown}\n"
return slat_cmd | 5,354,183 |
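# Worked example for gen_blinds: with height/spacing = 10/0.5 the slat count is 20, and
# the pipe follows the last genblinds argument directly, exactly as the string is built above.
cmd = gen_blinds(depth=0.05, width=10, height=10, spacing=0.5,
                 angle=45, curve=0, movedown=0.1)
print(cmd)
# !genblinds blindmaterial blinds 0.05 10 10 20 45 0| xform -rz -90 -rx -90 -t -5.0 -5.0 -0.1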
def add_deformation_field_points(axes_2d, axes_3d, aircraft):
"""
Plot the deformation field points
Args:
:axes_2d: 2D axes object (matplotlib)
:axes_3d: 3D axes object (matplotlib)
:aircraft: (obj) aircraft
"""
axes_yz, axes_xz, axes_xy = axes_2d
for wing in aircraft.wings.values():
if wing.is_deformed:
for def_field in (wing.def_field, wing.def_field_mirror):
if def_field is not None:
points = def_field[:, 0:3]
_plot_XYZ_points(
axes_2d, axes_3d, points, symmetry=0,
linewidth=PS.LINEWIDTH_c, color=C.MAROON,
marker='o'
) | 5,354,184 |
def dev_Sonic(Mach, gamma=defg._gamma):
"""computes the deviation angle for a downstream SONIC Mach number
Args:
        Mach: upstream Mach number
        gamma: ratio of specific heats (Default value = defg._gamma)
    Returns:
        the deviation angle for a sonic downstream Mach number
"""
return deflection_Mach_sigma(Mach, sigma_Sonic(Mach, gamma=gamma), gamma=gamma) | 5,354,185 |
def query(request):
"""传入一个查询字符串,返回匹配到的文章id。
Args:
request (GET): queryString:String 查询的字符串
categories:String/Int 文章所属的领域,多个领域使用逗号分隔,例如"math.CO,quant-ph"
timeStart:String yyyy-mm 最早发表日期(含),both included
timeEnd: String yyyy-mm 最晚发表日期(含),both included
offset: int 起始位置(例如,offset=100,默认一页显示20条,那么返回搜索结果的第100-119项,方便前端分页。)
Returns:
json
一个排序好的list,按相关性从高到低,最多count项。
一个int,表示一共多少个结果。
例:
{[(arxiv_id, title, abstract, authors, update_date)*20],50}
表示一共有50个搜索结果,本次查询返回的20个结果是上面显示的20个
"""
ret_list = []
ret_dict = {'ret_list': ret_list, 'num': 0}
    # Parse the request parameters
query_string_raw = request.GET.get("queryString")
categories_raw = request.GET.get("categories")
time_start_raw = request.GET.get("timeStart")
time_end_raw = request.GET.get("timeEnd")
offset = int(request.GET.get("offset"))
    # Extract the time range
time_start_year = time_start_raw[:4]
time_start_month = time_start_raw[-2:]
time_end_year = time_end_raw[:4]
time_end_month = time_end_raw[-2:]
# category info extraction
categories = categories_raw.split(',')
# preprocess and stemming
query_string_list_1 = [stem(query) for query in preprocess(query_string_raw)]
query_string_list_1.sort()
query_string_list = tuple(query_string_list_1)
# return arxiv_ids by search words
arxiv_ids, wc = search_by_words(word_list=query_string_list)
# return arxiv_docs by arxiv_ids
arxiv_docs = get_arxiv_document_by_ids(arxiv_ids)
    # Filter by the query conditions
for doc in arxiv_docs:
flag = True
        # Filter by article category
if judge_category(categories, doc.categories):
flag = flag and True
else:
flag = False
        # Filter by publication year and month
        # TODO: handle the case where doc.update_date is empty
doc_year = doc.update_date.split('-')[0]
doc_month = doc.update_date.split('-')[1]
if (time_start_year == doc_year) and (time_start_month <= doc_month):
flag = flag and True
elif (time_end_year == doc_year) and (doc_month <= time_end_month):
flag = flag and True
elif time_start_year <= doc_year <= time_end_year:
flag = flag and True
else:
flag = False
if flag:
ret_list.append((doc.arxiv_id, doc.title,
doc.abstract, doc.authors, doc.update_date))
ret_dict['num'] = len(ret_list)
ret_dict['total'] = wc
    # Boundary conditions for pagination
if len(ret_list) <= offset:
ret_dict['ret_list'] = ret_list[:]
elif offset < len(ret_list) <= (offset + 20):
ret_dict['ret_list'] = ret_list[offset:]
else:
ret_dict['ret_list'] = ret_list[offset:offset + 20]
return HttpResponse(json.dumps(ret_dict)) | 5,354,186 |
def get_prev_and_next_lexemes(request, current_lexeme):
"""Get the previous and next lexeme from the same language, ordered
by meaning and then alphabetically by form"""
lexemes = list(Lexeme.objects.filter(
language=current_lexeme.language).order_by(
"meaning", "phon_form", "romanised", "id"))
ids = [l.id for l in lexemes]
try:
current_idx = ids.index(current_lexeme.id)
except ValueError:
current_idx = 0
prev_lexeme = lexemes[current_idx - 1]
try:
next_lexeme = lexemes[current_idx + 1]
except IndexError:
next_lexeme = lexemes[0]
return (prev_lexeme, next_lexeme) | 5,354,187 |
def check_user(user):
"""
Check and verify user status.
                                 registered  confirmed  disabled  merged  usable-password
    ACTIVE                    :      x           x         o        o           x
    NOT_CONFIRMED (default)   :      o           o         o        o           x
    NOT_CONFIRMED (external)  :      o           o         o        o           o
    NOT_CLAIMED               :      o           o         o        o           o
    DISABLED                  :      x           x         x        o           x
    USER_MERGED               :      x           x         o        x           o
Unlike users created via username-password signup, unconfirmed accounts created by an external
IdP (e.g. ORCiD Login) have unusable passwords. To detect them, check the ``external_identity``
property of the user object. See ``created_by_external_idp_and_unconfirmed()`` for details.
:param user: the user object to check
:raises `UnconfirmedAccountError` if the user was created via default useraname / password
sign-up, or if via ORCiD login with pending status "LINK" or "CREATE" to confirm
:raises `UnclaimedAccountError` if the user was created as an unregistered contributor of a
project or group waiting to be claimed
:raises `DeactivatedAccountError` if the user has been disabled / deactivated
:raises `MergedAccountError` if the user has been merged into another account
:raises `InvalidAccountError` if the user is not active and not of the expected inactive status
:returns nothing if user is active and no exception is raised
"""
# An active user must be registered, claimed, confirmed, not merged, not disabled, and either
# has a usable password or has a verified external identity.
if user.is_active:
return
# The user has been disabled / deactivated
if user.is_disabled:
raise DeactivatedAccountError
# The user has been merged into another one
if user.is_merged:
raise MergedAccountError
# The user has not been confirmed or claimed
if not user.is_confirmed and not user.is_registered:
if user.has_usable_password() or created_by_external_idp_and_unconfirmed(user):
raise UnconfirmedAccountError
raise UnclaimedAccountError
# For all other cases, the user status is invalid. Although such status can't be reached with
# normal user-facing web application flow, it is still possible as a result of direct database
# access, coding bugs, database corruption, etc.
raise InvalidAccountError | 5,354,188 |
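# Hedged sketch of how a caller might branch on the exceptions raised by check_user.
# The exception classes are the ones raised above; any subclass relationships between
# them are not visible in this excerpt, so the ordering below is illustrative only.
def describe_login_state(user) -> str:
    try:
        check_user(user)
    except UnconfirmedAccountError:
        return "resend the confirmation email"
    except UnclaimedAccountError:
        return "account is unclaimed; send a claim link"
    except DeactivatedAccountError:
        return "account has been deactivated"
    except MergedAccountError:
        return "account was merged into another user"
    except InvalidAccountError:
        return "unexpected account state"
    return "active"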
def _sanitize_and_check(indexes):
"""
Verify the type of indexes and convert lists to Index.
Cases:
- [list, list, ...]: Return ([list, list, ...], 'list')
- [list, Index, ...]: Return _sanitize_and_check([Index, Index, ...])
Lists are sorted and converted to Index.
- [Index, Index, ...]: Return ([Index, Index, ...], TYPE)
TYPE = 'special' if at least one special type, 'array' otherwise.
Parameters
----------
indexes : list of Index or list objects
Returns
-------
sanitized_indexes : list of Index or list objects
type : {'list', 'array', 'special'}
"""
kinds = list({type(index) for index in indexes})
if list in kinds:
if len(kinds) > 1:
indexes = [
Index(com.try_sort(x)) if not isinstance(x, Index) else x
for x in indexes
]
kinds.remove(list)
else:
return indexes, "list"
if len(kinds) > 1 or Index not in kinds:
return indexes, "special"
else:
return indexes, "array" | 5,354,189 |
def get_tetranuc_freqs(given_seq):
"""
Returns dictionary mapping each of the 4^4 = 256 possible tetranucleotides
to its observed frequency in the given sequence.
Args:
        given_seq: the nucleotide sequence to analyse
    Returns:
        dict mapping each of the 256 possible tetranucleotides to its observed frequency in ``given_seq``
"""
return {tetranuc : get_observed_oligonuc_freq(given_seq, tetranuc) for tetranuc in TETRANUCLEOTIDES} | 5,354,190 |
def calculate3_onemetric(pred_ccm, pred_ad, truth_ccm, truth_ad, rnd=0.01, method="orig_nc", verbose=False, full_matrix=True, in_mat=2):
"""Calculate the score for subchallenge 3 using the given metric
:param pred_ccm: predicted co-clustering matrix
:param pred_ad: predicted ancestor-descendant matrix
:param truth_ccm: true co-clustering matrix
    :param truth_ad: true ancestor-descendant matrix
:param method: method to use when evaluating the submission
:param verbose: boolean for whether to display information about the score calculations
:param full_matrix: boolean for whether to use the full CCM/AD matrix when calculating the score
:param in_mat: number representing which matrices to use in calculating the SC3 scoring metric
Options:
1 - use all input matrics i.e. CCM, ADM, ADM^T and CM
2 - use all except co-clustering matrix (CCM)
3 - use all except ancestor descendant matrix (ADM)
4 - use all except ADM^T
5 - use all except cousin matrix (CM)
:return: score for the given submission to subchallenge 3 using the given metric
"""
# Get the cousin matrices
truth_cous = 1 - truth_ccm - truth_ad - truth_ad.T
pred_cous = 1 - pred_ccm - pred_ad - pred_ad.T
if verbose:
if(np.amax(truth_cous) > 1 or np.amin(truth_cous) < 0):
Warning("Cousin Truth is wrong. Maximum matrix entry is greater than 1 or minimum matrix entry is less than 0")
if(np.amax(pred_cous) > 1 or np.amin(pred_cous) < 0):
Warning("Cousin Predicted is wrong. Maximum matrix entry is greater than 1 or minimum matrix entry is less than 0")
# Calculate the metric measure for each specified matrix
func = method_funcs[method]
results = []
ccm_res, ad_res, ad_res_t, cous_res = [float('nan')] * 4
if method in ("pseudoV",
"simpleKL",
"sym_pseudoV"):
if in_mat != 2:
ccm_res = func(pred_ccm, truth_ccm, rnd, full_matrix=full_matrix)
results.append(ccm_res)
if in_mat != 3:
ad_res = func(pred_ad, truth_ad, rnd, full_matrix=full_matrix)
results.append(ad_res)
if in_mat != 4:
ad_res_t = func(np.transpose(pred_ad), np.transpose(truth_ad), rnd, full_matrix=full_matrix)
results.append(ad_res_t)
if in_mat != 5:
cous_res = func(pred_cous, truth_cous, rnd, full_matrix=full_matrix)
results.append(cous_res)
else:
if in_mat != 2:
ccm_res = func(pred_ccm, truth_ccm, full_matrix=full_matrix)
results.append(ccm_res)
if in_mat != 3:
ad_res = func(pred_ad, truth_ad, full_matrix=full_matrix)
results.append(ad_res)
if in_mat != 4 or method in ('mcc',
'pearson',
'spearman'):
ad_res_t = func(np.transpose(pred_ad), np.transpose(truth_ad), full_matrix=full_matrix)
results.append(ad_res_t)
if in_mat != 5:
cous_res = func(pred_cous, truth_cous, full_matrix=full_matrix)
results.append(cous_res)
res = 0
n = 0
for r in results: # TODO: fix the NA's
if not math.isnan(r):
n += 1
res += r
if n > 0:
res = res / float(n)
if verbose:
print("%s for Matrices\nCC: %s, AD: %s, AD Transpose: %s, Cousin: %s\nResult: %s" %
(method, str(ccm_res), str(ad_res),str(ad_res_t),str(cous_res), str(res)))
return res | 5,354,191 |
def _prv_keyinfo_from_wif(
wif: String, network: Optional[str] = None, compressed: Optional[bool] = None
) -> PrvkeyInfo:
"""Return private key tuple(int, compressed, network) from a WIF.
    A WIF always encodes the network and whether the key is compressed:
here the 'network, compressed' input parameters are passed
only to allow consistency checks.
"""
if isinstance(wif, str):
wif = wif.strip()
payload = b58decode(wif)
net = network_from_key_value("wif", payload[:1])
if net is None:
raise BTClibValueError(f"invalid wif prefix: {payload[:1]!r}")
if network is not None and net != network:
raise BTClibValueError(f"not a {network} wif: {wif!r}")
ec = NETWORKS[net].curve
if len(payload) == ec.n_size + 2: # compressed WIF
compr = True
if payload[-1] != 0x01: # must have a trailing 0x01
raise BTClibValueError("not a compressed WIF: missing trailing 0x01")
prv_key = payload[1:-1]
elif len(payload) == ec.n_size + 1: # uncompressed WIF
compr = False
prv_key = payload[1:]
else:
raise BTClibValueError(f"wrong WIF size: {len(payload)}")
if compressed is not None and compr != compressed:
raise BTClibValueError("compression requirement mismatch")
q = int.from_bytes(prv_key, byteorder="big")
if not 0 < q < ec.n:
raise BTClibValueError(f"private key {hex(q)} not in [1, n-1]")
return q, net, compr | 5,354,192 |
def network_opf(network,snapshots=None):
"""Optimal power flow for snapshots."""
raise NotImplementedError("Non-linear optimal power flow not supported yet") | 5,354,193 |
def dp_split(words: Sequence[str], line_limit: int = 80) -> List[List[str]]:
"""
    TODO: implement the split (a possible dynamic-programming version is sketched below).
    :param words: words to lay out into lines
    :param line_limit: maximum number of characters per line
    :return: a list of lines, each a list of words
"""
pass | 5,354,194 |
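# The body above is an unimplemented TODO stub. A possible implementation is sketched
# below under the assumption that the intent is classic minimum-raggedness word wrap
# (minimise the total squared slack of each line, the last line included).
from typing import List, Sequence


def dp_split_sketch(words: Sequence[str], line_limit: int = 80) -> List[List[str]]:
    """Dynamic-programming word wrap: minimise the total squared slack per line."""
    n = len(words)
    # best[i] = (minimal cost of laying out words[i:], index where the first line ends)
    best = [(0.0, n)] * (n + 1)
    for i in range(n - 1, -1, -1):
        width, options = -1, []
        for j in range(i, n):
            width += len(words[j]) + 1  # words joined by single spaces
            if width > line_limit and j > i:
                break  # line is full; an over-long single word still gets its own line
            options.append(((line_limit - width) ** 2 + best[j + 1][0], j + 1))
        best[i] = min(options)
    lines, i = [], 0
    while i < n:                        # follow the stored break points
        lines.append(list(words[i:best[i][1]]))
        i = best[i][1]
    return lines


print(dp_split_sketch("the quick brown fox jumps over the lazy dog".split(), line_limit=15))
# every inner list joins to a line of at most 15 characters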
def r2_on_off():
"""Switch on/off relay 2"""
r2_cmd_packet = b'\x04\x14\x02\x00\x00\xe6\x0f'
ser_relay.write(r2_cmd_packet) | 5,354,195 |
def build_cmake_defines(args, dirs, env_vars, stage):
"""
Generate cmake defines
:param args: The args variable generated by parse_parameters
:param dirs: An instance of the Directories class with the paths to use
:param env_vars: An instance of the EnvVars class with the compilers/linker to use
:param stage: What stage we are at
:return: A set of defines
"""
defines = {}
# Get slim defines if we are not building a full toolchain
if not args.full_toolchain:
defines.update(slim_cmake_defines())
# Add compiler/linker defines, which change based on stage
defines.update(cc_ld_cmake_defines(dirs, env_vars, stage))
# Add distribution specific defines
defines.update(distro_cmake_defines())
# Add project and target defines, which change based on stage
defines.update(project_cmake_defines(args, stage))
defines.update(target_cmake_defines(args, stage))
# Add other stage specific defines
defines.update(stage_specific_cmake_defines(args, dirs, stage))
# Add the vendor string if necessary
if args.clang_vendor:
defines['CLANG_VENDOR'] = args.clang_vendor
# Removes system dependency on terminfo to keep the dynamic library dependencies slim
defines['LLVM_ENABLE_TERMINFO'] = 'OFF'
return defines | 5,354,196 |
def server() -> None:
"""Старт сервера"""
class PredictionServicer(predictions_pb2_grpc.PredictionServicer):
def PredictIris(self, request, context):
response = predictions_pb2.PredictResponse()
response.iris_type = predictions.predict_iris(request.sepal_length,
request.sepal_width,
request.petal_length,
request.petal_width)
return response
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
predictions_pb2_grpc.add_PredictionServicer_to_server(PredictionServicer(), server)
    print('Server started. Port 50051.')
server.add_insecure_port('[::]:50051')
# CTRL+C
server.start()
server.wait_for_termination() | 5,354,197 |
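# Client-side sketch (hedged): assumes the generated stub follows the usual gRPC naming
# convention (PredictionStub for service Prediction) and that the request message is
# PredictRequest with the four float fields read by the servicer above.
import grpc
import predictions_pb2
import predictions_pb2_grpc


def predict_iris_remotely() -> str:
    with grpc.insecure_channel("localhost:50051") as channel:
        stub = predictions_pb2_grpc.PredictionStub(channel)
        request = predictions_pb2.PredictRequest(   # assumed message name
            sepal_length=5.1, sepal_width=3.5, petal_length=1.4, petal_width=0.2)
        return stub.PredictIris(request).iris_type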
def wasLastResponseHTTPError():
"""
Returns True if the last web request resulted in an erroneous HTTP code (like 500)
"""
threadData = getCurrentThreadData()
return threadData.lastHTTPError and threadData.lastHTTPError[0] == threadData.lastRequestUID | 5,354,198 |
async def update_rates(
user_id: str = None,
client_id: str = None,
new_amount: str = None,
session: Session = Depends(get_session),
):
"""Update a rate."""
statement = (
select(Rate)
.where(Rate.user_id == user_id)
.where(Rate.client_id == client_id)
.where(Rate.is_active == True)
)
rate_to_update = session.exec(statement).one()
rate_to_update.amount = new_amount
session.add(rate_to_update)
session.commit()
session.refresh(rate_to_update)
return True | 5,354,199 |