content (string, 35 to 762k chars) | sha1 (string, 40 chars) | id (int64, 0 to 3.66M) |
---|---|---|
import os
import configparser
# `ae` is assumed to be the WORC exceptions module (provides WORCKeyError)
import WORC.addexceptions as ae
def load_config(config_file_path):
"""
Load the config ini, parse settings to WORC
Args:
config_file_path (String): path of the .ini config file
Returns:
settings_dict (dict): dict with the loaded settings
"""
if not os.path.exists(config_file_path):
e = f'File {config_file_path} does not exist!'
raise ae.WORCKeyError(e)
settings = configparser.ConfigParser()
settings.read(config_file_path)
settings_dict = {'ComBat': dict()}
# Convert settings
settings_dict['ComBat']['batch'] =\
[str(item).strip() for item in
settings['ComBat']['batch'].split(',')]
settings_dict['ComBat']['mod'] =\
[str(item).strip() for item in
settings['ComBat']['mod'].split(',')]
settings_dict['ComBat']['par'] =\
settings['ComBat'].getint('par')
settings_dict['ComBat']['eb'] =\
settings['ComBat'].getint('eb')
settings_dict['ComBat']['language'] =\
str(settings['ComBat']['language'])
settings_dict['ComBat']['matlab'] =\
str(settings['ComBat']['matlab'])
settings_dict['ComBat']['per_feature'] =\
int(settings['ComBat']['per_feature'])
settings_dict['ComBat']['excluded_features'] =\
[str(item).strip() for item in
settings['ComBat']['excluded_features'].split(',')]
return settings_dict | d46e7e3ef92b5cb3a61a9ec2339b407adb506a68 | 3,651,900 |
from json import loads
from typing import Dict
from bson import json_util
def get_last_confirmed() -> Dict:
"""
This function gets the last day saved in MongoDB and
shows the confirmed cases and the accumulated total for that day.
- The country is the only needed path parameter.
"""
date = db.find({}, {"date": 1, "_id": 0}).sort("date", -1).limit(1)
date = list(date)
pipeline = [
{"$match": {"date": {"$eq": date[0]["date"]}}},
{
"$project": {
"cases": 1,
"cases_accumulated": 1,
"date": {"$dateToString": {"format": "%Y-%m-%d", "date": "$date"}},
}
},
{
"$group": {
"_id": "$date",
"cases_accumulated": {"$sum": "$cases_accumulated"},
"cases": {"$sum": "$cases"},
}
},
{"$project": {"date": "$_id", "cases": 1, "cases_accumulated": 1, "_id": 0}},
]
result = db.aggregate(pipeline)
return loads(json_util.dumps(list(result)[0])) | af2cf9ea3da1d361bf1347c389c2c9a6f095629e | 3,651,901 |
def _newNode( cls, named ):
"""Construct new instance of cls, set proper color, and add to objects"""
if not scene.visible:
scene.visible = 1
if not [k for k in ('color','red','green','blue') if k in named]:
named['color'] = scene.foreground
if 'display' in named:
target = named['display']
del named['display'] # XXX fix when have backref added
else:
target = scene
if not target.visible:
target.visible = 1
node = cls(**named)
objs = target.objects
objs.append( node )
target.objects = objs
return node | 7b81b4ec5c1a540f7159dd532c87cc2ebd3c7150 | 3,651,902 |
def MplJs():
"""
Serves the generated matplotlib javascript file. The content
is dynamically generated based on which toolbar functions the
user has defined. Call `FigureManagerWebAgg` to get its
content.
"""
js_content = FigureManagerWebAgg.get_javascript()
resp = make_response(js_content, 200)
resp.headers['Content-Type'] = 'application/javascript'
return resp | d7b34b86d75f1375f6788a40245e7d04cb3ce6d9 | 3,651,903 |
def VVSS2021_fig4_plot(data, model, sizes=fig_sizes, cmaps=colormaps):
"""
Create and save a plot of the results from the linear regression reaction time model
:param data: the data frame
:param model: the fitted reaction time model
:param cmaps: a dictionary of colormaps
:param sizes: a dictionary of sizes
:return: Nothing
"""
fig, axs = plt.subplots(1, 1, figsize=(sizes['width'], sizes['height']))
sampleIDs = [1, 2, 3, 4, 5, 6]
t_cm_discrete = cmaps['t_cm'](np.linspace(0, 1, len(sampleIDs)))
for col, c in zip(sampleIDs, t_cm_discrete):
tw = 'sampleProbHit_0{}'.format(col)
pred_rt = model.coefs.loc['(Intercept)', 'Estimate'] + model.coefs.loc[tw, 'Estimate'] * data.loc[:, tw]
axs.plot(data[tw], pred_rt, label='sample {}'.format(col), color=c, linewidth=5)
axs.legend(loc=(1, 0))
axs.set_ylabel('response time [s]')
axs.set_xlabel('normalized p[H]')
fig.savefig(path_figs + "Fig4_lmRTs.pdf", bbox_inches='tight')
return None | 47e63763073c454f1c44cf6e4c590b8b7a985f43 | 3,651,904 |
def stat(noten):
""" Berechne Mittelwert, Median, min, max, oberes und unteres Quantil """
minimum = round(min(noten), 2)
maximum = round(max(noten), 2)
_median = median(noten)
_mittelwert = mittelwert(noten)
[unteres_quartil, oberes_quartil] = quartile(noten)
return [minimum, unteres_quartil, _median, _mittelwert, oberes_quartil, maximum] | 89d00c9b91b142366a4ca927298931a2f22bc715 | 3,651,905 |
from functools import wraps
from django.conf import settings
from django.utils import translation
def translation_activate_block(function=None, language=None):
"""
Activate language only for one method or function
"""
def _translation_activate_block(function):
def _decorator(*args, **kwargs):
tmp_language = translation.get_language()
try:
translation.activate(language or settings.LANGUAGE_CODE)
return function(*args, **kwargs)
finally:
translation.activate(tmp_language)
return wraps(function)(_decorator)
if function:
return _translation_activate_block(function)
else:
return _translation_activate_block | 8615b02e4e3aa0560be0734f8e6564755f5e5e9b | 3,651,906 |
def _loaded_schema_collections(schema_file_relative_dir) -> SchemaCollectionManager:
"""A loaded ``SchemaCollectionManager`` object, but this should never be modified. This object manages ``Schema``
objects corresponding to ``tests/{datasets,formats,licenses}.yaml``. Note that these are not necessarily the same as
the ones used in other schema fixtures, so please do not assume that it is equal to other schema fixtures. One
purpose of this fixture is to reduce repeated calls in the tests to the same function when ``loaded_schemata`` is
used. The other purpose is to provide other session-scoped fixtures access to the loaded schemata, because
session-scoped fixtures can't load function-scoped fixtures.
"""
return SchemaCollectionManager(datasets=DatasetSchemaCollection(schema_file_relative_dir / 'datasets.yaml'),
formats=FormatSchemaCollection(schema_file_relative_dir / 'formats.yaml'),
licenses=LicenseSchemaCollection(schema_file_relative_dir / 'licenses.yaml')) | ba5d03c8ad1c622391247ef505ccad21476c17d2 | 3,651,907 |
import uuid
def dag(name=None, child_tasks=None, edges=None, target=None):
"""
Create a DAG task
Args:
name (str): Name for the task
child_tasks (list [Task]): Child tasks within this dag
edges (list [tuple (Ref, Ref)]): List of tuples of ref(Task).
Each element denotes an edge from
first task to the second.
target (Ref): Target entity reference
Returns:
(Task): DAG task
"""
dag_edges = []
for edge in edges or []:
if len(edge) != 2:
raise ValueError("DAG edges require a tuple of two task references")
for task_ref in edge:
if not getattr(task_ref, "__kind__") == "app_ref":
raise ValueError("{} is not a valid task reference".format(task_ref))
from_ref = edge[0]
to_ref = edge[1]
dag_edges.append({"from_task_reference": from_ref, "to_task_reference": to_ref})
# This follows UI naming convention for runbooks
name = name or str(uuid.uuid4())[:8] + "_dag"
kwargs = {
"name": name,
"child_tasks_local_reference_list": [
task.get_ref() for task in child_tasks or []
],
"attrs": {"edges": dag_edges},
"type": "DAG",
}
if target:
kwargs["target_any_local_reference"] = target
return _task_create(**kwargs) | ce12e46141ab030297303b4d55585475eb74f2cf | 3,651,908 |
async def async_validate_pdf_signature(
embedded_sig: EmbeddedPdfSignature,
signer_validation_context: ValidationContext = None,
ts_validation_context: ValidationContext = None,
ac_validation_context: ValidationContext = None,
diff_policy: DiffPolicy = None,
key_usage_settings: KeyUsageConstraints = None,
skip_diff: bool = False) -> PdfSignatureStatus:
"""
.. versionadded:: 0.9.0
.. versionchanged: 0.11.0
Added ``ac_validation_context`` param.
Validate a PDF signature.
:param embedded_sig:
Embedded signature to evaluate.
:param signer_validation_context:
Validation context to use to validate the signature's chain of trust.
:param ts_validation_context:
Validation context to use to validate the timestamp's chain of trust
(defaults to ``signer_validation_context``).
:param ac_validation_context:
Validation context to use to validate attribute certificates.
If not supplied, no AC validation will be performed.
.. note::
:rfc:`5755` requires attribute authority trust roots to be specified
explicitly; hence why there's no default.
:param diff_policy:
Policy to evaluate potential incremental updates that were appended
to the signed revision of the document.
Defaults to
:const:`~pyhanko.sign.diff_analysis.DEFAULT_DIFF_POLICY`.
:param key_usage_settings:
A :class:`.KeyUsageConstraints` object specifying which key usages
must or must not be present in the signer's certificate.
:param skip_diff:
If ``True``, skip the difference analysis step entirely.
:return:
The status of the PDF signature in question.
"""
sig_object = embedded_sig.sig_object
if embedded_sig.sig_object_type != '/Sig':
raise SignatureValidationError("Signature object type must be /Sig")
# check whether the subfilter type is one we support
subfilter_str = sig_object.get('/SubFilter', None)
_validate_subfilter(
subfilter_str,
(SigSeedSubFilter.ADOBE_PKCS7_DETACHED, SigSeedSubFilter.PADES),
"%s is not a recognized SubFilter type in signatures."
)
if ts_validation_context is None:
ts_validation_context = signer_validation_context
embedded_sig.compute_integrity_info(
diff_policy=diff_policy, skip_diff=skip_diff
)
status_kwargs = embedded_sig.summarise_integrity_info()
ts_status_kwargs = await collect_timing_info(
embedded_sig.signer_info, ts_validation_context,
raw_digest=embedded_sig.external_digest
)
status_kwargs.update(ts_status_kwargs)
if 'signer_reported_dt' not in status_kwargs:
# maybe the PDF signature dictionary declares /M
signer_reported_dt = embedded_sig.self_reported_timestamp
if signer_reported_dt is not None:
status_kwargs['signer_reported_dt'] = signer_reported_dt
status_kwargs = await cms_basic_validation(
embedded_sig.signed_data, status_cls=PdfSignatureStatus,
raw_digest=embedded_sig.external_digest,
validation_context=signer_validation_context,
status_kwargs=status_kwargs, key_usage_settings=key_usage_settings
)
tst_validity = status_kwargs.get('timestamp_validity', None)
timestamp_found = (
tst_validity is not None
and tst_validity.valid and tst_validity.trusted
)
sv_update = report_seed_value_validation(
embedded_sig, status_kwargs['validation_path'], timestamp_found
)
status_kwargs.update(sv_update)
if ac_validation_context is not None:
ac_validation_context.certificate_registry.register_multiple(
embedded_sig.other_embedded_certs
)
status_kwargs.update(
await collect_signer_attr_status(
sd_attr_certificates=embedded_sig.embedded_attr_certs,
signer_cert=embedded_sig.signer_cert,
validation_context=ac_validation_context,
sd_signed_attrs=embedded_sig.signer_info['signed_attrs']
)
)
return PdfSignatureStatus(**status_kwargs) | fb4a8ae244d80c672ddc35c94d75953ab2d7d119 | 3,651,909 |
import inspect
def get_one_to_many_foreign_key_column_name(model, name):
"""
Returns the constituent column names for the foreign key on the remote
table of the one-to-many relationship specified by name.
Args:
model (class or object): The given model class or model instance.
name (string): The name of the attribute on `model` which is a
one-to-many relationship.
Return:
list: One-to-many foreign key column names as a list of strings.
"""
if not inspect.isclass(model):
return get_one_to_many_foreign_key_column_name(model.__class__, name)
attr = getattr(model, name, None)
if not attr:
# Unknown attribute.
return []
remote_columns = getattr(attr.property, 'remote_side', None)
if not remote_columns:
# This is not a one-to-many relationship.
return []
remote_tables = set(c.table.name for c in remote_columns)
if len(remote_tables) > 1:
# This is a many-to-many relationship with a cross reference table.
return []
foreign_key_column_names = []
for remote_column in remote_columns:
if getattr(remote_column, 'foreign_keys', False):
foreign_key_column_names.append(remote_column.name)
else:
remote_model = get_model_by_table(model, remote_column.table)
if remote_model:
# Quasi foreign keys don't actually have foreign_keys set,
# but they need to be treated as though they did.
foreign_keys = getattr(remote_model, 'quasi_foreign_keys', [])
if remote_column.name in foreign_keys:
foreign_key_column_names.append(remote_column.name)
return foreign_key_column_names | f829de2cbb29034f033f3c124837ac888f7526eb | 3,651,910 |
import argparse
def parse_args():
""" parse the args from the cli """
logger.debug("parse_args()")
parser = argparse.ArgumentParser(description='Check the status of dnsmasq')
parser.add_argument('-v', '--verbose', action='store_true', default=None, help='Verbose?')
parser.add_argument('--url', default='www.redhat.com', help='site to be checked')
return parser.parse_args() | de22e0b754d204c5c59fcfc2cb452fea844d387d | 3,651,911 |
import argparse
def parse_args(args):
"""Parse command line parameters
Args:
args ([str]): command line parameters as list of strings
Returns:
:obj:`argparse.Namespace`: command line parameters namespace
"""
parser = argparse.ArgumentParser(
description="Just a Fibonacci demonstration")
parser.add_argument(
"--version",
action="version",
version="bytespread {ver}".format(ver=__version__))
parser.add_argument(
"-d",
dest="directory",
required=True,
help="The directly to analyse")
parser.add_argument(
"-w",
dest="wildcard",
default="*",
required=False,
help="Wildcard for file match within the directory (default: *)")
parser.add_argument(
"-c",
dest="clusters",
default=32,
required=False,
type=int,
help="Number of clusters (default: 32)")
parser.add_argument(
"-b",
dest="bricks",
default=100,
required=False,
type=int,
help="Number bricks to show for the longest column (default: 100)")
parser.add_argument(
"-r",
dest="recursive",
action='store_true',
required=False,
help="Recursive within the provided folder (default: false)")
return parser.parse_args(args) | 96a7a5825ef0ba990e6bfac4e4a4d49e29bfb662 | 3,651,912 |
from typing import List
def formula(formula: str, formula_param: str, cols: List[str]) -> Aggregation:
""" Create a user defined formula aggregation.
Args:
formula (str): the user defined formula to apply to each group
formula_param (str): the parameter name within the formula
cols (List[str]): the columns to aggregate on, can be renaming expressions, i.e. "new_col = col"
Returns:
an aggregation
"""
return Aggregation(j_aggregation=_JAggregation.AggFormula(formula, formula_param, *cols)) | 86247179aa3252bf24500c53b2cf7c20eb9afe62 | 3,651,913 |
def num_false_positives(df):
"""Total number of false positives (false-alarms)."""
return df.noraw.Type.isin(['FP']).sum() | 6aa339b86d15072c6a6910a43e70281575da5d36 | 3,651,914 |
import ctypes
def repmot(instr, marker, value, repcase, lenout=None):
"""
Replace a marker with the text representation of an ordinal number.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/repmot_c.html
:param instr: Input string.
:type instr: str
:param marker: Marker to be replaced.
:type marker: str
:param value: Replacement value.
:type value: int
:param repcase: Case of replacement text.
:type repcase: str
:param lenout: Optional available space in output string.
:type lenout: int
:return: Output string.
:rtype: str
"""
if lenout is None:
lenout = ctypes.c_int(len(instr) + len(marker) + 15)
instr = stypes.stringToCharP(instr)
marker = stypes.stringToCharP(marker)
value = ctypes.c_int(value)
repcase = ctypes.c_char(repcase.encode(encoding='UTF-8'))
out = stypes.stringToCharP(lenout)
libspice.repmot_c(instr, marker, value, repcase, lenout, out)
return stypes.toPythonString(out) | 1be758f8c594805ae08c4b2e62014809e62039ad | 3,651,915 |
from typing import Optional
def check_for_pending_actions(
user: IdPUser, ticket: SSOLoginData, sso_session: SSOSession
) -> Optional[WerkzeugResponse]:
"""
Check whether there are any pending actions for the current user,
and if there are, redirect to the actions app.
:param user: the authenticating user
:param ticket: SSOLoginData instance
:param sso_session: SSOSession
"""
if current_app.actions_db is None:
current_app.logger.info('This IdP is not initialized for special actions')
return None
# Add any actions that may depend on the login data
add_idp_initiated_actions(user, ticket)
actions_eppn = current_app.actions_db.get_actions(user.eppn, session=ticket.key)
# Check for pending actions
pending_actions = [a for a in actions_eppn if a.result is None]
if not pending_actions:
# eduid_webapp.idp.mfa_action.check_authn_result will have added the credential used
# to the ticket.mfa_action_creds hash - transfer it to the session
update = False
for cred_key, ts in ticket.mfa_action_creds.items():
cred = user.credentials.find(cred_key)
authn = AuthnData(cred_id=cred.key, timestamp=ts)
sso_session.add_authn_credential(authn)
update = True
# eduid_webapp.idp.mfa_action.check_authn_result will have added any external mfa used to
# the ticket.mfa_action_external - transfer it to the session
if ticket.mfa_action_external is not None:
sso_session.external_mfa = ticket.mfa_action_external
update = True
if update:
current_app.sso_sessions.save(sso_session)
current_app.logger.debug(f'There are no pending actions for user {user}')
return None
# Pending actions found, redirect to the actions app
current_app.logger.debug(f'There are pending actions for user {user}: {pending_actions}')
actions_uri = current_app.conf.actions_app_uri
current_app.logger.info(f'Redirecting user {user} to actions app {actions_uri}')
# TODO: The IdP should never _write_ to the actions namespace. Actions should _read_
# the ticket.key from the IdP namespace instead.
actions = Actions(ts=utc_now(), session=ticket.key)
session.actions = actions
return redirect(actions_uri) | 372b232d24003f6f8cbaefd8293fec233376a743 | 3,651,916 |
def emulatte_RESOLVE(
thicks, resistivity, freqs, nfreq, spans, height,
vca_index=None, add_noise=False, noise_ave=None, noise_std=None
):
"""
return : ndarray
[
Re(HCP1), Re(HCP2), Re(HCP3), (Re(VCX)), Re(HCP4), Re(HCP5),
Im(HCP1), Im(HCP2), Im(HCP3), (Im(VCX)), Im(HCP4), Im(HCP5),
]
"""
# forward calculation
tc = [0, 0, -height]
hankel_filter = 'werthmuller201'
moment = 1
displacement_current = False
res = np.append(2e14, resistivity)
model = fwd.model(thicks)
model.set_properties(res=res)
fields = []
primary_fields = []
# compute HCP and VCA responses
for i in range(nfreq):
f = np.array([freqs[i]])
rc = [-spans[i], 0, -height]
# with VCA
if (nfreq == 6) and (i == vca_index):
hmdx = fwd.transmitter("HMDx", f, moment=moment)
model.locate(hmdx, tc, rc)
resp = model.emulate(hankel_filter=hankel_filter)
resp = resp['h_x'][0]
primary_field = moment / (2 * np.pi * spans[i] ** 3)
# VCAなし
else:
vmd = fwd.transmitter("VMD", f, moment=moment)
model.locate(vmd, tc, rc)
resp = model.emulate(hankel_filter=hankel_filter)
resp = resp['h_z'][0]
primary_field = - moment / (4 * np.pi * spans[i] ** 3)
fields.append(resp)
primary_fields.append(primary_field)
fields = np.array(fields)
primary_fields = np.array(primary_fields)
# convert the primary and secondary magnetic fields to ppm
inph_total_field = np.real(fields)
quad_secondary_field = np.imag(fields)
inph_secondary_field = inph_total_field - primary_fields
real_ppm = abs(inph_secondary_field / primary_fields) * 1e6
imag_ppm = abs(quad_secondary_field / primary_fields) * 1e6
# noise level for each Bookpurnong frequency, from Christensen (2009)
# add noise
add = np.random.choice([True, False], p=[0.7, 0.3])
if (add_noise & add):
noise = [nlv for nlv in zip(noise_ave, noise_std)]
for index, nlv in enumerate(noise):
inphnoise = np.random.normal(nlv[0], nlv[1])
quadnoise = np.random.normal(nlv[0], nlv[1])
real_ppm[index] = real_ppm[index] + inphnoise
imag_ppm[index] = imag_ppm[index] + quadnoise
resp = np.hstack([real_ppm, imag_ppm])
return resp | 0cdd44d9d3d53c1813ed25f230e29adec36fca5e | 3,651,917 |
import csv
from pathlib import Path
def metadata_dict_chex_mimic(metadata_location):
"""Reads whole csv to find image_name, creates dict with nonempty bboxes
Output:
Bboxes dictionary with key the img_name and values the bboxes themselves."""
bboxes = {}
with open(metadata_location) as f_obj:
reader = csv.reader(f_obj, delimiter=',')
next(reader) # skip header
for line in reader:
_, img_name, x, y, w, h = [int(entry) if entry.isnumeric() else entry for entry in line]
if h != 0 and w != 0: # only append nonempty bboxes
img_name = str(Path(img_name)) # compatibility between different OS
bboxes.setdefault(img_name, []) # these two lines allow safe placing of multiple values for key
bboxes[img_name].append([x, y, w, h])
return bboxes | 716358d1eb5a77c177a41076bb630108d7ffc934 | 3,651,918 |
def create_feature_df(cnv_dict, feature_type, labels, csv=False):
"""Creates a pandas Dataframe containing cnvs as rows and features as columns"""
# get features for each CNV
cnv_features = []
if csv:
for chrom in cnv_dict:
for cnv in cnv_dict[chrom]:
if cnv.tads:
cnv_features.append(
np.append([cnv.chr, cnv.start, cnv.end], cnv.annotate(feature_type)))
feature_df = pd.DataFrame(data=cnv_features, columns=[
'CHR', 'START', 'END'] + labels)
else:
for chrom in cnv_dict:
for cnv in cnv_dict[chrom]:
if cnv.tads:
cnv_features.append(cnv.annotate(feature_type))
feature_df = pd.DataFrame(data=cnv_features, columns=labels)
return feature_df | 76af71f73ee09a7cbfbafed3ad447b20a98e0da5 | 3,651,919 |
import xml
def simulationcell_from_axes(axes, bconds='p p p', rckc=15.):
""" construct the <simulationcell> xml element from axes
Args:
axes (np.array): lattice vectors
bconds (str, optional): boundary conditions in x,y,z directions.
p for periodic, n for non-periodic, default to 'p p p'
rckc: long-range cutoff parameter rc*kc, default to 15
Return:
etree.Element: representing <simulationcell>
"""
def pad_line(line): # allow content to be selected by double click
return ' ' + line + ' '
# write primitive lattice vectors
lat_node = etree.Element('parameter', attrib={
'name': 'lattice',
'units': 'bohr'
})
lat_node.text = xml.arr2text(axes)
# write boundary conditions
bconds_node = etree.Element('parameter', {'name': 'bconds'})
bconds_node.text = pad_line(bconds)
# write long-range cutoff parameter
lr_node = etree.Element('parameter', {'name': 'LR_dim_cutoff'})
lr_node.text = pad_line(str(rckc))
# build <simulationcell>
sc_node = etree.Element('simulationcell')
sc_node.append(lat_node)
sc_node.append(bconds_node)
sc_node.append(lr_node)
return sc_node | c3cdc77f9cce7ef09418459832c60b6570d7e11c | 3,651,920 |
def diff_seq(seq1, seq0):
"""Returns the difference of two sequences: seq1 - seq0.
Args:
seq1: The left operand.
seq0: The right operand.
Returns:
The difference of the two sequences.
"""
return (seq1 - seq0) % MAX_SEQ | cd1632357d6ff61fcd2a32ba71a6a6be2454521d | 3,651,921 |
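A minimal usage sketch for `diff_seq`, assuming a hypothetical `MAX_SEQ = 2**32` (the constant is not shown in the snippet); it illustrates how the modulo keeps the difference positive across a sequence-number wraparound.

```python
MAX_SEQ = 2 ** 32  # assumed wraparound modulus; not defined in the snippet above

def diff_seq(seq1, seq0):
    return (seq1 - seq0) % MAX_SEQ

# Normal case: no wraparound.
assert diff_seq(10, 4) == 6
# seq1 wrapped past zero: the modulo recovers the small positive difference.
assert diff_seq(5, 2 ** 32 - 6) == 11
```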
def method_menu():
"""Method menu items
1. Add a new method
2. Duplicate selected method
3. Remove selected method
------------------------------
4. Clear methods
"""
message_method = "You are about to delete all methods. Do you want to continue?"
method_items = [
menu_item(icon_text("fas fa-plus-circle", "Add a new method"), id="add_method"),
menu_item(
icon_text("fas fa-clone", "Duplicate selection"), id="duplicate_method"
),
menu_item(
icon_text("fas fa-minus-circle", "Remove selection"), id="remove_method"
),
menu_item(divider=True),
menu_item("Clear all methods", id="clear-methods"),
menu_item(divider=True),
menu_item("Measurement", header=True),
menu_item(
dcc.Upload(
icon_text("fas fa-paperclip", "Add to selection"),
id="add-measurement-for-method",
)
),
menu_item(
icon_text("fas fa-times-circle", "Remove from selection"),
id="remove-measurement-from-method",
),
dcc.ConfirmDialog(id="confirm-clear-methods", message=message_method),
]
# Callbacks for the add, duplicate, and remove methods
_ = [
app.clientside_callback(
f"""function() {{
document.getElementById("{t}-method-button").click();
throw window.dash_clientside.PreventUpdate;
}}""",
Output(f"{t}-method-button", "n_clicks"),
Input(f"{t}_method", "n_clicks"),
prevent_initial_call=True,
)
for t in TARGET
]
# Callbacks for the clear all methods
app.clientside_callback(
"""function(n) {
if (n == null) throw window.dash_clientside.PreventUpdate;
return true;
}""",
Output("confirm-clear-methods", "displayed"),
Input("clear-methods", "n_clicks"),
prevent_initial_call=True,
)
return create_submenu(label="Method", children=method_items, right=False) | d22a820a249728650b49f75ccfcdc254f3a84e76 | 3,651,922 |
def shapeanalysis_OuterWire(*args):
"""
* Returns the outer wire on the face <Face>. This is a replacement for the method BRepTools::OuterWire, to be used while that method works badly. Returns the first wire oriented as outer according to FClass2d_Classifier. If none, the last wire is returned.
:param face:
:type face: TopoDS_Face &
:rtype: TopoDS_Wire
"""
return _ShapeAnalysis.shapeanalysis_OuterWire(*args) | 7fa38d16cdfe40f802dea7d93666870e82d5cf26 | 3,651,923 |
def is_numeric(val: str) -> bool:
"""Check whether an unparsed string is a numeric value"""
if val in MISSING_VALUES:
return True
try:
float(val)
except Exception:
return False
else:
return True | 72d6095c32f3bd89c0ae8bda22dc4b9a6461468b | 3,651,924 |
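A short sketch of how `is_numeric` behaves, assuming an illustrative `MISSING_VALUES` set (the real constant is defined elsewhere and not part of the snippet): missing-value markers count as numeric, parseable floats count as numeric, everything else does not.

```python
MISSING_VALUES = {"", "NA", "."}  # assumed placeholder for the real constant

def is_numeric(val: str) -> bool:
    if val in MISSING_VALUES:
        return True
    try:
        float(val)
    except Exception:
        return False
    else:
        return True

assert is_numeric("3.14") is True   # parseable float
assert is_numeric("NA") is True     # treated as a missing value
assert is_numeric("abc") is False   # neither missing nor parseable
```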
import tempfile
import os
def safeRun( commandArgs ):
"""
Runs the given command and reads the output
"""
errTmp = tempfile.mkstemp()
errStream = os.fdopen( errTmp[0] )
process = Popen( commandArgs, stdin = PIPE,
stdout = PIPE, stderr = errStream )
process.stdin.close()
processStdout = process.stdout.read()
process.stdout.close()
errStream.seek( 0 )
err = errStream.read()
errStream.close()
os.unlink( errTmp[1] )
process.wait()
# 'grep' return codes:
# 0 - OK, lines found
# 1 - OK, no lines found
# 2 - Error occurred
if process.returncode != 0 and commandArgs[0] != "grep":
raise Exception( "Error in '%s' invocation: %s" % \
(commandArgs[0], err) )
return processStdout | 217ddceaaaab339e80adaab6c7884c751474e53f | 3,651,925 |
def expand_options(sent, as_strings=True):
"""
['1', '(', '2', '|', '3', ')'] -> [['1', '2'], ['1', '3']]
For example:
Will it (rain|pour) (today|tomorrow|)?
---->
Will it rain today?
Will it rain tomorrow?
Will it rain?
Will it pour today?
Will it pour tomorrow?
Will it pour?
Args:
sent (list<str>): List of tokens in the sentence
Returns:
list<list<str>>: Multiple possible sentences from original
"""
return expand_parentheses(sent, as_strings) | 2b07ac0cfee7339b11016f68792500bf855df019 | 3,651,926 |
def gcd_recursive_by_divrem(m, n):
"""
Computes the greatest common divisor of two numbers by recursively getting remainder from
division.
:param int m: First number.
:param int n: Second number.
:returns: GCD as a number.
"""
if n == 0:
return m
return gcd_recursive_by_divrem(n, m % n) | bd25d9cea4813e523ea6bb9bd85c24bf43dd2744 | 3,651,927 |
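A quick worked example of the recursion defined above: gcd(1071, 462) reduces via the remainders 147 and 21 before the second argument hits 0.

```python
# gcd_recursive_by_divrem(1071, 462)
# -> gcd_recursive_by_divrem(462, 1071 % 462)  # 1071 % 462 == 147
# -> gcd_recursive_by_divrem(147, 462 % 147)   # 462 % 147 == 21
# -> gcd_recursive_by_divrem(21, 147 % 21)     # 147 % 21 == 0
# -> 21
assert gcd_recursive_by_divrem(1071, 462) == 21
```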
def repeat(atoms, coord):
"""
Repeat atoms (:class:`AtomArray` or :class:`AtomArrayStack`)
multiple times in the same model with different coordinates.
Parameters
----------
atoms : AtomArray, shape=(n,) or AtomArrayStack, shape=(m,n)
The atoms to be repeated.
coord : ndarray, dtype=float, shape=(k,n,3) or shape=(k,m,n,3)
The coordinates to be used for the repeated atoms.
The length of first dimension determines the number of repeats.
If `atoms` is an :class:`AtomArray` 3 dimensions, otherwise
4 dimensions are required.
Returns
-------
repeated: AtomArray, shape=(n*k,) or AtomArrayStack, shape=(m,n*k)
The repeated atoms.
Whether an :class:`AtomArray` or an :class:`AtomArrayStack` is
returned depends on the input `atoms`.
Examples
--------
>>> atoms = array([
... Atom([1,2,3], res_id=1, atom_name="N"),
... Atom([4,5,6], res_id=1, atom_name="CA"),
... Atom([7,8,9], res_id=1, atom_name="C")
... ])
>>> print(atoms)
1 N 1.000 2.000 3.000
1 CA 4.000 5.000 6.000
1 C 7.000 8.000 9.000
>>> repeat_coord = np.array([
... [[0,0,0], [1,1,1], [2,2,2]],
... [[3,3,3], [4,4,4], [5,5,5]]
... ])
>>> print(repeat(atoms, repeat_coord))
1 N 0.000 0.000 0.000
1 CA 1.000 1.000 1.000
1 C 2.000 2.000 2.000
1 N 3.000 3.000 3.000
1 CA 4.000 4.000 4.000
1 C 5.000 5.000 5.000
"""
if isinstance(atoms, AtomArray) and coord.ndim != 3:
raise ValueError(
f"Expected 3 dimensions for the coordinate array, got {coord.ndim}"
)
elif isinstance(atoms, AtomArrayStack) and coord.ndim != 4:
raise ValueError(
f"Expected 4 dimensions for the coordinate array, got {coord.ndim}"
)
repetitions = len(coord)
orig_length = atoms.array_length()
new_length = orig_length * repetitions
if isinstance(atoms, AtomArray):
if coord.ndim != 3:
raise ValueError(
f"Expected 3 dimensions for the coordinate array, "
f"but got {coord.ndim}"
)
repeated = AtomArray(new_length)
repeated.coord = coord.reshape((new_length, 3))
elif isinstance(atoms, AtomArrayStack):
if coord.ndim != 4:
raise ValueError(
f"Expected 4 dimensions for the coordinate array, "
f"but got {coord.ndim}"
)
repeated = AtomArrayStack(atoms.stack_depth(), new_length)
repeated.coord = coord.reshape((atoms.stack_depth(), new_length, 3))
else:
raise TypeError(
f"Expected 'AtomArray' or 'AtomArrayStack', "
f"but got {type(atoms).__name__}"
)
for category in atoms.get_annotation_categories():
annot = np.tile(atoms.get_annotation(category), repetitions)
repeated.set_annotation(category, annot)
if atoms.bonds is not None:
bonds = atoms.bonds
for _ in range(repetitions-1):
bonds += atoms.bonds
repeated.bonds = bonds
if atoms.box is not None:
repeated.box = atoms.box.copy()
return repeated | b4f86bad25061807370d0f1eacdb0637eb8a19cc | 3,651,928 |
def get_mzi_delta_length(m, neff=2.4, wavelength=1.55):
""" m*wavelength = neff * delta_length """
return m * wavelength / neff | 5bcd4b9b217c79a06b48856f7801060787f12e52 | 3,651,929 |
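A worked instance of the relation m*wavelength = neff*delta_length using the snippet's defaults: for an illustrative order m = 150, wavelength = 1.55 and neff = 2.4 give delta_length = 150 * 1.55 / 2.4 = 96.875.

```python
delta_length = get_mzi_delta_length(m=150)   # uses defaults neff=2.4, wavelength=1.55
assert abs(delta_length - 96.875) < 1e-6     # 150 * 1.55 / 2.4
```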
def yagzag2radec(yag, zag, q):
"""
Given ACA Y-ang, Z-ang and pointing quaternion determine RA, Dec. The
input ``yag`` and ``zag`` values can be 1-d arrays in which case the output
``ra`` and ``dec`` will be corresponding arrays of the same length.
:param yag: ACA Y angle (degrees)
:param zag: ACA Z angle (degrees)
:param q: Quaternion
:rtype: list ra, dec (degrees)
"""
try:
one = np.ones(len(yag))
except TypeError:
one = 1.0
d_aca = np.array([one, tan(radians(yag)), tan(radians(zag))])
d_aca *= 1.0 / np.sum(d_aca**2)
eci = np.dot(q.transform, d_aca)
return eci2radec(eci) | e7266f5c0dd0763238c3f12fafebea19c080022d | 3,651,930 |
from tensorflow.keras.preprocessing.image import load_img
from typing import Union
def load_image(
path: str,
color_mode="rgb",
target_size: Union[None, ImageSize] = None,
normalize=False,
) -> np.ndarray:
"""Load an RGB image from the given path, optionally resizing it.
:param path: Path to the image
:param color_mode: "rgb", "bgr" or "grayscale"
:param target_size: Target size of the image (width, height).
:param normalize: Normalize values to [0.0, 1.0]
"""
pil_color_mode = color_mode
if pil_color_mode == "bgr":
pil_color_mode = "rgb"
pil = load_img(path, color_mode=pil_color_mode, target_size=target_size)
image = np.array(pil)
if color_mode == "bgr":
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
if normalize:
image = image / 255.0
return image | 9b80d39561e3ccdae778675e011ab5c52c04db4f | 3,651,931 |
def deit_base_patch16_384():
"""
DeiT base model @ 384x384 from paper (https://arxiv.org/abs/2012.12877).
ImageNet-1k weights from https://github.com/facebookresearch/deit.
"""
cfg = ViTConfig(
name="deit_base_patch16_384",
url="",
input_size=(384, 384),
patch_size=16,
embed_dim=768,
nb_blocks=12,
nb_heads=12,
crop_pct=1.0,
mean=IMAGENET_DEFAULT_MEAN,
std=IMAGENET_DEFAULT_STD,
)
return ViT, cfg | 720968c8a95de30c836631fd5505c8029eaf46c0 | 3,651,932 |
def update_output(
event_id,
event_dividers,
light_dividers,
filename,
geometry,
do_plot_tracks,
do_plot_opids,
figure,
):
"""Update 3D event display end event id"""
fig = go.Figure(figure)
if event_dividers is None:
return no_update, no_update, no_update, no_update, no_update
try:
fig.data = []
fig.add_traces(
draw_event(
filename,
GEOMETRIES[geometry],
event_dividers,
light_dividers,
event_id,
do_plot_tracks,
do_plot_opids,
)
)
except IndexError as err:
print("IndexError", err)
return fig, {"display": "none"}, True, no_update, no_update
except KeyError as err:
print("KeyError", err)
return fig, {"display": "none"}, True, "Select a geometry first", no_update
url_filename = filename.replace(DOCKER_MOUNTED_FOLDER, "")
return (
fig,
{"height": "85vh"},
False,
no_update,
f"https://larnddisplay.lbl.gov/{url_filename}?geom={geometry}#{event_id}",
) | 370f7a165ddaf870d4806564d8b86f4d5c4e90b5 | 3,651,933 |
import requests
def get_containerports(marathon_url, app_id):
"""
Get containerports if we have portmapping.
marathon_url : [string] the URL of the marathon service
app_id : [string] ID of the running marathon app
Method : GET
Return : list of ports
"""
api_endpoint = '/v2/apps/'
headers = {'Content-Type': 'application/json'}
url = marathon_url + api_endpoint + app_id
print(url)
r = requests.get(url, headers=headers)
print(r.status_code)
containerports = []
for portmapping in r.json()['app']['container']['docker']['portMappings']:
containerports.append(portmapping['containerPort'])
return containerports | eb22656e58b2b7156015b84c63755cb4f4348502 | 3,651,934 |
import numpy as np
def backward_algorithm(O, HMM_model):
"""HMM Backward Algorithm.
Args:
O: (o1, o2, ..., oT), observations
HMM_model: (pi, A, B), (init state prob, transition prob, emitting prob)
Return:
prob: the probability of HMM_model generating O.
"""
pi, A, B = HMM_model
T = len(O)
N = len(pi)
prob = 0.0
# Begin Assignment
# backward probability matrix
betas = np.zeros((N, T))
for i in range(N):
betas[i][0] = 1
for t in range(1, T):
for i in range(N):
for j in range(N):
betas[i][t] += A[i][j]*B[j][O[T-t]]*betas[j][t-1]
for i in range(N):
prob += pi[i]*B[i][O[0]]*betas[i][-1]
# End Assignment
return prob | da958ddd8d8943546030ba4306b7f632061d96bc | 3,651,935 |
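A small sanity-check sketch with made-up HMM parameters (two states, two symbols, an observation sequence of length two); for this model the backward recursion yields P(O) = 0.6*0.5*0.62 + 0.4*0.1*0.74 = 0.2156, which matches what the forward algorithm gives.

```python
import numpy as np

pi = np.array([0.6, 0.4])                # initial state distribution
A = np.array([[0.7, 0.3], [0.4, 0.6]])   # transition probabilities
B = np.array([[0.5, 0.5], [0.1, 0.9]])   # emission probabilities
O = [0, 1]                               # observed symbol indices

prob = backward_algorithm(O, (pi, A, B))
assert abs(prob - 0.2156) < 1e-9
```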
def scan_codes(code_type, image):
"""Get *code_type* codes from a PIL Image
*code_type* can be any of zbar supported code type [#zbar_symbologies]_:
- **EAN/UPC**: EAN-13 (`ean13`), UPC-A (`upca`), EAN-8 (`ean8`) and UPC-E (`upce`)
- **Linear barcode**: Code 128 (`code128`), Code 93 (`code93`), Code 39 (`code39`), Interleaved 2 of 5 (`i25`),
DataBar (`databar`) and DataBar Expanded (`databar-exp`)
- **2D**: QR Code (`qrcode`)
- **Undocumented**: `ean5`, `ean2`, `composite`, `isbn13`, `isbn10`, `codabar`, `pdf417`
.. [#zbar_symbologies] http://zbar.sourceforge.net/iphone/userguide/symbologies.html
Args:
code_type (str): Code type to search
image (PIL.Image.Image): Image to scan
returns:
A list of *code_type* code values or None
"""
assert Image.isImageType(image)
converted_image = image.convert('L') # Convert image to gray scale (8 bits per pixel).
raw = converted_image.tobytes() # Get image data.
width, height = converted_image.size # Get image size.
return zbar_code_scanner('{0}.enable'.format(code_type).encode(), raw, width, height) | 02c70551138ffc5dc386e753d7532c28466de97e | 3,651,936 |
def get_summoner_masteries(summoner_ids):
"""
https://developer.riotgames.com/api/methods#!/1017/3450
Args:
summoner_ids (int | list<int>): the summoner ID(s) to get mastery pages for
Returns:
dict<str, MasteryPages>: the requested summoners' mastery pages
"""
# Can only have 40 summoners max if it's a list
if isinstance(summoner_ids, list) and len(summoner_ids) > 40:
raise ValueError("Can only get masteries for up to 40 summoners at once.")
id_string = ",".join(str(x) for x in summoner_ids) if isinstance(summoner_ids, list) else str(summoner_ids)
# Get JSON response
request = "{version}/summoner/{ids}/masteries".format(version=cassiopeia.dto.requests.api_versions["summoner"], ids=id_string)
response = cassiopeia.dto.requests.get(request)
# Convert response to Dto type
for id_, masteries in response.items():
response[id_] = cassiopeia.type.dto.summoner.MasteryPages(masteries)
return response | 55b9395cd452e444f049d05af0718ca847f346d0 | 3,651,937 |
def make_pod_spec(
name,
image_spec,
image_pull_policy,
image_pull_secret,
port,
cmd,
node_selector,
run_as_uid,
fs_gid,
env,
working_dir,
volumes,
volume_mounts,
labels,
cpu_limit,
cpu_guarantee,
mem_limit,
mem_guarantee,
lifecycle_hooks,
init_containers,
):
"""
Make a k8s pod specification for running a user notebook.
Parameters:
- name:
Name of pod. Must be unique within the namespace the object is
going to be created in. Must be a valid DNS label.
- image_spec:
Image specification - usually a image name and tag in the form
of image_name:tag. Same thing you would use with docker commandline
arguments
- image_pull_policy:
Image pull policy - one of 'Always', 'IfNotPresent' or 'Never'. Decides
when kubernetes will check for a newer version of image and pull it when
running a pod.
- image_pull_secret:
Image pull secret - Default is None -- set to your secret name to pull
from private docker registry.
- port:
Port the notebook server is going to be listening on
- cmd:
The command used to execute the singleuser server.
- node_selector:
Dictionary Selector to match nodes where to launch the Pods
- run_as_uid:
The UID used to run single-user pods. The default is to run as the user
specified in the Dockerfile, if this is set to None.
- fs_gid
The gid that will own any fresh volumes mounted into this pod, if using
volume types that support this (such as GCE). This should be a group that
the uid the process is running as should be a member of, so that it can
read / write to the volumes mounted.
- env:
Dictionary of environment variables.
- volumes:
List of dictionaries containing the volumes of various types this pod
will be using. See k8s documentation about volumes on how to specify
these
- volume_mounts:
List of dictionaries mapping paths in the container and the volume(
specified in volumes) that should be mounted on them. See the k8s
documentation for more details
- working_dir:
String specifying the working directory for the notebook container
- labels:
Labels to add to the spawned pod.
- cpu_limit:
Float specifying the max number of CPU cores the user's pod is
allowed to use.
- cpu_guarantee:
Float specifying the max number of CPU cores the user's pod is
guaranteed to have access to, by the scheduler.
- mem_limit:
String specifying the max amount of RAM the user's pod is allowed
to use. String instead of float/int since common suffixes are allowed
- mem_guarantee:
String specifying the max amount of RAM the user's pod is guaranteed
to have access to. String instead of float/int since common suffixes
are allowed
- lifecycle_hooks:
Dictionary of lifecycle hooks
- init_containers:
List of initialization containers belonging to the pod.
"""
api_client = ApiClient()
pod = V1Pod()
pod.kind = "Pod"
pod.api_version = "v1"
pod.metadata = V1ObjectMeta()
pod.metadata.name = name
pod.metadata.labels = labels.copy()
pod.spec = V1PodSpec()
security_context = V1PodSecurityContext()
if fs_gid is not None:
security_context.fs_group = int(fs_gid)
if run_as_uid is not None:
security_context.run_as_user = int(run_as_uid)
pod.spec.security_context = security_context
if image_pull_secret is not None:
pod.spec.image_pull_secrets = []
image_secret = V1LocalObjectReference()
image_secret.name = image_pull_secret
pod.spec.image_pull_secrets.append(image_secret)
if node_selector:
pod.spec.node_selector = node_selector
pod.spec.containers = []
notebook_container = V1Container()
notebook_container.name = "notebook"
notebook_container.image = image_spec
notebook_container.working_dir = working_dir
notebook_container.ports = []
port_ = V1ContainerPort()
port_.name = "notebook-port"
port_.container_port = port
notebook_container.ports.append(port_)
notebook_container.env = [V1EnvVar(k, v) for k, v in env.items()]
notebook_container.args = cmd
notebook_container.image_pull_policy = image_pull_policy
notebook_container.lifecycle = lifecycle_hooks
notebook_container.resources = V1ResourceRequirements()
notebook_container.resources.requests = {}
if cpu_guarantee:
notebook_container.resources.requests['cpu'] = cpu_guarantee
if mem_guarantee:
notebook_container.resources.requests['memory'] = mem_guarantee
notebook_container.resources.limits = {}
if cpu_limit:
notebook_container.resources.limits['cpu'] = cpu_limit
if mem_limit:
notebook_container.resources.limits['memory'] = mem_limit
notebook_container.volume_mounts = volume_mounts
pod.spec.containers.append(notebook_container)
pod.spec.init_containers = init_containers
pod.spec.volumes = volumes
return api_client.sanitize_for_serialization(pod) | 0be1782f91ab4de7a0baf0291eb3fcf9c1fc57a4 | 3,651,938 |
def preprocess(text, remove_punct=False, remove_num=True):
"""
preprocess text into clean text for tokenization
"""
# 1. normalize
text = normalize_unicode(text)
# 2. to lower
text = text.lower()
# 3. space
text = spacing_punctuation(text)
text = spacing_number(text)
# (optional)
if remove_punct:
text = remove_punctuation(text)
# 4. de-contract
text = decontracted(text)
# 5. handle number
if remove_num:
text = remove_number(text)
else:
text = clean_number(text)
# 6. remove space
text = remove_space(text)
return text | 289ed6c3032840191ea792b01cb4b3a17535ddf2 | 3,651,939 |
def verify_ptp_calibration_states(
device, states, domain, max_time=15, check_interval=5
):
""" Verify ptp parent values in show ptp parent command
Args:
device (`obj`): Device object
states ('str): PTP calibration state
domain ('str): PTP domain
max_time (int): Maximum wait time for the trigger,
in second. Default: 15
check_interval (int): Wait time between iterations when looping is needed,
in second. Default: 5
Returns:
True
False
"""
timeout = Timeout(max_time, check_interval)
while timeout.iterate():
out = None
try:
out = device.parse("show ptp brief | ex FA")
except SchemaEmptyParserError:
pass
if out:
result = True
else:
result = False
if result:
return True
timeout.sleep()
return False | 648e9753b418365d8469ce17bc709ad67d814bf6 | 3,651,940 |
def get_auth_use_case():
"""Get use case instance."""
return auth_use_case | a01595d40d2693ff2b4023a8d7938b4af7734ca3 | 3,651,941 |
import glob
import os
import re
def convert_file_link(file):
"""Reads the content of all files matching the file specification (removing
YAML metadata blocks is required) for insertion into the calling file.
Optionally add a separator between each file and/or add a prefix to each
line of the included files.
Args:
file (Match): A Match object corresponding to the file specification
Returns:
str: the concatenated contents of the file specification
"""
incl_file = str(file.group(1))
file_sep = ''
line_prefix = ''
options = ''
# get file separator, if any
if '|' in incl_file:
incl_file, *options = incl_file.split('|')
if len(options) == 1:
file_sep = options[0]
if len(options) == 2:
file_sep = options[0]
line_prefix = options[1]
# create list of files
incl_list = sorted(glob.glob(os.path.normpath(os.path.join(os.getcwd(), incl_file))))
incl_contents = ''
for i, file in enumerate(incl_list):
with open(file, 'r', encoding='utf8') as input_file:
file_contents = input_file.read()
# TODO check contents for file include regex to do nested includes?
# remove YAML header from file
file_metadata, file_contents = split_doc(file_contents)
# process prefix and suffix in included metadata
if file_metadata:
file_contents = file_metadata.get('prefix', '') + file_contents + file_metadata.get('suffix', '')
if not file_contents:
continue
# replace ?{value}
if config['replace']:
file_contents = MetadataReplace(file_contents).safe_substitute(file_metadata)
# add prefix if required
if line_prefix:
file_contents = line_prefix + re.sub('\n', '\n' + line_prefix, file_contents)
if i < len(incl_list) - 1:
file_contents += '\n\n' + file_sep
incl_contents += file_contents + '\n\n'
# return contents of all matched files
return incl_contents | f1af225482e558fe8ef60c98b3f0f5692ae70133 | 3,651,942 |
def pagerank(G, alpha=0.85, personalization=None,
max_iter=100, tol=1.0e-6, nstart=None, weight='weight',
dangling=None):
"""Returns the PageRank of the nodes in the graph.
PageRank computes a ranking of the nodes in the graph G based on
the structure of the incoming links. It was originally designed as
an algorithm to rank web pages.
Parameters
----------
G : graph
A NetworkX graph. Undirected graphs will be converted to a directed
graph with two directed edges for each undirected edge.
alpha : float, optional
Damping parameter for PageRank, default=0.85.
personalization: dict, optional
The "personalization vector" consisting of a dictionary with a
key some subset of graph nodes and personalization value each of those.
At least one personalization value must be non-zero.
If not specfiied, a nodes personalization value will be zero.
By default, a uniform distribution is used.
max_iter : integer, optional
Maximum number of iterations in power method eigenvalue solver.
tol : float, optional
Error tolerance used to check convergence in power method solver.
nstart : dictionary, optional
Starting value of PageRank iteration for each node.
weight : key, optional
Edge data key to use as weight. If None weights are set to 1.
dangling: dict, optional
The outedges to be assigned to any "dangling" nodes, i.e., nodes without
any outedges. The dict key is the node the outedge points to and the dict
value is the weight of that outedge. By default, dangling nodes are given
outedges according to the personalization vector (uniform if not
specified). This must be selected to result in an irreducible transition
matrix (see notes under google_matrix). It may be common to have the
dangling dict to be the same as the personalization dict.
Returns
-------
pagerank : dictionary
Dictionary of nodes with PageRank as value
Examples
--------
>>> G = nx.DiGraph(nx.path_graph(4))
>>> pr = nx.pagerank(G, alpha=0.9)
Notes
-----
The eigenvector calculation is done by the power iteration method
and has no guarantee of convergence. The iteration will stop after
an error tolerance of ``len(G) * tol`` has been reached. If the
number of iterations exceed `max_iter`, a
:exc:`networkx.exception.PowerIterationFailedConvergence` exception
is raised.
The PageRank algorithm was designed for directed graphs but this
algorithm does not check if the input graph is directed and will
execute on undirected graphs by converting each edge in the
directed graph to two edges.
See Also
--------
pagerank_numpy, pagerank_scipy, google_matrix
Raises
------
PowerIterationFailedConvergence
If the algorithm fails to converge to the specified tolerance
within the specified number of iterations of the power iteration
method.
References
----------
.. [1] A. Langville and C. Meyer,
"A survey of eigenvector methods of web information retrieval."
http://citeseer.ist.psu.edu/713792.html
.. [2] Page, Lawrence; Brin, Sergey; Motwani, Rajeev and Winograd, Terry,
The PageRank citation ranking: Bringing order to the Web. 1999
http://dbpubs.stanford.edu:8090/pub/showDoc.Fulltext?lang=en&doc=1999-66&format=pdf
"""
if len(G) == 0:
return {}
if not G.is_directed():
D = G.to_directed()
else:
D = G
# Create a copy in (right) stochastic form
W = nx.stochastic_graph(D, weight=weight)
N = W.number_of_nodes()
# Choose fixed starting vector if not given
if nstart is None:
x = dict.fromkeys(W, 1.0 / N)
else:
# Normalized nstart vector
s = float(sum(nstart.values()))
x = dict((k, v / s) for k, v in nstart.items())
if personalization is None:
# Assign uniform personalization vector if not given
p = dict.fromkeys(W, 1.0 / N)
else:
s = float(sum(personalization.values()))
p = dict((k, v / s) for k, v in personalization.items())
if dangling is None:
# Use personalization vector if dangling vector not specified
dangling_weights = p
else:
s = float(sum(dangling.values()))
dangling_weights = dict((k, v / s) for k, v in dangling.items())
dangling_nodes = [n for n in W if W.out_degree(n, weight=weight) == 0.0]
# power iteration: make up to max_iter iterations
for _ in range(max_iter):
xlast = x
x = dict.fromkeys(xlast.keys(), 0)
danglesum = alpha * sum(xlast[n] for n in dangling_nodes)
for n in x:
# this matrix multiply looks odd because it is
# doing a left multiply x^T=xlast^T*W
for nbr in W[n]:
x[nbr] += alpha * xlast[n] * W[n][nbr][weight]
x[n] += danglesum * dangling_weights.get(n, 0) + (1.0 - alpha) * p.get(n, 0)
# check convergence, l1 norm
err = sum([abs(x[n] - xlast[n]) for n in x])
if err < N * tol:
return x
raise nx.PowerIterationFailedConvergence(max_iter) | 1d6e758275a3caf33049e5c042b7bde8f4cff17d | 3,651,943 |
from pathlib import Path
import zlib
import tqdm
def fetch_file(name, chunksize=16 * 1024):
"""
Fetch a datafile from a compressed/gzipped URL source.
Parameters
----------
name : :class:`str`
Name of the file to fetch.
chunksize : :class:`int`
Number of bytes to read in a chunk.
"""
fp, url, compressed = [
(Path(k), url, compressed)
for (k, (url, compressed)) in MANIFEST.items()
if name.lower() in Path(k).name.lower()
][0]
if "1drv" in url:
url = get_onedrive_directlink(
url
) # allow direct access to file object for 1drv
# construct relative path from this file
local_target = (Path(__file__).parent / fp).resolve()
if not local_target.exists():
if not local_target.parent.exists():
local_target.parent.mkdir(parents=True)
if compressed:
dec = zlib.decompressobj(
32 + zlib.MAX_WBITS
) # offset 32 to skip the header
decompress = dec.decompress
else:
decompress = lambda x: x
with urlopen(url) as response:
pbar = tqdm.tqdm(
total=int(response.headers["content-length"]),
unit="b",
unit_scale=True,
unit_divisor=1024,
desc=str(fp.name),
)
CHUNKSIZE = 16 * 1024
with open(local_target, "wb") as f:
while True:
chunk = response.read(chunksize)
if chunk:
rv = decompress(chunk)
f.write(rv)
pbar.update(len(chunk))
else:
break
return fp | f22eb09220135b542bb3b0e599abe896664dffa3 | 3,651,944 |
def create_include(workflow_stat):
"""
Generates the html script include content.
@param workflow_stat the WorkflowInfo object reference
"""
include_str = """
<script type='text/javascript' src='bc_action.js'>
</script>
<script type='text/javascript' src='bc_""" + workflow_stat.wf_uuid +"""_data.js'>
</script>
"""
return include_str | 24151952c9dd5bc4034916dae90a3760fc06ca44 | 3,651,945 |
import random
def choose_sample_from_group(
group: general.ParameterListType,
) -> general.ParameterValuesType:
"""
Choose single sample from group DataFrame.
"""
# Make continous index from 0
indexes = [idx for idx in range(len(group))]
assert len(indexes) > 0
# Choose from indexes
choice = random.choices(population=indexes, k=1)[0]
# Get the dict at choice index
chosen_dict = group[choice]
assert isinstance(chosen_dict, dict)
return chosen_dict | 27f1c8a9ca4640b881f5bdd3faca0db4b1b882da | 3,651,946 |
def path_available(filepath):
# type: (str) -> bool
"""Return true if filepath is available"""
parent_directory = dirname(filepath)
if not exists(parent_directory):
raise ParentDirectoryDoesNotExist(parent_directory, filepath)
return not exists(filepath) | efd506d2028f2c55e88dfc618395620571205773 | 3,651,947 |
from typing import Dict
from typing import Any
from typing import Callable
def memory_item_to_resource(urn: URN, items: Dict[str, Any] = None, loader: Callable = None) -> CloudWandererResource:
"""Convert a resource and its attributes to a CloudWandererResource.
Arguments:
urn (URN): The URN of the resource.
items (dict): The dictionary of items stored under this URN (secondary attributes, BaseResource).
loader (Callable): The method which can be used to fulfil :meth:`CloudWandererResource.load`.
"""
items = items or {}
attributes = [
attribute
for item_type, attribute in items.items()
if item_type not in ["SubresourceUrns", "BaseResource", "ParentUrn"]
]
base_resource: Dict[str, Any] = next(
iter(resource for item_type, resource in items.items() if item_type == "BaseResource"), {}
)
return CloudWandererResource(
urn=urn,
subresource_urns=items.get("SubresourceUrns"),
resource_data=base_resource,
secondary_attributes=attributes,
loader=loader,
) | 0bf680574f2ef3038d9d29c656a657e4e7a172ec | 3,651,948 |
def sample_user(email='[email protected]', password='testpass'):
"""Create a sample user"""
return get_user_model().objects.create_user(email, password) | deb5c45287a8ff546e2631c4409d10015b550e5c | 3,651,949 |
import PIL
import random
def ShearX(img: Image, magnitude: float) -> Image:
"""Shear the image on x-axis."""
return img.transform(
img.size,
PIL.Image.AFFINE,
(1, magnitude * random.choice([-1, 1]), 0, 0, 1, 0),
PIL.Image.BICUBIC,
fillcolor=FILLCOLOR,
) | 9d534cfc8f7cc5497356b8e07115d42f666aac5d | 3,651,950 |
import subprocess
def run_command_unchecked(command, cwd, env=None):
"""Runs a command in the given dir, returning its exit code and stdio."""
p = subprocess.Popen(
command,
cwd=cwd,
stdin=subprocess.DEVNULL,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
env=env,
)
stdout, _ = p.communicate()
exit_code = p.wait()
return exit_code, stdout.decode('utf-8', 'replace') | 89c6fd0acf7e8bb81c837f78252bdaa30fe39ad1 | 3,651,951 |
import numpy
def read_onsets(onsets_path: PathLike) -> numpy.array:
"""
Read a text file containing onsets. Return it as a list of floats.
"""
with open(onsets_path, "r") as io:
lines = io.readlines()
onsets = numpy.array([float(line) for line in lines])
return onsets | cbfa38ce0a5e2ea4d6c465251ee8c3e6ec47d04f | 3,651,952 |
def format_specific_efficacy(method, type_1: str, type_2: str = None):
""" Format the efficacy string specifically for defense or attack. """
effective, ineffective, useless = format_damage(method, type_1, type_2)
type_name = format_type(type_1, type_2)
s = "**{}** \N{EN DASH} **{}**\n".format(type_name, "DEFENSE" if method is defense_method else "ATTACK")
if effective:
s += "Super effective: `{}`\n".format(", ".join(effective))
if ineffective:
s += "Not very effective: `{}`\n".format(", ".join(ineffective))
if useless:
s += "No effect: `{}`\n".format(", ".join(useless))
return s | 095f943cda0dfdf1803ae38b16c6b9d7f8fd3e1f | 3,651,953 |
import sys
def postfix(itemString):
"""transform infixExpre into postfixExpre
Algorithm:
step1: if operator, stack in;
step2: if "(", stack in.
step3: if variable, append it, then pop out all consecutive unary operators until another operator or "(" is found.
step4: if ")", pop out all operators until "(", then pop all consecutive unary operators.
step5: goto step1.
Arg:
itemString: bitwise expression string presented in infix.
Return:
itemStr: expression string presented in postfix.
"""
itemStr = ""
boperatorList = ["&", "|", "^"]
uoperator = "~"
opeList = []
for (idx, char) in enumerate(itemString):
#open parenthesis, stack it
if char == "(":
opeList.append(char)
#binary operatork, stack it
elif char in boperatorList:
opeList.append(char)
#unary operator
elif uoperator in char:
opeList.append(char)
#closed parenthesis, pop out the operator to string
elif char == ")":
while(opeList and opeList[-1] != "("):
itemStr += opeList[-1]
opeList.pop()
if opeList and opeList[-1] != "(":
print("error!")
sys.exit(0)
#open parenthesis found
opeList.pop()
#unary operator found before open parenthesis
while(opeList and opeList[-1] == "~"):
itemStr += opeList[-1]
opeList.pop()
#variable name found
else:
itemStr += char
#top of stack is unary operator
while(opeList and opeList[-1] in uoperator):
itemStr += opeList[-1]
opeList.pop()
if len(opeList) > 1:
print("error in function postfix!")
sys.exit(0)
#have one operator without parenthesis
elif len(opeList):
itemStr += opeList[0]
return itemStr | 1c3bee30f450c1dfab6ca7d0dd057465d8b6e8e5 | 3,651,954 |
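A brief usage sketch of the conversion above, using single-letter variables: the unary `~` is emitted right after its operand, and parentheses force the `|` to be emitted before the `^`.

```python
assert postfix("~a&b") == "a~b&"       # ~ binds to the variable it precedes
assert postfix("(a|b)^c") == "ab|c^"   # parentheses group a|b before the ^
```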
def getSuffixes(algorithm, seqType) :
""" Get the suffixes for the right algorithm with the right
sequence type
"""
suffixes = {}
suffixes['LAST'] = {}
suffixes['BLAST'] = {}
suffixes['BLAST']['nucl'] = ['nhr', 'nsq', 'nin']
suffixes['BLAST']['prot'] = ['phr', 'psq', 'pin']
suffixes['LAST']['nucl'] = [ 'des', 'sds', 'suf', 'bck', 'prj', 'ssp', 'tis' ]
suffixes['LAST']['prot'] = [ 'des', 'sds', 'suf', 'bck', 'prj', 'ssp', 'tis' ]
if not algorithm in suffixes:
return None
if not seqType in suffixes[algorithm]:
return None
return suffixes[algorithm][seqType] | 9ab699a71be73381c4dff555f0ef19201589e82f | 3,651,955 |
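Illustrative calls against the table built above: a known algorithm/sequence-type pair returns its suffix list, and anything unknown falls through to None.

```python
assert getSuffixes('BLAST', 'nucl') == ['nhr', 'nsq', 'nin']
assert getSuffixes('LAST', 'prot') == ['des', 'sds', 'suf', 'bck', 'prj', 'ssp', 'tis']
assert getSuffixes('DIAMOND', 'prot') is None   # algorithm not in the table
```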
import tempfile
from robocorp_code.path_operations import get_user
from robocorp_code.path_operations import make_numbered_dir_with_cleanup
from robocorp_code.path_operations import LOCK_TIMEOUT
from typing import Optional
from pathlib import Path
def make_numbered_in_temp(
keep: int = 10,
lock_timeout: float = -1,
tmpdir: Optional[Path] = None,
register=None,
) -> Path:
"""
Helper to create a numbered directory in the temp dir with automatic disposal
of old contents.
"""
user = get_user() or "unknown"
temproot = tmpdir if tmpdir else Path(tempfile.gettempdir())
rootdir = temproot / f"robocorp-code-{user}"
rootdir.mkdir(exist_ok=True)
return make_numbered_dir_with_cleanup(
prefix="rcc-",
root=rootdir,
keep=keep,
lock_timeout=lock_timeout if lock_timeout > 0 else LOCK_TIMEOUT,
register=register,
) | 9ba3d08d933d961099d5169afc25c152177857b3 | 3,651,956 |
def handle_server_api(output, kwargs):
""" Special handler for API-call 'set_config' [servers] """
name = kwargs.get('keyword')
if not name:
name = kwargs.get('name')
if name:
server = config.get_config('servers', name)
if server:
server.set_dict(kwargs)
old_name = name
else:
config.ConfigServer(name, kwargs)
old_name = None
Downloader.do.update_server(old_name, name)
return name | 0c4396619c1aee1151642de3edeb4b28d76acb9c | 3,651,957 |
def compare_names(namepartsA, namepartsB):
"""Takes two name-parts lists (as lists of words) and returns a score."""
complement = set(namepartsA) ^ set(namepartsB)
intersection = set(namepartsA) & set(namepartsB)
score = float(len(intersection))/(len(intersection)+len(complement))
return score | 87cbceaaa0acce0b83b5faf66cbe909ad52382eb | 3,651,958 |
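# Added usage sketch, not part of the original record: the score is the Jaccard similarity of the
# two word sets, |A & B| / (|A & B| + |A ^ B|).
assert abs(compare_names(["john", "smith"], ["john", "doe"]) - 1.0 / 3.0) < 1e-9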
def Normal_VaR(return_matrix, theta, Horizon): # 500 data points needed
"""
Compute the Value-at-Risk and Conditional Value-at-Risk
Parameters
----------
risk_returns : np.ndarray
theta : np.float64
Horizon : np.int16
Returns
----------
np.ndarray,np.ndarray VaR , CVaR
"""
mean_forecast,var_forecast,conditional_volatility = Arch_data(return_matrix , Horizon )
excess_innovations = Extract_Excess_Innovations(return_matrix , mean_forecast , conditional_volatility )
mu,scale = Dist_parameters(excess_innovations)
VaR,CVaR = Var_CVaR_extractor(mean_forecast,var_forecast,scale,mu,theta)
return VaR,CVaR | a2b911d647c942724dc30480bb90db7c83e200bb | 3,651,959 |
def oscillator_amplitude(state, ders, period, floquet, zero_phase_lc, phase_warmup_periods=5, thr=0.0, dt=0.005):
"""calculates the isostable amplitude of the oscillator from dynamical equations
:param state: state of the system
:param ders: a list of state variable derivatives
:param period: oscillator period
:param floquet: floquet exponent
:param zero_phase_lc: zero phase limit cycle state
:param phase_warmup_periods: how many periods to wait for evaluating the asymptotic phase shift (default 5)
:param thr: threshold determining zero phase (default 0.0)
:param dt: time step (default 0.005)
:return: isostable amplitude of state"""
# get phase
phase = oscillator_phase(state, ders, period, phase_warmup_periods, thr=thr, dt=dt)
# calculate time to evolve to zero isochron
time = (1-phase/(2*pi))*period
# evolve to 0 isochron
state = integrate_period(state, ders, time, dt)
# amplitude sign
if(inside_limit_cycle(state, ders, period)):
sign = -1
else:
sign = 1
return 0.5*sign*distance(state,zero_phase_lc)*exp(floquet*time) | b6a55d9965eea712be2f49dbbc1f186d268f82bf | 3,651,960 |
def commonprefix(a, b):
"""Find longest common prefix of `a` and `b`."""
pos = 0
length = min(len(a), len(b))
while pos < length and a[pos] == b[pos]:
pos += 1
return pos, b | 75e2f9ac6c3d0c38986cba5f8409ddc87fe8edbe | 3,651,961 |
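# Added usage sketch, not part of the original record: the shared prefix "inter" has length 5 and
# the second argument is returned unchanged.
assert commonprefix("interstellar", "internet") == (5, "internet")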
def parse_datetime(strtime):
"""
Parse a string date, time & tz into a datetime object:
2003-03-20 05:00:00-07
"""
offset = int(strtime[-3:])
date_time = dt.strptime(strtime[:-4], '%Y-%m-%d %H:%M:%S')
offset = timedelta(hours=offset)
return (date_time + offset).replace(tzinfo=utc) | c7537ed913a4d0b20a71b7253725231c32c9f60b | 3,651,962 |
from typing import List
from typing import cast
from typing import Iterable
def traverse_depth_first(base: AnyDependency) -> List[AnyDependency]:
"""Performs a depth first traversal of the dependency tree.
"""
def _traverse_tree_2(base: AnyDependency) -> List[AnyDependency]:
queue: List[AnyDependency] = []
current_idx = 0
queue.append(base)
while len(queue) != current_idx:
node = queue[current_idx]
if not isinstance(node, UnresolvedDependency):
queue.extend(cast(Iterable, node.dependencies))
current_idx += 1
return queue
deps = _traverse_tree_2(base)
return deps | 1172f3b97110cc41c68631d3e6a91a0ea8d20627 | 3,651,963 |
def update_config(
client,
key,
*,
value=None,
remove=False,
global_only=False,
commit_message=None
):
"""Add, update, or remove configuration values."""
section, section_key = _split_section_and_key(key)
if remove:
value = client.remove_value(
section, section_key, global_only=global_only
)
if value is None:
raise errors.ParameterError('Key "{}" not found.'.format(key))
else:
client.set_value(section, section_key, value, global_only=global_only)
return value | 59f71b2608ddcfb38cdf1845720d782b7858607f | 3,651,964 |
from datetime import datetime
def parse_time(t):
""" parse a date time string, or a negative number as
the number of seconds ago.
returns unix timestamp in MS
"""
try:
tint = int(t)
if tint <= 0:
return int(nowms() + (tint * 1000))
except ValueError:
pass
#the parsed date may or may not have a tz; if it does not, localize it.
parsed = dtparse(t)
if not parsed.tzinfo:
parsed = parsed.replace(tzinfo=tzlocal())
#Get the millisec by subtracting epoch in the same tz, then x 1000
return int((parsed - datetime.fromtimestamp(0, parsed.tzinfo)).total_seconds() * 1000) | 68189b1d0aa2f73152a77a1a790fc6a291e5ff25 | 3,651,965 |
import numpy as np
def _get_duration(tmin: np.datetime64, tmax: np.datetime64) -> str:
"""
Determine the duration of the given datetimes.
See also: `ISO 8601 Durations <https://en.wikipedia.org/wiki/ISO_8601#Durations>`_
:param tmin: Time minimum
:param tmax: Time maximum
:return: Temporal resolution formatted as an ISO 8601:2004 duration string
"""
delta = tmax - tmin
day = np.timedelta64(1, 'D')
days = (delta.astype('timedelta64[D]') / day) + 1
return 'P{}D'.format(int(days)) | e56c399402a1325bc519443ea4caea57be2806e7 | 3,651,966 |
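# Added usage sketch, not part of the original record: the duration counts both endpoints, so a
# full January spans 31 days.
assert _get_duration(np.datetime64('2000-01-01'), np.datetime64('2000-01-31')) == 'P31D'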
import math
def get_polyend_circle_angles(a, b, isLeft):
"""
theta0 = pi/2 + betta, theta1 = 2 * pi + betta;
betta = pi/2 - alpha;
alpha = atan(a)
"""
if a is None and b is None:
return None, None
alpha = math.pi / 2.0 if a is None else math.atan(a)
betta = math.pi / 2.0 - alpha
shift = 0.0 if isLeft else math.pi
theta0 = betta + shift
theta1 = theta0 + math.pi
return theta0, theta1 | 9547ba4ea9f74cba3d52d90bb24dc8c4b246fbff | 3,651,967 |
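# Added usage sketch, not part of the original record: a slope of 1 gives alpha = pi/4, so the
# left-hand angles start at betta = pi/4 and the two angles are half a turn apart.
theta0, theta1 = get_polyend_circle_angles(1.0, 0.0, True)
assert math.isclose(theta0, math.pi / 4) and math.isclose(theta1, math.pi / 4 + math.pi)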
import argparse
import pathlib
def main():
"""Main entrypoint."""
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("file", nargs="+", type=pathlib.Path)
args = parser.parse_args()
hooks.utils.check_executable("packer")
return hooks.utils.bulk_check(
packer_fix,
args.file,
) | 025acb7a6ed7b5e5dff9b0fe0ad743616945e2ee | 3,651,968 |
import re
def get_search_cache_key(prefix, *args):
""" Generate suitable key to cache twitter tag context
"""
key = '%s_%s' % (prefix, '_'.join([str(arg) for arg in args if arg]))
not_allowed = re.compile('[^%s]' % ''.join([chr(i) for i in range(33, 128)]))
key = not_allowed.sub('', key)
return key | f3ff5baa13e4e84deb5c13cd8d5b618ba75c8699 | 3,651,969 |
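# Added usage sketch, not part of the original record: falsy arguments are dropped and characters
# outside ASCII 33..127 (such as spaces) are stripped from the key.
assert get_search_cache_key('twitter', 'foo bar', None, 123) == 'twitter_foobar_123'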
def main(argv=None):
"""Run pragma-no-mutate filter with specified command line arguments.
"""
return PragmaNoMutateFilter().main(argv) | f268a010b454fe28307e8e304dca3d57fe1e635a | 3,651,970 |
import math
import numpy as np
def independence_single_value(values, sigma=0.70):
"""
This calculates the independence of the models for a given metric
where the metric is single valued, e.g. the slope of a gradient.
------Input------
values (list) : The single values for each model.
sigma (float) : The value of sigma_s
-----Returns-----
S (np.array 2D) : The inter model similarity
W (np.array 1D) : The weight per model from the similarity calculation
"""
sigma_s = sigma
# Can first calculate inter model distances S and D
S = np.zeros((len(values), len(values)))
# Weightings W dims=num_models
W = np.zeros((len(values), 1))
for i, model_i in enumerate(values):
i_data = model_i
for j, model_j in enumerate(values):
if i != j:
j_data = model_j
s = math.exp(-((i_data - j_data) ** 2).sum() / (1 * sigma_s ** 2))
S[i, j] = s
for ii in range(len(values)):
w = 1 / (1 + np.nansum(S[ii], 0))
W[ii] = w
W /= np.nansum(W)
return S, W | 0802966fed4d9cb5b9e2d525d10593534c5c51a0 | 3,651,971 |
def extract_fingerprints(atoms, i_jbond_dict, radius):
"""Extract the r-radius subgraphs (i.e., fingerprints)
from a molecular graph using Weisfeiler-Lehman algorithm."""
if (len(atoms) == 1) or (radius == 0):
fingerprints = [fingerprint_dict[a] for a in atoms]
else:
nodes = atoms
i_jedge_dict = i_jbond_dict
for _ in range(radius):
"""Update each node ID considering its neighboring nodes and edges
(i.e., r-radius subgraphs or fingerprints)."""
fingerprints = []
for i, j_edge in i_jedge_dict.items():
neighbors = [(nodes[j], edge) for j, edge in j_edge]
fingerprint = (nodes[i], tuple(sorted(neighbors)))
fingerprints.append(fingerprint_dict[fingerprint])
nodes = fingerprints
"""Also update each edge ID considering two nodes
on its both sides."""
_i_jedge_dict = defaultdict(lambda: [])
for i, j_edge in i_jedge_dict.items():
for j, edge in j_edge:
both_side = tuple(sorted((nodes[i], nodes[j])))
edge = edge_dict[(both_side, edge)]
_i_jedge_dict[i].append((j, edge))
i_jedge_dict = _i_jedge_dict
return np.array(fingerprints) | beaa457e0eb514ca7fbfaca846378a0d23c2b94c | 3,651,972 |
import inspect
def GetUniqueClassMembers(Class, Ignore = [], AllowedOverrides = []):
"""
Args:
- Class {object}: reference to the class
    - Ignore {List[str]}: member names to always exclude
    - AllowedOverrides {List[str]}: member names that are always included, even if they exist in the parent class
    Returns: list of (name, reference) tuples for the members unique to Class
"""
Members = inspect.getmembers(Class)
ParentClass = GetClassParents(Class)[0]
    UniqueMembers = [x for x in Members if (not hasattr(ParentClass, x[0]) and x[0] not in Ignore) or x[0] in AllowedOverrides] # and not x[0].startswith("__")
    return UniqueMembers
def get_condition_keys_available_to_raw_arn(db_session, raw_arn):
"""
Get a list of condition keys available to a RAW ARN
:param db_session: SQLAlchemy database session object
:param raw_arn: The value in the database, like arn:${Partition}:s3:::${BucketName}/${ObjectName}
"""
rows = db_session.query(ArnTable).filter(ArnTable.raw_arn.like(raw_arn))
result = rows.first()
if result.condition_keys:
condition_keys = result.condition_keys.split(",")
return condition_keys
else:
return False | 8f8025ffe1fd6f6fa750f826b0a3c5b8a4f655eb | 3,651,974 |
def get_reviewer(form):
""" Gets reviewer info, or adds if necessary
"""
reviewer = Reviewer.query.filter_by(email=form.get("reviewer-email")).first()
if reviewer:
reviewer_id = reviewer.reviewer_id
else:
reviewer_id = add_reviewer(form)
return reviewer_id | 641bb81e73bad7f0eeac8a5cbd5efde499535b77 | 3,651,975 |
def read_xyz(using):
"""Reads coordinates of an xyz file and return a list of |Atom| objects, one for each atom"""
coords = []
with open(using, "r") as f:
for coord in f.readlines()[2:]:
line = coord.split()
for val in PT.ptable.values():
if line[0] == val[0]:
coords.append(
Atom(line[0],
coords=tuple(float(i) for i in line[1:4])))
return coords | 9ed1b0de9fe4bd7bbabe63a2d808b08e44315113 | 3,651,976 |
def initialize_classification(model_name: str,
num_classes: int,
use_pretrained: bool =True
) -> (Module, int):
""" Initialize these variables which will be set in this if statement. Each of these
variables is model specific. The final fully-connected layer will fit the new number
of classes. The weights are initialized with the Xavier algorithm. All biases are
initialized to 0.
Args:
model_name (str): Classification network name in ['vgg', 'alexnet', 'resnet', 'googlenet'].
num_classes (int): The number of classes in dataset.
        use_pretrained (bool): If True, load a model pretrained on ImageNet.
Return:
model (Module): Modified classification network fitting given class number.
input_size (int): input image size for the classification network.
"""
model = None
input_size = None
# VGG-16
if "vgg" in model_name.lower():
model = models.vgg16(pretrained=use_pretrained)
set_parameter_requires_grad(model, True)
num_ftrs = model.classifier[6].in_features
model.classifier[6] = nn.Linear(num_ftrs, num_classes)
nn.init.xavier_uniform_(model.classifier[6].weight)
nn.init.zeros_(model.classifier[6].bias)
input_size = 224
# Alexnet
elif "alexnet" in model_name.lower():
model = models.alexnet(pretrained=use_pretrained)
set_parameter_requires_grad(model, True)
num_ftrs = model.classifier[6].in_features
model.classifier[6] = nn.Linear(num_ftrs, num_classes)
nn.init.xavier_uniform_(model.classifier[6].weight)
nn.init.zeros_(model.classifier[6].bias)
input_size = 224
# Resnet-50
elif "resnet" in model_name.lower():
if '18' in model_name.lower():
model = models.resnet18(pretrained=use_pretrained)
else:
model = models.resnet50(pretrained=use_pretrained)
set_parameter_requires_grad(model, True)
num_ftrs = model.fc.in_features
model.fc = nn.Linear(num_ftrs, num_classes)
nn.init.xavier_uniform_(model.fc.weight)
nn.init.zeros_(model.fc.bias)
input_size = 224
# GoogLeNet
elif "googlenet" in model_name.lower():
model = models.googlenet(pretrained=use_pretrained, aux_logits=True)
set_parameter_requires_grad(model, True)
# Handle the auxilary network
num_ftrs = model.aux1.fc2.in_features
model.aux1.fc2 = nn.Linear(num_ftrs, num_classes)
nn.init.xavier_uniform_(model.aux1.fc2.weight)
nn.init.zeros_(model.aux1.fc2.bias)
num_ftrs = model.aux2.fc2.in_features
model.aux2.fc2 = nn.Linear(num_ftrs, num_classes)
nn.init.xavier_uniform_(model.aux2.fc2.weight)
nn.init.zeros_(model.aux2.fc2.bias)
# Handle the primary network
num_ftrs = model.fc.in_features
model.fc = nn.Linear(num_ftrs, num_classes)
nn.init.xavier_uniform_(model.fc.weight)
nn.init.zeros_(model.fc.bias)
input_size = 224
else:
raise ValueError("Invalid classification network name.")
return model, input_size | 23958e7970022b2c0ed77353fa8b927510873bb7 | 3,651,977 |
def get_csc():
"""get Configuration Client"""
config_host = enstore_functions2.default_host()
config_port = enstore_functions2.default_port()
return configuration_client.ConfigurationClient((config_host,config_port)) | 49c2740ac9a654e700079d15f32421e32f8568c3 | 3,651,978 |
def findx(mu, lnum):
"""Obtains the Hill sphere and x-coordinate for a mu-value and lnum."""
hill = (mu/3)**(1.0/3.0)
if lnum == 1: #lnum is used to request one of the collinear Lagrange points.
guess = 1 - mu - hill * (1 - (1.0/3.0) * hill - (1.0/9.0) * hill ** 2)
elif lnum == 2:
guess = 1 - mu + hill * (1 + (1.0/3.0) * hill - (1.0/9.0) * hill ** 2)
elif lnum == 3:
guess = -1 #I know this isn't the formal guess the Mission Handbook might prescribe, but it should suffice
#as the L3 Lagrange point is the only collinear point with x < 0
else:
return "Invalid"
return optimize.fsolve(xroot, guess, mu, xtol = 0.0)[0], hill | 260f9dda3b5a494df15d3c0bbe7ce0ebd0351c9b | 3,651,979 |
def _f1_div_ ( self , other ) :
"""Operator for ``1D-function / other''"""
return _f1_op_ ( self , other , Ostap.MoreRooFit.Division , "Divide_" ) | 5278ec2036724f0bb263487b8880c16b161d8145 | 3,651,980 |
def test_interrupted_late_wait():
"""Test we can interrupt the wait during the timeout period.
"""
called = 0
def cond():
nonlocal called
called += 1
if called == 3:
return True
job = InstrJob(cond, 0)
assert not job.wait_for_completion(lambda: True, refresh_time=0.1)
assert called == 2 | 737df84c71efdaf0e52be5f42c0ae856f9fb1018 | 3,651,981 |
def set_prior_6(para):
"""
set prior before the first data came in
doc details to be added
"""
n_shape = para['n_shape']
log_prob = [ [] for i_shape in range(n_shape) ]
delta_mean = [ [] for i_shape in range(n_shape) ]
delta_var = [ [] for i_shape in range(n_shape) ]
time_since_last_cp = [ [] for i_shape in range(n_shape) ]
return log_prob, delta_mean, delta_var, time_since_last_cp | e97944e1c48ca6def16308584dfe04eaebae6259 | 3,651,982 |
def inf_set_af2(*args):
"""
inf_set_af2(_v) -> bool
"""
return _ida_ida.inf_set_af2(*args) | c9fa149ca8595d053db4eb4d4113e2493b8665de | 3,651,983 |
import matplotlib.pyplot as plt
import matplotlib
def showOverlapTable(modes_x, modes_y, **kwargs):
"""Show overlap table using :func:`~matplotlib.pyplot.pcolor`. *modes_x*
and *modes_y* are sets of normal modes, and correspond to x and y axes of
the plot. Note that mode indices are incremented by **1**. List of modes
is assumed to contain a set of contiguous modes from the same model.
Default arguments for :func:`~matplotlib.pyplot.pcolor`:
* ``cmap='jet'``
* ``norm=matplotlib.colors.Normalize(0, 1)``"""
if isinstance(modes_x, np.ndarray):
num_modes_x = modes_x.shape[1]
else:
num_modes_x = modes_x.numModes()
if isinstance(modes_y, np.ndarray):
num_modes_y = modes_y.shape[1]
else:
num_modes_y = modes_y.numModes()
overlap = abs(calcOverlap(modes_y, modes_x))
if overlap.ndim == 0:
overlap = np.array([[overlap]])
elif overlap.ndim == 1:
overlap = overlap.reshape((num_modes_y, num_modes_x))
cmap = kwargs.pop('cmap', 'jet')
norm = kwargs.pop('norm', matplotlib.colors.Normalize(0, 1))
if SETTINGS['auto_show']:
plt.figure()
x_range = np.arange(1, num_modes_x+1)
if isinstance(modes_x, ModeSet):
x_ticklabels = modes_x._indices+1
else:
x_ticklabels = x_range
x_ticklabels = kwargs.pop('xticklabels', x_ticklabels)
y_range = np.arange(1, num_modes_y+1)
if isinstance(modes_y, ModeSet):
y_ticklabels = modes_y._indices+1
else:
y_ticklabels = y_range
y_ticklabels = kwargs.pop('yticklabels', y_ticklabels)
if not isinstance(modes_x, np.ndarray):
xlabel = str(modes_x)
else:
xlabel = ''
xlabel = kwargs.pop('xlabel', xlabel)
if not isinstance(modes_y, np.ndarray):
ylabel = str(modes_y)
else:
ylabel = ''
ylabel = kwargs.pop('ylabel', ylabel)
allticks = kwargs.pop('allticks', True)
show = showMatrix(overlap, cmap=cmap, norm=norm,
xticklabels=x_ticklabels, yticklabels=y_ticklabels, allticks=allticks,
**kwargs)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
if SETTINGS['auto_show']:
showFigure()
return show | 86a0853b8caf6ade88185cef17277d01be38ecc7 | 3,651,984 |
import pandas as pd
def json_find_matches_dataframe(df, filter_path, reverse_selectivity=False):
"""Iteratively filters a pandas.DataFrame df using the same sort of
filter_path used by json_extract.
Because of the tabular nature of pandas DataFrames, filters are treated as
being either 'down' or 'check'; a filter either refines both the rows and
columns returned (essentially a 'down' action) or refines only the rows
returned (essentially a 'check' action)."""
for layer in filter_path:
if isinstance(layer, str):
if layer == "!!":
reverse_selectivity = not reverse_selectivity
continue
rows = pd.Series([True] * df.shape[0])
for filt in layer:
new_rows, new_cols = filt.filter_dataframe(df)
rows &= new_rows
if filt.action != "check":
cols = new_cols
else:
cols = df.columns
df = df.loc[rows, cols]
return df | 33f3de47ffbe774d22e2dc9fb7c07f132272452f | 3,651,985 |
import numpy as np
def contrast(arr, amount=0.2, split=0.5, normalize=True):
"""
General contrast booster or diffuser of normalized array-like data.
Parameters
----------
arr : ndarray
Input array (of floats on range [0, 1] if ``normalize=False``). If
values exist outside this range, with ``normalize=True`` the image
will be normalized for calculation.
amount : float or length-2 iterable of floats
Controls the exponential contrast mechanism for values above and below
``split`` in ``I``. If positive, the curve provides added contrast;
if negative, the curve provides reduced contrast.
        If provided as a length-2 iterable of floats, they control the regions
(below, above) ``split`` separately.
split : float
Positive scalar, on range [0, 1], determining the midpoint of the
exponential contrast. Default of 0.5 is reasonable for well-exposed
images.
normalize : bool, default True
Controls normalization to the range [0, 1].
Returns
-------
focused : ndarray
Contrast adjusted, normalized, floating-point image on range [0, 1].
Notes
-----
The result of this algorithm is like applying a Curves adjustment in the
GIMP or Photoshop.
Algorithm for curves adjustment at a given pixel, x, is given by::
| split * (x/split)^below, 0 <= x <= split
y(x) = |
| 1 - (1-split) * ((1-x) / (1-split))^above, split < x <= 1.0
See Also
--------
skfuzzy.fuzzymath.sigmoid
"""
# Ensure scalars are floats, to avoid truncating division in Python 2.x
split = float(split)
im = arr.astype(float)
amount_ = np.asarray(amount, dtype=np.float64).ravel()
if len(amount_) == 1:
# One argument -> Equal amount applied on either side of `split`
above = below = amount_[0]
else:
# Two arguments -> Control contrast separately in light/dark regions
below = amount_[0]
above = amount_[1]
# Normalize if required
if im.max() > 1. and normalize is True:
ma = float(im.max())
im /= float(im.max())
else:
ma = 1.
focused = np.zeros_like(im, dtype=np.float64)
# Simplified array-wise algorithm using fancy indexing rather than looping
focused[im <= split] = split * (im[im <= split] / split) ** below
focused[im > split] = (1 - (1. - split) *
((1 - im[im > split]) / (1. - split)) ** above)
# Reapply multiplicative factor
return focused * ma | 94542fd4df7c65c98f818b652c733ad5a319f449 | 3,651,986 |
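# Added usage sketch, not part of the original record: with the defaults, values on either side of
# split=0.5 follow the curve given in the docstring; this input needs no normalization.
out = contrast(np.array([0.25, 0.75]), amount=0.2, split=0.5)
assert np.allclose(out, [0.5 * 0.5 ** 0.2, 1 - 0.5 * 0.5 ** 0.2])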
def get_group(items, total_groups, group_id):
"""
Get the items from the passed in group based on group size.
"""
if not 0 < group_id <= total_groups:
raise ValueError("Invalid test-group argument")
start, size = get_group_size_and_start(len(items), total_groups, group_id)
selected = items[start : start + size]
deselected = items[:start] + items[start + size :]
assert len(selected) + len(deselected) == len(items)
return selected, deselected | f236c9f26adfa5da5507e7ae91feb8858ac13c6c | 3,651,987 |
def read_nq_entry(entry, is_training):
"""
Converts a NQ entry into a list of NqExamples.
:param entry: dict
:param is_training: bool
:return: list[NqExample]
"""
def is_whitespace(c):
return c in " \t\r\n" or ord(c) == 0x202F
examples = []
contexts_id = entry["id"]
contexts = entry["contexts"]
doc_tokens = []
char_to_word_offset = []
prev_is_whitespace = True
for c in contexts:
if is_whitespace(c):
prev_is_whitespace = True
else:
if prev_is_whitespace:
doc_tokens.append(c)
else:
doc_tokens[-1] += c
prev_is_whitespace = False
char_to_word_offset.append(len(doc_tokens) - 1)
questions = []
for i, question in enumerate(entry["questions"]):
qas_id = "{}".format(contexts_id)
question_text = question["input_text"]
start_position = None
end_position = None
answer = None
if is_training:
answer_dict = entry["answers"][i]
answer = make_nq_answer(contexts, answer_dict)
# For now, only handle extractive, yes, and no.
if answer is None or answer.offset is None:
continue
start_position = char_to_word_offset[answer.offset]
end_position = char_to_word_offset[answer.offset + len(answer.text) - 1]
# Only add answers where the text can be exactly recovered from the
# document. If this CAN'T happen it's likely due to weird Unicode
# stuff so we will just skip the example.
#
# Note that this means for training mode, every example is NOT
# guaranteed to be preserved.
actual_text = " ".join(doc_tokens[start_position:(end_position + 1)])
cleaned_answer_text = " ".join(
tokenization.whitespace_tokenize(answer.text))
if actual_text.find(cleaned_answer_text) == -1:
logger.warning("Could not find answer: '%s' vs. '%s'", actual_text,
cleaned_answer_text)
continue
questions.append(question_text)
example = NqExample(
example_id=int(contexts_id),
qas_id=qas_id,
questions=questions[:],
doc_tokens=doc_tokens,
doc_tokens_map=entry.get("contexts_map", None),
answer=answer,
start_position=start_position,
end_position=end_position)
examples.append(example)
return examples | a712ff6a2714798ee49fd90741f387d8cb3b4695 | 3,651,988 |
def calc_atoms(psi, vol_elem=1.0):
"""Calculate the total number of atoms.
Parameters
----------
psi : :obj:`list` of 2D NumPy :obj:`array` or PyTorch :obj:`Tensor`
The input spinor wavefunction.
vol_elem : :obj:`float`
2D volume element of the space.
Returns
-------
atom_num : :obj:`float`
The total atom number in both spin components.
"""
pops = calc_pops(psi, vol_elem=vol_elem)
atom_num = sum(pops)
return atom_num | 9e9d87c9445a6a03fe245b66c2ce1c104a276e7a | 3,651,989 |
def get_tcp_packet_payload_len(pkt: dpkt.ethernet.Ethernet) -> int:
"""
Return the length of only payload without options
:param pkt: dpkt.ethernet.Ethernet packet containing TCP header
:return: int
"""
if isinstance(pkt, dpkt.ethernet.Ethernet):
ip = pkt.data
elif isinstance(pkt, dpkt.ip.IP):
ip = pkt
else:
return None
return ip.len - (ip.hl * 4 + ip.data.off * 4) | 410ec3f76085647def33572cc35f951462dd9324 | 3,651,990 |
def overviewUsage(err=''):
""" default overview information highlighting active scripts"""
m = '%s\n' %err
m += ' The following scripts allow you to manage Team Branches (TmB) on SalesForce.\n'
m += ' Use one of the scripts below to meet your needs.\n'
m += ' \n'
m += ' 1. First link Task Branches to Team Branches \n'
m += ' teamaddbranch -s4.1 -n<RTL|SI|Timing> -t<Team_branch> -b<branch_Name> \n'
m += ' \n'
m += ' 2. List Task Branches linked to a Team Branches \n'
m += ' teamaddbranch -s4.1 -n<RTL|SI|Timing> -t<Team_branch> -b<branch_Name> -d \n'
m += ' \n'
    m += '   3. Set a priority when linking Task Branches to Team Branches \n'
m += ' teamaddbranch -s4.1 -n<RTL|SI|Timing> -t<Team_branch> -b<branch_Name> -p <low|medium|high|urgent|critical> \n'
m += ' \n'
return m | ba62773dd8be21d17c44e8e295c8228d568512a0 | 3,651,991 |
from math import fabs
def min_distance(z_i, z_j, sc_size):
"""Calculates the minimum distance between the particle at
``z_i`` and all of the images of the particle at ``z_j``,
including this. The minimum distance is always less than
half of the size of the simulation supercell ``sc_size``.
:param z_i:
:param z_j:
:param sc_size:
:return:
"""
sc_half = 0.5 * sc_size
z_ij = z_i - z_j
if fabs(z_ij) > sc_half:
# Take the image.
return -sc_half + (z_ij + sc_half) % sc_size
return z_ij | b585eb8e813ca852c4538eea7a9a6f9028a969d7 | 3,651,992 |
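# Added usage sketch, not part of the original record: in a unit cell, particles at 0.9 and 0.1 are
# only 0.2 apart through the periodic boundary, and the sign reflects taking the image.
from math import isclose
assert isclose(min_distance(0.9, 0.1, 1.0), -0.2)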
import re
def prf(gold: str, pred: str, dic) -> tuple:
"""
    Compute precision (P), recall (R), and F1 for word segmentation.
    :param gold: gold-standard segmentation file, e.g. "商品 和 服务"
    :param pred: predicted segmentation file, e.g. "商品 和服 务"
    :param dic: dictionary used to tell in-vocabulary (IV) from out-of-vocabulary (OOV) words
:return: (P, R, F1, OOV_R, IV_R)
"""
A_size, B_size, A_cap_B_size, OOV, IV, OOV_R, IV_R = 0, 0, 0, 0, 0, 0, 0
with open(gold,encoding='utf8') as gd, open(pred,encoding='utf8') as pd:
for g, p in zip(gd, pd):
A, B = set(to_region(g)), set(to_region(p))
A_size += len(A)
B_size += len(B)
A_cap_B_size += len(A & B)
text = re.sub("\\s+", "", g)
for (start, end) in A:
word = text[start: end]
if dic.containsKey(word):
IV += 1
else:
OOV += 1
for (start, end) in A & B:
word = text[start: end]
if dic.containsKey(word):
IV_R += 1
else:
OOV_R += 1
p, r = A_cap_B_size / B_size * 100, A_cap_B_size / A_size * 100
return p, r, 2 * p * r / (p + r), OOV_R / OOV * 100, IV_R / IV * 100 | a8767bbe4c60eea2433d2c8023a9d7a1af74a4bf | 3,651,993 |
def lorem():
"""Returns some sample latin text to use for prototyping."""
return """
Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do
eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut
enim ad minim veniam, quis nostrud exercitation ullamco laboris
nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor
in reprehenderit in voluptate velit esse cillum dolore eu fugiat
nulla pariatur. Excepteur sint occaecat cupidatat non proident,
sunt in culpa qui officia deserunt mollit anim id est laborum.
""" | 6ddacdb23b7c62cf930e622a7fd801b514a419ae | 3,651,994 |
def read_gene2species(* filenames):
"""
Reads a gene2species file
Returns a function that will map gene names to species names.
"""
for filename in filenames:
maps = []
for filename in filenames:
maps.extend(util.read_delim(util.skip_comments(
util.open_stream(filename))))
return make_gene2species(maps) | 90e58b2089f2561642ac1ba6648256888b931080 | 3,651,995 |
from typing import Dict
from typing import Any
from typing import Optional
from os import path  # path.join is used below; sys.path is a list and has no join
import requests
def ingest(
token: str,
endpoint: str,
method: str = "GET",
time_zone: str = "Asia/Tokyo",
params: Dict[Any, Any] = {},
data: Dict[Any, Any] = {},
) -> Optional[Dict[Any, Any]]:
"""情報を取得する
"""
url = path.join(GRAPH_ENDPOINT, endpoint)
headers = {
"Authorization": f"Bearer {token}",
"Prefer": f'outlook.timezone="{time_zone}"',
}
response = None
if method == "GET":
response = requests.get(url, headers=headers, params=params)
elif method == "POST":
response = requests.post(url, headers=headers, params=params, json=data)
else:
logger.error(f"{method=}は対応していない形式です")
raise RuntimeError()
if response.ok:
return response.json()
else:
status = response.status_code
if status == NOT_FOUND:
logger.warning("一致する情報が見つかりませんでした")
return {}
else:
logger.error(f"リクエストは無効です:{status=}")
response.raise_for_status() | 70bf1de3cb09743f421ae592214a8e058fdb8365 | 3,651,996 |
def make_list_table(headers, data, title='', columns=None):
"""Build a list-table directive.
:param headers: List of header values.
:param data: Iterable of row data, yielding lists or tuples with rows.
:param title: Optional text to show as the table title.
:param columns: Optional widths for the columns.
"""
results = []
add = results.append
add('.. list-table:: %s' % title)
add(' :header-rows: 1')
if columns:
add(' :widths: %s' % (','.join(str(c) for c in columns)))
add('')
add(' - * %s' % headers[0])
for h in headers[1:]:
add(' * %s' % h)
for row in data:
add(' - * %s' % row[0])
for r in row[1:]:
add(' * %s' % r)
add('')
return '\n'.join(results) | 569370b8359ad25bf255f940b5a89d93d896804d | 3,651,997 |
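# Added usage sketch, not part of the original record: the directive is returned as one string with
# the optional :widths: line emitted only when columns are given.
table = make_list_table(['Name', 'Value'], [['foo', 1]], title='Example', columns=[20, 10])
assert table.startswith('.. list-table:: Example')
assert ':widths: 20,10' in table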
def toss_unbaised():
"""
toss 2 times:
assign 0-1 = 0
assign 1-0 = 1
discard 0-0 and 1-1
"""
while True:
first, second = toss_biased(), toss_biased()
if first == 0 and second == 1:
return 0
if first == 1 and second == 0:
return 1 | 971f3365fbc4f0de34cd51c8060aab5c5037c7b2 | 3,651,998 |
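# Added usage sketch, not part of the original record: toss_biased() is not defined in this
# snippet, so the stub below is a hypothetical stand-in (returns 1 with probability 0.7) used only
# to exercise the von Neumann debiasing loop; the debiased 0/1 counts come out roughly balanced.
import random
def toss_biased():
    return 1 if random.random() < 0.7 else 0
counts = [0, 0]
for _ in range(10000):
    counts[toss_unbaised()] += 1
assert abs(counts[0] - counts[1]) < 1000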
def split_val_condition(input_string):
"""
Split and return a {'value': v, 'condition': c} dict for the value and the condition.
Condition is empty if no condition was found.
@param input A string of the form XXX @ YYYY
"""
try:
(value, condition) = [x.strip() for x in input_string.split('@')]
return {'value': value, 'condition': condition}
except ValueError:
# no condition was found
return {'value': input_string.strip(), 'condition': None} | 97c5733a80b3348928b95e2430bf3630867b2050 | 3,651,999 |
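# Added usage sketch, not part of the original record: the condition is None when no '@' is present.
assert split_val_condition("42 @ x > 1") == {'value': '42', 'condition': 'x > 1'}
assert split_val_condition("plain value") == {'value': 'plain value', 'condition': None}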