content (stringlengths 22–815k) | id (int64 0–4.91M)
---|---|
def config_backed(config_path: str):
"""Second order decorator that sets up a backing config for a
GuildState type.
"""
def deco(gs_type: Type[GuildStateTV]) -> Type[GuildStateTV]:
gs_type._cfg_path = config_path
return gs_type
return deco | 5,356,500 |
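A minimal usage sketch for the decorator above, assuming the module's typing names (Type, GuildStateTV) are in scope; MusicState is a hypothetical GuildState subtype used only for illustration.

# Hypothetical GuildState subtype, for illustration only.
@config_backed("guild/music_state")
class MusicState:
    pass

assert MusicState._cfg_path == "guild/music_state"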
def determine_if_is_hmmdb(infp):
"""Return True if the given file is an HMM database (generated using
hmmpress from the HMMer3 software package), and return False otherwise.
"""
    with open(infp, 'r') as fh:
        return fh.readline().startswith('HMMER3/f') | 5,356,501 |
def train_UCSDped2():
# type: () -> None
"""
Performs video anomaly detection tests on UCSD Ped2.
"""
dataset_name = "ucsd_ped2"
#
# lam_svdd_opt(dataset_name)
# window_size_opt(dataset_name)
latent_code_size_opt(dataset_name) | 5,356,502 |
def do_compare_training(
args: argparse.Namespace,
story_file: Text,
additional_arguments: Optional[Dict] = None,
) -> None:
"""Train multiple models for comparison of policies and dumps the result."""
train_comparison_models(
story_file=story_file,
domain=args.domain,
output_path=args.out,
exclusion_percentages=args.percentages,
policy_configs=args.config,
runs=args.runs,
additional_arguments=additional_arguments,
)
no_stories = get_no_of_stories(args.stories, args.domain)
# store the list of the number of stories present at each exclusion
# percentage
story_range = [
no_stories - round((x / 100.0) * no_stories) for x in args.percentages
]
training_stories_per_model_file = os.path.join(
args.out, NUMBER_OF_TRAINING_STORIES_FILE
)
rasa.shared.utils.io.dump_obj_as_json_to_file(
training_stories_per_model_file, story_range
) | 5,356,503 |
def update_state(current_state, log_event):
""" current_state is a LogEvent """ | 5,356,504 |
def assignment_path(base_var: str, path: Sequence[daglish.PathElement]) -> str:
"""Generates the LHS of an assignment, given a traversal path.
Example: ["foo", 3, "bar"] -> "foo[3].bar".
Args:
base_var: Base variable name.
path: Attribute path on `base_var` to assign to.
Returns:
Python code string for the LHS of an assignment.
Raises:
TypeError: If the first path element is not a string, or if any path element
is not a string or an int.
"""
return base_var + "".join(x.code for x in path) | 5,356,505 |
def template_footer(in_template):
"""Extracts footer from the notebook template.
Args:
in_template (str): Input notebook template file path.
Returns:
list: List of lines.
"""
footer = []
template_lines = []
footer_start_index = 0
with open(in_template) as f:
template_lines = f.readlines()
for index, line in enumerate(template_lines):
if '## Display Earth Engine data layers' in line:
footer_start_index = index - 3
footer = ['\n'] + template_lines[footer_start_index:]
return footer | 5,356,506 |
def set_config(config):
""" Updates the current configuration. """
# pylint: disable=unused-argument
pass | 5,356,507 |
def get_public_methods(tree):
""" Return a list of methods marked as public.
The function walks the given tree and extracts
all objects that are functions which are marked
public.
"""
for node in ast.walk(tree):
if is_public(node):
yield node | 5,356,508 |
def conf_auc(test_predictions, ground_truth, bootstrap=1000, seed=None, confint=0.95):
"""Takes as input test predictions, ground truth, number of bootstraps, seed, and confidence interval"""
#inspired by https://stackoverflow.com/questions/19124239/scikit-learn-roc-curve-with-confidence-intervals by ogrisel
bootstrapped_scores = []
rng = np.random.RandomState(seed)
if confint>1:
confint=confint/100
for i in range(bootstrap):
# bootstrap by sampling with replacement on the prediction indices
indices = rng.randint(0, len(test_predictions) - 1, len(test_predictions))
if len(np.unique(ground_truth[indices])) < 2:
continue
score = metrics.roc_auc_score(ground_truth[indices], test_predictions[indices])
bootstrapped_scores.append(score)
sorted_scores = np.array(bootstrapped_scores)
sorted_scores.sort()
lower_bound=(1-confint)/2
upper_bound=1-lower_bound
confidence_lower = sorted_scores[int(lower_bound * len(sorted_scores))]
confidence_upper = sorted_scores[int(upper_bound * len(sorted_scores))]
auc = metrics.roc_auc_score(ground_truth, test_predictions)
print("{:0.0f}% confidence interval for the score: [{:0.3f} - {:0.3}] and your AUC is: {:0.3f}".format(confint*100, confidence_lower, confidence_upper, auc))
confidence_interval = (confidence_lower, auc, confidence_upper)
return confidence_interval | 5,356,509 |
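A hedged usage sketch with synthetic labels and scores; it assumes numpy (np) and sklearn.metrics (metrics) are imported exactly as the function above expects.

import numpy as np
from sklearn import metrics

rng = np.random.RandomState(0)
ground_truth = rng.randint(0, 2, size=500)
test_predictions = ground_truth * 0.6 + rng.rand(500) * 0.4  # noisy but informative scores
lower, auc, upper = conf_auc(test_predictions, ground_truth, bootstrap=200, seed=0)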
def empty_heap():
"""Instantiate a heap for testing."""
from binheap import BinHeap
min_heap = BinHeap()
return min_heap | 5,356,510 |
def get_requires_python(dist):
# type: (pkg_resources.Distribution) -> Optional[str]
"""
Return the "Requires-Python" metadata for a distribution, or None
if not present.
"""
pkg_info_dict = get_metadata(dist)
requires_python = pkg_info_dict.get('Requires-Python')
if requires_python is not None:
# Convert to a str to satisfy the type checker, since requires_python
# can be a Header object.
requires_python = str(requires_python)
return requires_python | 5,356,511 |
def standard_primary_main_prefixes(primary = None):
"""Return list of standard prefixes that may go with particular primary
name.
**Note**
You may wish to use `StandardPrimaryMainPrefixes()` instead.
**Description**
    The function returns a list of main prefixes that may go together
    with the given ``primary`` name. So, if, for example, the ``primary`` is
    ``"PROGRAMS"``, the supported main prefixes will be ``["bin", "sbin",
    "libexec", "pkglibexec", "noinst", "check"]``. If the ``primary`` is
    ``None``, the entire dictionary describing the allowed combinations is
    returned. The dictionary has the form::
        { 'PROGRAMS'  : ["bin","sbin","libexec","pkglibexec","noinst","check"],
          'LIBRARIES' : ["lib","pkglib","noinst","check"], ... }
The lists were developed according to automake's documentation, especially:
- ``PROGRAMS`` : `Defining program sources`_ section,
- ``LIBRARIES`` : `Building a library`_ section,
- ``LTLIBRARIES`` : `Building Libtool Libraries`_ section,
- ``LISP`` : `Emacs Lisp`_ section,
- ``PYTHON`` : `Python`_ section,
- ``JAVA`` : `Java bytecode compilation`_ section,
- ``SCRIPTS`` : `Executable scripts`_ section,
- ``DATA`` : `Architecture-independent data files`_ section,
    - ``HEADERS`` : `Header files`_ section,
- ``MANS`` : `Man pages`_ section,
- ``TEXINFOS`` : `Texinfo`_ section
.. _Defining program sources: http://www.gnu.org/software/automake/manual/automake.html#Program-Sources
.. _Building a library: http://www.gnu.org/software/automake/manual/automake.html#A-Library
.. _Building Libtool Libraries: http://www.gnu.org/software/automake/manual/automake.html#Libtool-Libraries
.. _Emacs Lisp: http://www.gnu.org/software/automake/manual/automake.html#Emacs-Lisp
.. _Python: http://www.gnu.org/software/automake/manual/automake.html#Python
.. _Java bytecode compilation: http://www.gnu.org/software/automake/manual/automake.html#Java
.. _Executable scripts: http://www.gnu.org/software/automake/manual/automake.html#Scripts
.. _Architecture-independent data files: http://www.gnu.org/software/automake/manual/automake.html#Data
.. _Header files: http://www.gnu.org/software/automake/manual/automake.html#Headers
.. _Man pages: http://www.gnu.org/software/automake/manual/automake.html#Man-Pages
.. _Texinfo: http://www.gnu.org/software/automake/manual/automake.html#Texinfo
"""
if primary is None:
return __std_primary_main_prefixes
elif primary in __std_primary_main_prefixes:
return __std_primary_main_prefixes[primary]
else:
return [] | 5,356,512 |
def post_step1(records):
"""Apply whatever extensions we have for GISTEMP step 1, that run
after the main step 1. None at present."""
return records | 5,356,513 |
def gen_report_complex_no_files() -> dp.Report:
"""Generate a complex layout report with simple elements"""
select = dp.Select(blocks=[md_block, md_block], type=dp.SelectType.TABS)
group = dp.Group(md_block, md_block, columns=2)
toggle = dp.Toggle(md_block, md_block)
return dp.Report(
dp.Page(
blocks=[
dp.Group(md_block, md_block, columns=2),
dp.Select(blocks=[md_block, group, toggle], type=dp.SelectType.DROPDOWN),
],
title="Page Uno",
),
dp.Page(
blocks=[
dp.Group(select, select, toggle, columns=2),
dp.Select(blocks=[md_block, md_block, md_block], type=dp.SelectType.TABS),
],
title="Page Duo",
),
dp.Page(
blocks=[
dp.Group(group, group, columns=2),
dp.Select(blocks=[select, select], type=dp.SelectType.TABS),
],
title="Page Tres",
),
) | 5,356,514 |
def test_processing_entry_form(test_client,test_imaged_request_nonadmin):
""" Test that an admin cannot access the processing entry form
for lightserv-test request. This is to avoid a conflict between user and admin
submission for the same processing request"""
imager = current_app.config['IMAGING_ADMINS'][-1]
with test_client.session_transaction() as sess:
sess['user'] = imager
response = test_client.get(url_for('processing.processing_entry',
username='lightserv-test',request_name='nonadmin_request',sample_name='sample-001',
imaging_request_number=1,processing_request_number=1)
, follow_redirects=True)
assert b'The processor has already been assigned for this entry and you are not them' in response.data
assert b'Welcome to the Brain Registration and Histology Core Facility' in response.data
""" Test that lightserv-test can access the processing entry form
for his request"""
with test_client.session_transaction() as sess:
sess['user'] = 'lightserv-test'
response = test_client.get(url_for('processing.processing_entry',
username='lightserv-test',request_name='nonadmin_request',sample_name='sample-001',
imaging_request_number=1,processing_request_number=1)
, follow_redirects=True)
assert b'Processing Entry Form' in response.data
assert b'nonadmin_request' in response.data
""" Test that a nonadmin can submit the processing entry form
for a test sample """
data = {
'image_resolution_forms-0-image_resolution':'1.3x',
'image_resolution_forms-0-ventral_up':0,
'image_resolution_forms-0-channel_forms-0-channel_name':'488',
'image_resolution_forms-0-channel_forms-0-ventral_up':0,
'image_resolution_forms-0-atlas_name':'allen_2017',
'image_resolution_forms-0-image_resolution':'1.3x',
'submit':True
}
username = "lightserv-test"
request_name = "nonadmin_request"
sample_name = "sample-001"
imaging_request_number = 1
processing_request_number = 1
response = test_client.post(url_for('processing.processing_entry',
username=username,request_name=request_name,sample_name=sample_name,
imaging_request_number=imaging_request_number,
processing_request_number=processing_request_number),
data=data,
follow_redirects=True)
assert b"core facility requests" in response.data
assert b"Processing entry form" not in response.data
processing_request_contents = db_lightsheet.Request.ProcessingRequest() & \
f'request_name="{request_name}"' & \
f'username="{username}"' & f'sample_name="{sample_name}"' & \
f'imaging_request_number="{imaging_request_number}"' & \
f'processing_request_number="{processing_request_number}"'
processing_progress = processing_request_contents.fetch1('processing_progress')
assert processing_progress == 'running'
""" Test that the processing entry form shows a flash message
that it is read only if the processing request has already been submitted
"""
response = test_client.get(url_for('processing.processing_entry',
username='lightserv-test',request_name='nonadmin_request',sample_name='sample-001',
imaging_request_number=1,processing_request_number=1)
, follow_redirects=True)
assert b'Processing Entry Form' in response.data
assert b'nonadmin_request' in response.data
warning_message = ("Processing is running for this sample. "
"This page is read only and hitting submit will do nothing")
assert warning_message.encode('utf-8') in response.data
""" Test that the processing entry form redirects
to the processing manager if a post request is received and the entry
form has already been submitted in the past.
"""
response = test_client.post(url_for('processing.processing_entry',
username='lightserv-test',request_name='nonadmin_request',sample_name='sample-001',
imaging_request_number=1,processing_request_number=1),
data = {
'image_resolution_forms-0-image_resolution':'1.3x',
'image_resolution_forms-0-channel_forms-0-channel_name':'488',
'image_resolution_forms-0-atlas_name':'allen_2017',
'image_resolution_forms-0-image_resolution':'1.3x',
'submit':True
}, follow_redirects=True)
assert b'Processing management GUI' in response.data
warning_message = ("Processing is running for this sample. "
"It cannot be re-processed. To open a new processing request, "
"see your request page")
assert warning_message.encode('utf-8') in response.data | 5,356,515 |
def suggested_associations(wiki_title, language='de'):
"""Given a Wikipedia page title, return a list of suggested associations for this entry."""
# The main heuristic to determine relevant associations for a given Wikipedia entry is to first gather all
# articles that this entry's summary links to.
wiki = wikipediaapi.Wikipedia(language)
article = wiki.page(wiki_title)
links = article.links
# We encounter two problems:
# 1. links is a dictionary, so we lose information about the order in which links appear in the article
# 2. We are only interested in links appearing in the article's summary.
# We can overcome this by scraping the article's page ourselves and parsing it.
url = article.fullurl
html = requests.get(url)
bs = BeautifulSoup(html.text, "html.parser")
# The summary comprises all p-elements located before (but in the same hierarchy level as) the table of contents.
toc = bs.find(id='toc')
summary_ps = toc.find_all_previous('p')
# They are currently in reverse order.
summary_ps = list(reversed(summary_ps))
# Collect all links.
summary_as = []
for p in summary_ps:
summary_as += [a for a in p.find_all('a')]
# The link text itself may be an inflection of the article name, which can be accessed by through the 'title'
# attribute.
summary_references = []
for a in summary_as:
# Not all links point to a Wikipedia article, but those that do have a 'title' attribute.
if a.has_attr('title'):
title = a['title']
if title in links:
summary_references.append(links[title])
    # 'summary_references' now contains the list of Wikipedia articles referenced in the summary, in the order
    # of their appearance.
associations = [article.title for article in summary_references]
# We can further improve the quality of the titles by filtering out irrelevant articles.
irrelevant = [
"^Liste",
"^Hilfe:",
"^Datei:",
".*Kalender$",
".*\d{4}.*",
"^\d{1,2}\. \w+$"
]
keep_associations = []
for assoc in associations:
keep = True
for pattern in irrelevant:
regex = re.compile(pattern)
if regex.match(assoc):
keep = False
break
if keep:
keep_associations.append(assoc)
associations = keep_associations
# remove any words in parenthesis
for (i, assoc) in enumerate(associations):
if '(' in assoc:
associations[i] = re.sub(" \(.*\)", '', assoc)
return associations | 5,356,516 |
def manage_categories():
"""
Manage expense categories
"""
alert_message = ""
user = User.query.filter_by(id=session["user_id"]).scalar()
if request.method == "GET":
with app.app_context():
categories = (
Category.query.options(joinedload("category_type"))
.options(joinedload("account"))
.filter(Category.user_id == session["user_id"])
.all()
)
return render_template(
"categories.html",
username=user.username,
alert_message=alert_message,
categories=categories,
)
if request.method == "POST":
category_id = request.form.get("edit")
with app.app_context():
category = (
Category.query.options(joinedload("category_type"))
.options(joinedload("account"))
.filter(Category.user_id == session["user_id"])
.filter(Category.id == category_id)
.scalar()
)
category_types = CategoryType.query.all()
accounts = Account.query.filter_by(user_id=session["user_id"]).all()
return render_template(
"edit_category.html",
username=user.username,
category=category,
category_types=category_types,
accounts=accounts,
) | 5,356,517 |
def shear_3d(sxy=0., sxz=0., syx=0., syz=0., szx=0., szy=0.):
"""
Returns transformation matrix for 3d shearing.
Args:
sxy: xy shearing factor
sxz: xz shearing factor
syx: yx shearing factor
syz: yz shearing factor
szx: zx shearing factor
szy: zy shearing factor
Returns:
A 4x4 float32 transformation matrix.
"""
matrix = jnp.array([[ 1, sxy, sxz, 0],
[syx, 1, syz, 0],
[szx, szy, 1, 0],
[ 0, 0, 0, 1]], dtype='float32')
return matrix | 5,356,518 |
def Position(context):
"""Function: <number> position()"""
return context.position | 5,356,519 |
def spatial_shift_crop_list(size, images, spatial_shift_pos, boxes=None):
"""
Perform left, center, or right crop of the given list of images.
Args:
size (int): size to crop.
        images (list): list of images to perform short side scale. Dimension is
`height` x `width` x `channel` or `channel` x `height` x `width`.
spatial_shift_pos (int): option includes 0 (left), 1 (middle), and
2 (right) crop.
boxes (list): optional. Corresponding boxes to images.
Dimension is `num boxes` x 4.
Returns:
cropped (ndarray): the cropped list of images with dimension of
`height` x `width` x `channel`.
boxes (list): optional. Corresponding boxes to images. Dimension is
`num boxes` x 4.
"""
assert spatial_shift_pos in [0, 1, 2]
height = images[0].shape[0]
width = images[0].shape[1]
y_offset = int(math.ceil((height - size) / 2))
x_offset = int(math.ceil((width - size) / 2))
if height > width:
if spatial_shift_pos == 0:
y_offset = 0
elif spatial_shift_pos == 2:
y_offset = height - size
else:
if spatial_shift_pos == 0:
x_offset = 0
elif spatial_shift_pos == 2:
x_offset = width - size
cropped = [
image[y_offset : y_offset + size, x_offset : x_offset + size, :]
for image in images
]
assert cropped[0].shape[0] == size, "Image height not cropped properly"
assert cropped[0].shape[1] == size, "Image width not cropped properly"
if boxes is not None:
for i in range(len(boxes)):
boxes[i][:, [0, 2]] -= x_offset
boxes[i][:, [1, 3]] -= y_offset
return cropped, boxes | 5,356,520 |
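A quick sketch of a left crop on random HWC frames; assumes numpy (np) and math are imported as the function requires.

import math
import numpy as np

frames = [np.random.rand(128, 171, 3) for _ in range(8)]
cropped, _ = spatial_shift_crop_list(112, frames, spatial_shift_pos=0)
print(cropped[0].shape)  # (112, 112, 3)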
def get_input_args():
"""
Used to parse the command line arguments in order to predict the flower name and the class probability.
Options:
        Return top K most likely classes: python predict.py input checkpoint --top_k 3
Use a mapping of categories to real names: python predict.py input checkpoint --category_names cat_to_name.json
Use GPU for inference: python predict.py input checkpoint --gpu
"""
    # Create the parser using ArgumentParser
parser = argparse.ArgumentParser(
description='Process Image Folder, CNN Model Architecture, Set hyper parameters')
parser.add_argument('single_image', metavar='single_image', type=str, nargs=1,
help='a single image for which the flower name and the class probability is to be predicted')
parser.add_argument('checkpoint', metavar='checkpoint', type=str, nargs=1,
help='The checkpoint from which the model is re-built for the prediction')
parser.add_argument('--top_k', type=int, default='3',
help='The number of most likely classes with default value \'3\'')
parser.add_argument('--category_names', type=str, default='cat_to_name.json',
help='A file mapping of categories to real names with default value \'cat_to_name.json\'')
parser.add_argument('--gpu', action='store_true',
help='If available then the GPU will be used, else not')
return parser.parse_args() | 5,356,521 |
def testsuite_results(log_filename, msg_testsuite_section_start,
msg_testsuite_end_message):
"""Read the NEST Travis CI build log file, find the 'make-installcheck'
section which runs the NEST test suite. Extract the total number of tests
and the number of tests failed. Return True if all tests passed
successfully and False in case one or more tests failed. Additionally the
total number of tests performed and the number of tests failed are
returned.
Parameters
----------
log_filename: NEST Travis CI build log file name.
msg_testsuite_section_start: Message number string, e.g. "MSGBLD1234".
msg_testsuite_end_message: Message number string, e.g. "MSGBLD1234".
Returns
-------
True or False.
Total number of tests.
Number of tests failed.
"""
in_installcheck_section = False
in_results_section = False
total_number_of_tests = None
number_of_tests_failed = None
status_tests = None
with open(log_filename) as fh:
for line in fh:
if is_message(line, msg_testsuite_section_start):
in_installcheck_section = True
if in_installcheck_section:
if line.strip() == "NEST Testsuite Summary":
in_results_section = True
if in_results_section:
if "Total number of tests:" in line:
total_number_of_tests = int(line.split(' ')[-1])
if "Failed" in line:
number_of_tests_failed = \
[int(s) for s in line.split() if s.isdigit()][0]
if is_message(line, msg_testsuite_end_message):
if number_of_tests_failed == 0:
status_tests = True
else:
status_tests = False
# The log file contains only one 'make-installcheck'
# section. Stop reading the log file.
break
return status_tests, total_number_of_tests, number_of_tests_failed | 5,356,522 |
def line_state_to_out(line: StaticStates, out_data: bool):
"""
Calculate the data and enable values given a initial state
Args:
line: StaticState that represent the line
out_data: If line value is 2 it will be returned as the next value of data
Returns:
Data and Enable values for the next iteration
"""
data = False
enable = False
if line.value == 0:
data = False
enable = True
elif line.value == 1:
data = True
enable = True
elif line.value == 2:
data = out_data
enable = False
return data, enable | 5,356,523 |
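A small sketch exercising the three states; the StaticStates enum below is a hypothetical stand-in for the one referenced in the signature (only the .value members matter).

from enum import Enum

class StaticStates(Enum):  # hypothetical stand-in, illustration only
    LOW = 0
    HIGH = 1
    HIGH_IMPEDANCE = 2

print(line_state_to_out(StaticStates.LOW, out_data=True))             # (False, True)
print(line_state_to_out(StaticStates.HIGH, out_data=False))           # (True, True)
print(line_state_to_out(StaticStates.HIGH_IMPEDANCE, out_data=True))  # (True, False)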
def GetProxyConfig(http_proxy_uri=None, https_proxy_uri=None, cafile=None,
disable_certificate_validation=None):
"""Returns an initialized ProxyConfig for use in testing.
Args:
http_proxy_uri: A str containing the full URI for the http proxy host. If
this is not specified, the ProxyConfig will be initialized without an
HTTP proxy configured.
https_proxy_uri: A str containing the full URI for the https proxy host. If
this is not specified, the ProxyConfig will be initialized without an
HTTPS proxy configured.
cafile: A str containing the path to a custom ca file.
disable_certificate_validation: A boolean indicating whether or not to
disable certificate validation.
Returns:
An initialized ProxyConfig using the given configurations.
"""
return googleads.common.ProxyConfig(
http_proxy_uri, https_proxy_uri, cafile=cafile,
disable_certificate_validation=disable_certificate_validation) | 5,356,524 |
def run_multiple_stage_cases(env, extra_data):
"""
extra_data can be 2 types of value
    1. as dict: mandatory keys: "name" and "child case num", optional keys: "reset" and others
    2. as list of string or dict:
       [case1, case2, case3, {"name": "restart from PRO CPU", "child case num": 2}, ...]
:param env: test env instance
:param extra_data: the case name or case list or case dictionary
:return: None
"""
case_config = format_test_case_config(extra_data)
# we don't want stop on failed case (unless some special scenarios we can't handle)
# this flag is used to log if any of the case failed during executing
# Before exit test function this flag is used to log if the case fails
failed_cases = []
for ut_config in case_config:
Utility.console_log("Running unit test for config: " + ut_config, "O")
dut = env.get_dut("unit-test-app", app_path=ut_config)
if len(case_config[ut_config]) > 0:
replace_app_bin(dut, "unit-test-app", case_config[ut_config][0].get('app_bin'))
dut.start_app()
for one_case in case_config[ut_config]:
junit_test_case = TinyFW.JunitReport.create_test_case("[{}] {}".format(ut_config, one_case["name"]))
try:
run_one_multiple_stage_case(dut, one_case, junit_test_case)
except TestCaseFailed:
failed_cases.append(one_case["name"])
except Exception as e:
junit_test_case.add_failure_info("Unexpected exception: " + str(e))
finally:
TinyFW.JunitReport.test_case_finish(junit_test_case)
# raise exception if any case fails
if failed_cases:
Utility.console_log("Failed Cases:", color="red")
for _case_name in failed_cases:
Utility.console_log("\t" + _case_name, color="red")
raise AssertionError("Unit Test Failed") | 5,356,525 |
def get_possible_paths(path: str) -> List[str]:
"""
Finds possible paths to resources, considering PACKAGE and USER directories first, then system-wide directories
:param path:
:return:
"""
# <sphinx_resources-get_possible_paths>
dist_name = env.distribution_name() # RKD_DIST_NAME env variable
paths = [
# eg. ~/.local/share/rkd/banner.txt
os.path.expanduser(('~/.local/share/%s/' + path) % dist_name),
# eg. /home/andrew/.local/lib/python3.8/site-packages/rkd/misc/banner.txt
(get_user_site_packages() + '/%s/misc/' + path) % dist_name,
# eg. /usr/lib/python3.8/site-packages/rkd/misc/banner.txt
(_get_global_site_packages() + '/%s/misc/' + path) % dist_name,
# eg. /usr/share/rkd/banner.txt
('/usr/share/%s/' + path) % dist_name
]
# </sphinx_resources-get_possible_paths>
# eg. ./rkd/misc/banner.txt
global_module_path = _get_current_script_path() + '/misc/' + path
# installed module directory should be less important to allow customizations
if "site-packages" in global_module_path:
paths.append(global_module_path)
else: # local development directory
paths = [global_module_path] + paths
return paths | 5,356,526 |
def test_parametrized_collected_from_command_line(testdir):
"""Parametrized test not collected if test named specified
in command line issue#649.
"""
py_file = testdir.makepyfile(
"""
import pytest
@pytest.mark.parametrize("arg", [None, 1.3, "2-3"])
def test_func(arg):
pass
"""
)
file_name = os.path.basename(py_file.strpath)
rec = testdir.inline_run(file_name + "::" + "test_func")
rec.assertoutcome(passed=3) | 5,356,527 |
def inchi_key_to_chembl(inchi_keys):
"""Return list of chembl ids that positionally map to inchi keys."""
molecule = new_client.molecule
chembl_mols = []
ndone = 0 # counter for printing progress to console
for inchi_key in inchi_keys:
if pd.isnull(inchi_key):
chembl_mols.append('')
continue
try:
mol = molecule.get(inchi_key)
if mol and mol['molecule_chembl_id']:
chembl_mols.append(mol['molecule_chembl_id'])
else:
chembl_mols.append('')
except:
chembl_mols.append('')
print('in error: ' + inchi_key)
# increment progress tracker and print after 100th id conversion
ndone += 1
if ndone % 100 == 0:
            print('... completed ' + str(ndone) + ' / ' + str(len(inchi_keys)))
return chembl_mols | 5,356,528 |
def do_kube_version_show(cc, args):
"""Show kubernetes version details"""
try:
version = cc.kube_version.get(args.version)
_print_kube_version_show(version)
except exc.HTTPNotFound:
raise exc.CommandError('kubernetes version not found: %s' %
args.version) | 5,356,529 |
def StandaloneStyle(cfg):
"""
    Construct an OWS style object that stands alone, independent of a complete OWS configuration environment.
:param cfg: A valid OWS Style definition configuration dictionary.
Refer to the documentation for the valid syntax:
https://datacube-ows.readthedocs.io/en/latest/cfg_styling.html
    :return: An OWS Style Definition object, prepared to work in standalone mode.
"""
style = StyleDefBase(StandaloneProductProxy(), cfg, stand_alone=True)
style.make_ready(None)
return style | 5,356,530 |
def __save_cnf_matrix(dataset_id, round_id, part, y_names, cnf_matrix):
"""
save confusion matrix
:param dataset_id: dataset id
:param round_id: round id
:param part: 'eval' or 'test'
:param y_names: y labels
:param cnf_matrix: confusion matrix (actual / predict)
:return: None
"""
pickle.dump([y_names, cnf_matrix], open(get_dataset_folder(dataset_id) + '/predict/%s_%s_cnf.pkl' % (round_id, part), 'wb')) | 5,356,531 |
def insert_or_test_version_number():
"""Should the format name and version number be inserted in text
representations (not in tests!)"""
return INSERT_AND_CHECK_VERSION_NUMBER | 5,356,532 |
def set_seed(seed):
"""
Setting random seeds
"""
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(seed) | 5,356,533 |
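A minimal reproducibility check for the helper above, assuming numpy (np) and torch are imported as it requires.

import numpy as np
import torch

set_seed(42)
a = np.random.rand(3), torch.rand(3)
set_seed(42)
b = np.random.rand(3), torch.rand(3)
assert np.allclose(a[0], b[0]) and torch.equal(a[1], b[1])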
def add_login_routes(app):
"""
initializes this flask for providing access to /login and /logout
:param app:
:return:
"""
manager().add_login_routes(app) | 5,356,534 |
def IsInteractive(output=False, error=False, heuristic=False):
"""Determines if the current terminal session is interactive.
sys.stdin must be a terminal input stream.
Args:
output: If True then sys.stdout must also be a terminal output stream.
error: If True then sys.stderr must also be a terminal output stream.
heuristic: If True then we also do some additional heuristics to check if
we are in an interactive context. Checking home path for example.
Returns:
True if the current terminal session is interactive.
"""
if not sys.stdin.isatty():
return False
if output and not sys.stdout.isatty():
return False
if error and not sys.stderr.isatty():
return False
if heuristic:
# Check the home path. Most startup scripts for example are executed by
# users that don't have a home path set. Home is OS dependent though, so
# check everything.
# *NIX OS usually sets the HOME env variable. It is usually '/home/user',
# but can also be '/root'. If it's just '/' we are most likely in an init
# script.
# Windows usually sets HOMEDRIVE and HOMEPATH. If they don't exist we are
# probably being run from a task scheduler context. HOMEPATH can be '\'
# when a user has a network mapped home directory.
# Cygwin has it all! Both Windows and Linux. Checking both is perfect.
home = os.getenv('HOME')
homepath = os.getenv('HOMEPATH')
if not homepath and (not home or home == '/'):
return False
return True | 5,356,535 |
def fulfillment(ctx, action, shop, tracking, provider, order_id, filename, message):
"""
add/edit shipping tracking (by skus or by file)
"""
    click.echo('{} fulfillment at {}... send message \
to customer: {}'.format(action, shop, message))
project = ctx.obj['project']
config = ctx.obj["config"]
if config is None:
click.secho('create project first',
fg='yellow')
return
project.initialize_project(config)
table_classes = project.get_table_objects()
sessionmaker = project.get_session_maker()
session = sessionmaker()
shop_conn = project.get_shop_connector(shop)
if shop_conn is None:
click.secho('shop setting not found: {}'.format(shop),
fg='yellow')
session.close()
return
get_front_shop_id(table_classes, session, shop_conn)
if action == 'add':
_add_tracking(session, table_classes, shop_conn, order_id,
tracking, provider, filename, message=message)
elif action == 'edit':
_update_fulfillment(session, table_classes, shop_conn, order_id,
tracking, provider, filename, message=message)
elif action == 'show':
shipment = None
if tracking is not None:
shipment = get_shipping_info(table_classes, session,
{'tracking_id': tracking})
else:
shipment = get_shipping_info(table_classes, session,
{'destination_order_id': order_id,
'front_shop_id': shop_conn.front_shop_id})
click.echo(shipment)
else:
click.secho('action not supported: {}'.format(action),
fg='yellow')
session.close() | 5,356,536 |
def get_ipaserver_host():
"""Return the fqdn of the node hosting the IPA_SERVER.
"""
for node in svars['nodes']:
if 'ipaserver' in node['groups']:
return fqdn(node['name']) | 5,356,537 |
def reverse_file_read(fp):
"""
a generator that returns the lines of a file in reverse order
"""
line = ''
fp.seek(0, os.SEEK_END)
offset = fp.tell()
while offset > 0:
offset = max(0, offset - 1)
fp.seek(offset)
byte = fp.read(1)
if byte == '\n':
yield line
line = ''
else:
line = byte + line
yield line | 5,356,538 |
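A usage sketch that walks a small text file from the last line to the first; it assumes os is imported (the generator uses os.SEEK_END) and writes a throwaway file just for the demonstration.

import os

with open("example.txt", "w") as out:
    out.write("first\nsecond\nthird")

with open("example.txt") as fp:
    for line in reverse_file_read(fp):
        print(line)  # prints: third, second, first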
def hsv_to_rgb(image):
"""
Convert HSV img to RGB img.
Args:
image (numpy.ndarray): NumPy HSV image array of shape (H, W, C) to be converted.
Returns:
        numpy.ndarray, NumPy RGB image with the same shape as the input image.
"""
h, s, v = image[:, :, 0], image[:, :, 1], image[:, :, 2]
to_rgb = np.vectorize(colorsys.hsv_to_rgb)
r, g, b = to_rgb(h, s, v)
return np.stack((r, g, b), axis=2) | 5,356,539 |
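A short sketch converting a random HSV image with values in [0, 1]; assumes numpy (np) and colorsys are imported as the function requires.

import colorsys
import numpy as np

hsv = np.random.rand(4, 4, 3)  # H, S, V all in [0, 1]
rgb = hsv_to_rgb(hsv)
print(rgb.shape)  # (4, 4, 3)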
def dom_to_tupletree(node):
"""Convert a DOM object to a pyRXP-style tuple tree.
Each element is a 4-tuple of (NAME, ATTRS, CONTENTS, None).
Very nice for processing complex nested trees.
"""
if node.nodeType == node.DOCUMENT_NODE:
# boring; pop down one level
return dom_to_tupletree(node.firstChild)
assert node.nodeType == node.ELEMENT_NODE
name = node.nodeName
attrs = {}
contents = []
for child in node.childNodes:
if child.nodeType == child.ELEMENT_NODE:
contents.append(dom_to_tupletree(child))
elif child.nodeType == child.TEXT_NODE:
assert is_text(child.nodeValue), \
"text node %s is not a string" % repr(child)
contents.append(child.nodeValue)
elif child.nodeType == child.CDATA_SECTION_NODE:
contents.append(child.nodeValue)
else:
raise RuntimeError("can't handle %s" % child)
for i in range(node.attributes.length):
attr_node = node.attributes.item(i)
attrs[attr_node.nodeName] = attr_node.nodeValue
# XXX: Cannot yet handle comments, cdata, processing instructions and
# other XML batshit.
# it's so easy in retrospect!
return name, attrs, contents, None | 5,356,540 |
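A usage sketch with xml.dom.minidom; the is_text helper asserted above is assumed to come from the surrounding module.

from xml.dom.minidom import parseString

doc = parseString('<CLASS NAME="CIM_Foo"><PROPERTY>42</PROPERTY></CLASS>')
print(dom_to_tupletree(doc))
# ('CLASS', {'NAME': 'CIM_Foo'}, [('PROPERTY', {}, ['42'], None)], None)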
def import_bom_rf3(filename, **kwargs):
"""Import a NetCDF radar rainfall product from the BoM Rainfields3.
Parameters
----------
filename : str
Name of the file to import.
Returns
-------
out : tuple
A three-element tuple containing the rainfall field in mm/h imported
from the Bureau RF3 netcdf, the quality field and the metadata. The
quality field is currently set to None.
"""
if not netcdf4_imported:
raise MissingOptionalDependency(
"netCDF4 package is required to import BoM Rainfields3 products "
"but it is not installed"
)
R = _import_bom_rf3_data(filename)
geodata = _import_bom_rf3_geodata(filename)
metadata = geodata
# TODO(import_bom_rf3): Add missing georeferencing data.
metadata["transform"] = None
metadata["zerovalue"] = np.nanmin(R)
if np.any(np.isfinite(R)):
metadata["threshold"] = np.nanmin(R[R > np.nanmin(R)])
else:
metadata["threshold"] = np.nan
return R, None, metadata | 5,356,541 |
def pipe_collapse(fq, outdir, gzipped=True):
"""
Collapse, by sequence
"""
fname = filename(fq)
check_path(outdir)
    fq_out = collapse_fx(fq, outdir, gzipped=gzipped)
stat_fq(fq_out)
# 1U 10A
fq_list = split_fq_1u10a(fq_out)
for f in fq_list:
stat_fq(f)
# wrap stat
df = wrap_stat_fq(outdir)
return [fq_out, df] | 5,356,542 |
def render_user_weekly_artists(report, file_path, size=(420, 300)):
"""
:type report: temperfm.records.UserWeeklyArtistReport
:type file_path: str
:type size: tuple[int, int]
"""
margin_size = 60, 35
graph_size = size[0] - margin_size[0], size[1] - margin_size[1]
graph_border = 2
font_size = 12
weekly_scores = _get_weekly_score_totals(report)
weekly_plays = _get_weekly_total_plays(report)
weekly_plays_max = max(weekly_plays)
graph_week_span = graph_size[0] / (len(weekly_scores) - 1)
# Normalize and sanitize scores
for i, week_scores in enumerate(weekly_scores):
score_sum = sum(week_scores)
# Sanitize zero sum week scores
j = i
while score_sum == 0:
if i == 0:
score_sum = len(week_scores)
week_scores = [1] * score_sum
else:
j -= 1
week_scores = weekly_scores[j]
score_sum = sum(week_scores)
# Normalize
weekly_scores[i] = [score / score_sum for score in week_scores]
# Build cluster paths
cluster_positions = []
last_scores = [0] * len(weekly_scores)
""":type: list[float]"""
for i in range(len(report.clusters) - 1):
scores = [week_scores[i] for week_scores in weekly_scores]
scores = [scores[j] + last_scores[j] for j in range(len(scores))]
cluster_positions.append([score * graph_size[1] for score in scores])
last_scores = scores
cluster_positions.append([graph_size[1]] * len(weekly_scores))
# Begin drawing
dwg = svgwrite.Drawing(file_path, size=size)
graph_clip = dwg.clipPath(id='graph_clip')
graph_clip.add(dwg.rect((0, 0), graph_size))
dwg.defs.add(graph_clip)
graph = dwg.g(clip_path='url(#graph_clip)')
graph.translate(margin_size[0] + graph_border)
dwg.add(graph)
# Cluster fill
points_prev = [(0, 0), (graph_size[0], 0)]
for i, positions in enumerate(cluster_positions):
cluster = report.clusters[i]
x_span = graph_size[0] / (len(positions) - 1)
points = [(x * x_span, y) for x, y in enumerate(positions)]
commands = [f'M0,{points[0][1]}'] + _get_monotonic_spline_commands(points) + \
[f'L{graph_size[0]},{points_prev[-1][1]}'] + _get_monotonic_spline_commands(list(reversed(points_prev)))
graph.add(dwg.path(commands, fill=_color_to_css(cluster.color)))
points_prev = points
# # Play counts
for i in range(len(weekly_plays)):
width = int(graph_week_span) - 4
height = int((weekly_plays[i] / weekly_plays_max) * graph_size[1] * 0.9)
x = int(((graph_week_span * i) - (graph_week_span / 2))) + 2
y = int((graph_size[1] / 2) - (height / 2))
graph.add(dwg.rect((x, y), (width, height), fill=_color_to_css((0.94, 0.94, 0.94)), fill_opacity=0.14))
# Cluster stroke
for i, positions in enumerate(cluster_positions[:-1]):
x_span = graph_size[0] / (len(positions) - 1)
points = [(x * x_span, y) for x, y in enumerate(positions)]
commands = [f'M0,{points[0][1]}'] + _get_monotonic_spline_commands(points)
graph.add(dwg.path(commands, stroke=_color_to_css((255, 255, 255)), stroke_width=1, fill_opacity=0))
# # Cluster labels
last_y = 0
for i, cluster in enumerate(report.clusters):
y = cluster_positions[i][0]
if i > 0:
y += cluster_positions[i - 1][0]
y = max((last_y + font_size, y / 2))
last_y = y
_draw_text(dwg, cluster.name, (margin_size[0], y), cluster.color, size=font_size)
# Modulo to skip every n week date labels. 1 + floor(1 / (available space / required space))
skip_mod = 1 + int(1 / ((graph_size[0] / (font_size * (len(report.artist_weekly)))) / (font_size * 0.175)))
# Week date labels
for i in range(len(report.artist_weekly)):
if (len(report.artist_weekly) - i - 1) % skip_mod != 0:
continue
date = min((report.start_date + datetime.timedelta(days=(i * 7) + 6), datetime.date.today()))
date_str = date.strftime('%b-%d')
_draw_text(dwg, date_str, ((graph_week_span * i) + margin_size[0], graph_size[1] + font_size - graph_border),
(0.21, 0.21, 0.21), size=font_size)
dwg.save() | 5,356,543 |
def multi_bw(init, y, X, n, k, family, tol, max_iter, rss_score, gwr_func,
bw_func, sel_func, multi_bw_min, multi_bw_max, bws_same_times,
verbose=False):
"""
Multiscale GWR bandwidth search procedure using iterative GAM backfitting
"""
if init is None:
bw = sel_func(bw_func(y, X))
optim_model = gwr_func(y, X, bw)
else:
bw = init
optim_model = gwr_func(y, X, init)
bw_gwr = bw
err = optim_model.resid_response.reshape((-1, 1))
param = optim_model.params
XB = np.multiply(param, X)
if rss_score:
rss = np.sum((err)**2)
iters = 0
scores = []
delta = 1e6
BWs = []
bw_stable_counter = 0
bws = np.empty(k)
gwr_sel_hist = []
try:
from tqdm.auto import tqdm #if they have it, let users have a progress bar
except ImportError:
def tqdm(x, desc=''): #otherwise, just passthrough the range
return x
for iters in tqdm(range(1, max_iter + 1), desc='Backfitting'):
new_XB = np.zeros_like(X)
params = np.zeros_like(X)
for j in range(k):
temp_y = XB[:, j].reshape((-1, 1))
temp_y = temp_y + err
temp_X = X[:, j].reshape((-1, 1))
bw_class = bw_func(temp_y, temp_X)
if bw_stable_counter >= bws_same_times:
#If in backfitting, all bws not changing in bws_same_times (default 5) iterations
bw = bws[j]
else:
bw = sel_func(bw_class, multi_bw_min[j], multi_bw_max[j])
gwr_sel_hist.append(deepcopy(bw_class.sel_hist))
optim_model = gwr_func(temp_y, temp_X, bw)
err = optim_model.resid_response.reshape((-1, 1))
param = optim_model.params.reshape((-1, ))
new_XB[:, j] = optim_model.predy.reshape(-1)
params[:, j] = param
bws[j] = bw
#If bws remain the same as from previous iteration
if (iters > 1) and np.all(BWs[-1] == bws):
bw_stable_counter += 1
else:
bw_stable_counter = 0
num = np.sum((new_XB - XB)**2) / n
den = np.sum(np.sum(new_XB, axis=1)**2)
score = (num / den)**0.5
XB = new_XB
if rss_score:
predy = np.sum(np.multiply(params, X), axis=1).reshape((-1, 1))
new_rss = np.sum((y - predy)**2)
score = np.abs((new_rss - rss) / new_rss)
rss = new_rss
scores.append(deepcopy(score))
delta = score
BWs.append(deepcopy(bws))
if verbose:
print("Current iteration:", iters, ",SOC:", np.round(score, 7))
print("Bandwidths:", ', '.join([str(bw) for bw in bws]))
if delta < tol:
break
opt_bws = BWs[-1]
return (opt_bws, np.array(BWs), np.array(scores), params, err, gwr_sel_hist, bw_gwr) | 5,356,544 |
def _commands_with(name, from_cmake, start=0, end=-1):
"""
Returns a list of all cmkp._Command objects from a cmakeLists
with the given name.
"""
cmd_list = []
for (index, command) in enumerate(from_cmake[start:end]):
if isinstance(command, cmkp._Command) and command.name == name:
yield (index, command) | 5,356,545 |
def _auth_url(url):
"""Returns the authentication URL based on the URL originally requested.
Args:
url: String, the original request.url
Returns:
String, the authentication URL.
"""
parsed_url = urlparse.urlparse(url)
parsed_auth_url = urlparse.ParseResult(parsed_url.scheme,
parsed_url.netloc,
'/_auth',
parsed_url.params,
parsed_url.query,
parsed_url.fragment)
return parsed_auth_url.geturl() | 5,356,546 |
def ismount(path):
"""
Test whether a path is a mount point.
This is code hijacked from C Python 2.6.8, adapted to remove the extra
lstat() system call.
"""
try:
s1 = os.lstat(path)
except os.error as err:
if err.errno == errno.ENOENT:
# It doesn't exist -- so not a mount point :-)
return False
raise
if stat.S_ISLNK(s1.st_mode):
# A symlink can never be a mount point
return False
s2 = os.lstat(os.path.join(path, '..'))
dev1 = s1.st_dev
dev2 = s2.st_dev
if dev1 != dev2:
# path/.. on a different device as path
return True
ino1 = s1.st_ino
ino2 = s2.st_ino
if ino1 == ino2:
# path/.. is the same i-node as path
return True
return False | 5,356,547 |
def getSupportedPrintTypes(mainControl, guiParent=None):
"""
Returns dictionary {printTypeName: (printObject, printTypeName,
humanReadableName, addOptPanel)}
addOptPanel is the additional options GUI panel and is always None if
guiParent is None
"""
return groupOptPanelPlugins(mainControl,
getPrintTypeDict(mainControl), guiParent=guiParent) | 5,356,548 |
def main():
"""
    Usage: search for a selenium setup tutorial yourself; essentially you just need to download
    chromedriver.exe and put it in the Chrome root directory.
    On this site each page's image is an http:*.webp link, and following that link redirects
    again to the image's real url (ending in '.jpg')
"""
    # start from this chapter
lastChapter = 'https://m.duoduomh.com/manhua/zujienvyou/853617.html'
    # avoid interference from trailing GET parameters such as *.html?p=1 (p is the page number); this script saves the downloaded images by chapter
cmpLen = lastChapter.find('.html')
opt = Options()
    opt.add_argument('--headless')  # launch headless (no UI)
opt.add_argument('--disable-gpu')
opt.add_argument('blink-settings=imagesEnabled=false')
opt.add_argument('log-level=3')
    # my chromedriver (actually the root directory of my Cent Browser is here)
exepath = r'G:\Program Files\CentBrowser\Application\chromedriver.exe'
browser = webdriver.Chrome(executable_path=exepath, chrome_options=opt)
    # starting chapter number
beginchap = 128
    # 128 chapters in total
chapters = [[] for i in range(128 - beginchap + 1)]
currentChap = lastChapter
# for i in range(len(chapters)):
bar = tqdm(range(len(chapters)))
for i in bar:
bar.set_description(" Downloading Chapter %d" % (i + beginchap))
page = 0
lastChapter = currentChap
browser.get(currentChap)
while currentChap[:cmpLen] == lastChapter[:cmpLen]:
            sleep(0.2)  # give selenium time to open the link
img_link = getLink(browser.page_source, 'img id="page-.*webp')
if img_link != None:
img_link = img_link[img_link.find('src="') + 5:]
chapters[i].append((img_link, page := page+1))
rootpath = r'G:\Downloads\租借女友'
savepath = rootpath + \
'\\chap%d\\page%d.jpg' % (beginchap + i, page)
downloadImg(img_link, savepath)
            # Find the "next page" button by XPath and simulate a click. Press F12 in the browser,
            # right-click the html source, and you can find the "Copy XPath" option.
browser.find_element_by_xpath(
'/html/body/div[1]/div[8]/div/ul/li[3]/a').click()
currentChap = browser.current_url
browser.quit()
print("downloaing finished")
print(chapters) | 5,356,549 |
def read_image(file_name: str) -> np.array:
"""
    helper function for loading an image
    :param file_name: path to the file
    :return: numpy array, ready to be modified by our functions
"""
return np.asarray(Image.open(file_name), dtype=np.int32) | 5,356,550 |
def weighted_img(img, initial_img, α=0.8, β=1.0, γ=0.0):
"""
`img` is the output of the hough_lines(), An image with lines drawn on it.
Should be a blank image (all black) with lines drawn on it.
`initial_img` should be the image before any processing.
The result image is computed as follows:
initial_img * α + img * β + γ
NOTE: initial_img and img must be the same shape!
"""
return cv2.addWeighted(initial_img, α, img, β, γ) | 5,356,551 |
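A sketch blending a synthetic line image onto a blank frame; assumes cv2 (OpenCV) and numpy (np) are available.

import cv2
import numpy as np

initial = np.zeros((100, 100, 3), dtype=np.uint8)
lines = np.zeros_like(initial)
cv2.line(lines, (0, 0), (99, 99), (255, 0, 0), 2)
blended = weighted_img(lines, initial)  # same shape as the inputs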
def test_tri_root_improper():
"""
Test tri_root-related functions with arguments that are not triangular
numbers.
"""
t = np.arange(1000)
with pytest.raises(ValueError):
tri.tri_root_strict(t)
# Check the truncated version works as expected
n = tri.tri_root_trunc(t)
t_trunc = tri.tri_n(n)
assert np.all((t_trunc <= t) & (t < tri.tri_n(n + 1)))
# Check remainder function
root, rem = tri.tri_root_rem(t)
assert np.array_equal(root, n)
assert np.array_equal(tri.tri_n(root) + rem, t) | 5,356,552 |
def md5SessionKey(params, password):
"""
If the "algorithm" directive's value is "MD5-sess", then A1
[the session key] is calculated only once - on the first request by the
client following receipt of a WWW-Authenticate challenge from the server.
This creates a 'session key' for the authentication of subsequent
requests and responses which is different for each "authentication
session", thus limiting the amount of material hashed with any one
key.
Because the server need only use the hash of the user
credentials in order to create the A1 value, this construction could
be used in conjunction with a third party authentication service so
that the web server would not need the actual password value. The
specification of such a protocol is beyond the scope of this
specification.
"""
keys = ("username", "realm", "nonce", "cnonce")
params_copy = {}
for key in keys:
params_copy[key] = params[key]
params_copy["algorithm"] = MD5_SESS
return _A1(params_copy, password) | 5,356,553 |
def all_state_action(buffer: RolloutBuffer, learner: BaseAlgorithm, state_only: bool = False):
""" Equivalent of state_action on the whole RolloutBuffer."""
o_shape = get_obs_shape(learner.observation_space)
t = lambda x, shape=[-1]: buffer.to_torch(x).view(buffer.buffer_size*buffer.n_envs, *shape)
if isinstance(buffer.observations, dict):
observations = {k: t(v, o_shape[k]) for k, v in buffer.observations.items()} # OrderedDict?
else:
observations = t(buffer.observations, o_shape)
actions = t(buffer.actions)
return state_action(observations, actions, learner, state_only) | 5,356,554 |
def write_results(conn, cursor, mag_dict, position_dict):
"""
Write star truth results to the truth table
Parameters
----------
conn is a sqlite3 connection to the database
cursor is a sqlite3.conneciton.cursor() object
mag_dict is a dict of mags. It is keyed on the pid of the
Process used to process a chunk of magnitudes. Each value
is a 2-D numpy array of shape (n_obj, n_bandpasses). It is
produced by calculate_magnitudes.
position_dict is a dict keyed on pid of the Process used to
process a chunk of stars. The values are also dicts, these
keyed on 'healpix', 'ra', 'dec', 'id' with the values being
arrays of those quantities for the corresponding chunk of
stars.
Returns
-------
    int
        The number of rows written to the truth table.
"""
assert len(mag_dict) == len(position_dict)
row_ct = 0
for k in mag_dict.keys():
mm = mag_dict[k]
pp = position_dict[k]
row_ct += len(pp['ra'])
if len(mm) != len(pp['ra']):
raise RuntimeError('%d mm %d pp' % (len(mm), len(pp['ra'])))
values = ((int(pp['healpix'][i_obj]),
int(pp['id'][i_obj]), 1, 0, 0,
pp['ra'][i_obj], pp['dec'][i_obj], 0.0,
mm[i_obj][0], mm[i_obj][1], mm[i_obj][2],
mm[i_obj][3], mm[i_obj][4], mm[i_obj][5])
for i_obj in range(len(pp['ra'])))
cursor.executemany('''INSERT INTO truth
VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?)''', values)
conn.commit()
return row_ct | 5,356,555 |
def get_sparameters(sim: td.Simulation) -> np.ndarray:
"""Adapted from tidy3d examples.
Returns full Smatrix for a component
https://support.lumerical.com/hc/en-us/articles/360042095873-Metamaterial-S-parameter-extraction
"""
sim = run_simulation(sim).result()
def get_amplitude(monitor):
f, b = sim.data(monitor)["mode_amps"]
return np.squeeze(f), np.squeeze(b)
monitors = sim.monitors
n = len(monitors) - 1
S = np.zeros((n, n), dtype=np.complex128)
# for i, monitor_i in enumerate(monitors):
# for j, monitor_j in enumerate(monitors):
# if i > 0 and j > 0:
# if monitor_i.name.startswith("W"):
# ai, bi = get_amplitude(monitor_i)
# else:
# bi, ai = get_amplitude(monitor_i)
# if monitor_j.name.startswith("W"):
# aj, bj = get_amplitude(monitor_j)
# else:
# bj, aj = get_amplitude(monitor_j)
# S[i - i, j - 1] = bi / aj
if len(monitors) == 5:
_, incident, reflect, top, bot = monitors
S[0, 0] = get_amplitude(incident)[-1]
S[1, 0] = get_amplitude(reflect)[-1]
S[0, 1] = get_amplitude(top)[0]
S[1, 1] = get_amplitude(bot)[0]
elif len(monitors) == 3:
_, incident, reflect = monitors
S[0, 0] = S[1, 1] = get_amplitude(incident)[-1]
S[1, 0] = S[0, 1] = get_amplitude(reflect)[-1]
return S | 5,356,556 |
def mapmri_STU_reg_matrices(radial_order):
""" Generates the static portions of the Laplacian regularization matrix
according to [1]_ eq. (11, 12, 13).
Parameters
----------
radial_order : unsigned int,
an even integer that represent the order of the basis
Returns
-------
S, T, U : Matrices, shape (N_coef,N_coef)
Regularization submatrices
References
----------
.. [1]_ Fick, Rutger HJ, et al. "MAPL: Tissue microstructure estimation
using Laplacian-regularized MAP-MRI and its application to HCP data."
NeuroImage (2016).
"""
S = np.zeros((radial_order + 1, radial_order + 1))
for i in range(radial_order + 1):
for j in range(radial_order + 1):
S[i, j] = map_laplace_s(i, j)
T = np.zeros((radial_order + 1, radial_order + 1))
for i in range(radial_order + 1):
for j in range(radial_order + 1):
T[i, j] = map_laplace_t(i, j)
U = np.zeros((radial_order + 1, radial_order + 1))
for i in range(radial_order + 1):
for j in range(radial_order + 1):
U[i, j] = map_laplace_u(i, j)
return S, T, U | 5,356,557 |
def _checker(word: dict):
"""checks if the 'word' dictionary is fine
:param word: the node in the list of the text
:type word: dict
:return: if "f", "ref" and "sig" in word, returns true, else, returns false
:rtype: bool
"""
if "f" in word and "ref" in word and "sig" in word:
return True
return False | 5,356,558 |
def test_creates_service(image, swarm, network, make_service):
"""Test that logging in as a new user creates a new docker service."""
test_logger.info("Start of service testing")
make_service(hub_service)
client = docker.from_env()
# jupyterhub service should be running at this point
services_before_spawn = client.services.list()
test_logger.info("Pre test services: {}".format(services_before_spawn))
username = "a-new-user"
password = "just magnets"
test_logger.info("Authenticating with user: {}".format(username))
assert wait_for_site(JHUB_URL) is True
with requests.Session() as s:
# login
test_logger.info("Authenticating with user: {}".format(username))
login_response = s.post(
JHUB_URL + "/hub/login?next=",
data={"username": username, "password": password},
)
test_logger.info("Login response message: {}".format(login_response.text))
assert login_response.status_code == 200
# Spawn a notebook
spawn_form_resp = s.get(JHUB_URL + "/hub/spawn")
test_logger.info("Spawn page message: {}".format(spawn_form_resp.text))
assert spawn_form_resp.status_code == 200
assert "Select a notebook image" in spawn_form_resp.text
payload = {"dockerimage": "nielsbohr/base-notebook:latest"}
spawn_resp = s.post(JHUB_URL + "/hub/spawn", data=payload)
test_logger.info("Spawn POST response message: {}".format(spawn_resp.text))
assert spawn_resp.status_code == 200
services = client.services.list()
test_logger.info("Post spawn services: {}".format(services))
# New services are there
assert len(services) > 0
for service in services:
while (
service.tasks() and service.tasks()[0]["Status"]["State"] != "running"
):
time.sleep(5)
state = service.tasks()[0]["Status"]["State"]
assert state != "failed"
# wait for user home
home_resp = s.get(JHUB_URL + "/user/{}/tree?".format(username))
assert home_resp.status_code == 200
# New services are there
services_after_spawn = set(client.services.list()) - set(services_before_spawn)
assert len(services_after_spawn) > 0
# Remove via the web interface
# Wait for the server to finish spawning
pending = True
num_wait, max_wait = 0, 15
        while pending and num_wait < max_wait:
num_wait += 1
resp = s.delete(
JHUB_URL + "/hub/api/users/{}/server".format(username),
headers={"Referer": "127.0.0.1:{}/hub/".format(PORT)},
)
test_logger.info(
"Response from removing the user server: {}".format(resp.text)
)
if resp.status_code == 204:
pending = False
time.sleep(1)
assert resp.status_code == 204
# double check it is gone
services_after_remove = client.services.list()
assert len((set(services_before_spawn) - set(services_after_remove))) == 0
test_logger.info("End of test service") | 5,356,559 |
def construc_prob(history, window, note_set, model, datafilename):
"""
    This function constructs the probabilities of seeing each next note
Inputs:
history, A list of strings, the note history in chronological order
        window, an integer, how far back we are looking
note_set, the set of notes to be considered
model, the model used to construct probabilities
datafilename, a string, the name of the file containing the information to convert strings of notes to interaction dummies
Outputs:
A list of probabilities of len(note_set)
"""
recent_history = history[len(history)-window + 1:len(history)]
like_prob = [] # Initialize a empty list of probabilities of liking a certain sequence
for note in note_set:
potential_hist = recent_history + [note]
X = create_X(potential_hist, datafilename)
# print(potential_hist)
# print(model(X))
like_prob.append(model(X))
return selection_prob(like_prob) | 5,356,560 |
def renderPybullet(envs, config, tensor=True):
"""Provides as much images as envs"""
if type(envs) is list:
obs = [
env_.render(
mode="rgb_array",
image_size=config["image_size"],
color=config["color"],
fpv=config["fpv"],
camera_id=0,
)
for env_ in envs
]
obs = np.array(obs).transpose(0, 3, 1, 2) / 255.0
else:
obs = envs.render(
mode="rgb_array",
image_size=config["image_size"],
color=config["color"],
fpv=config["fpv"],
camera_id=0,
)
obs = obs.transpose(2, 0, 1) / 255.0
if tensor:
obs = obs[None]
return obs | 5,356,561 |
def wifi(request):
"""Collect status information for wifi and return HTML response."""
context = {
'refresh': 5,
'item': '- Wifi',
'timestamp': timestamp(),
'wifi': sorted(Wifi().aps),
}
return render(request, 'ulm.html', context) | 5,356,562 |
def genModel( nChars, nHidden, numLayers = 1, dropout = 0.5, recurrent_dropout = 0.5 ):
"""Generates the RNN model with nChars characters and numLayers hidden units with
dimension nHidden."""
model = Sequential()
model.add( LSTM( nHidden, input_shape = (None, nChars), return_sequences = True,
dropout = dropout, recurrent_dropout = recurrent_dropout ) )
for _ in range( numLayers - 1 ):
model.add( LSTM( nHidden, return_sequences = True,
dropout = dropout, recurrent_dropout = recurrent_dropout ) )
model.add( TimeDistributed( Dense(nChars) ) )
model.add( Activation('softmax') )
model.compile( loss = "categorical_crossentropy", optimizer = "adam" )
return model | 5,356,563 |
def print_diff_trials(diff, skip=None):
"""Print diff of basic trial information"""
skip = skip or set()
for key, values in viewitems(diff.trial):
if key not in skip:
print(" {} changed from {} to {}".format(
key.capitalize().replace("_", " "),
values[0] or "<None>", values[1] or "<None>"))
print() | 5,356,564 |
def rotate_system(shape_list, angle, center_point = None):
"""Rotates a set of shapes around a given point
If no center point is given, assume the center of mass of the shape
Args:
shape_list (list): A list of list of (x,y) vertices
angle (float): Angle in radians to rotate counterclockwise
center_point ([float, float]): (x,y) point to rotate around
Returns:
A new shape list with rotated vertices
"""
if center_point is None:
center_point = centroid_for_uncomputed_shapes(shape_list)
return [rotate_polygon(s, angle, center_point) for s in shape_list] | 5,356,565 |
def _large_compatible_negative(tensor_type):
"""Large negative number as Tensor.
This function is necessary because the standard value for epsilon
in this module (-1e9) cannot be represented using tf.float16
Args:
tensor_type: a dtype to determine the type.
Returns:
a large negative number.
"""
if tensor_type == tf.float16:
return tf.float16.min
return -1e9 | 5,356,566 |
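A sketch of the typical use, pushing masked-out logits toward the most negative representable value before a softmax; assumes tensorflow (tf) is imported as the function requires.

import tensorflow as tf

logits = tf.constant([[2.0, 1.0, 0.5]], dtype=tf.float16)
mask = tf.constant([[1.0, 1.0, 0.0]], dtype=tf.float16)
masked = logits + (1.0 - mask) * _large_compatible_negative(logits.dtype)
weights = tf.nn.softmax(masked)  # the masked position gets ~0 weight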
def classified_unread_counts():
"""
Unread counts return by
helper.classify_unread_counts function.
"""
return {
'all_msg': 12,
'all_pms': 8,
'unread_topics': {
(1000, 'Some general unread topic'): 3,
(99, 'Some private unread topic'): 1
},
'unread_pms': {
1: 2,
2: 1,
},
'unread_huddles': {
frozenset({1001, 11, 12}): 3,
frozenset({1001, 11, 12, 13}): 2
},
'streams': {
1000: 3,
99: 1
}
} | 5,356,567 |
def company_key(company_name=DEFAULT_COMPANY_NAME):
"""Constructs a Datastore key for a Company entity with company_name."""
return ndb.Key('Company', company_name) | 5,356,568 |
def main():
"""Main entry point"""
fire.Fire({"build_index": build_index, "tune_index": tune_index, "score_index": score_index}) | 5,356,569 |
def cappath_config_writer(cappath=None, homepath=None):
"""
Write a ConfigParser file to store cap preferences.
:param cappath: Method to use.
:type cappath: str
:param homepath: Folder containing ini file. Default is user directory.
:type homepath: str
"""
cappath = grab_cap() if cappath is None else cappath
results = {"path": cappath}
iniconfig.generic_writer("cappath", results, homepath) | 5,356,570 |
def to_n_class(digit_lst, data, labels):
"""to make a subset of MNIST dataset, which has particular digits
Parameters
----------
digit_lst : list
for example, [0,1,2] or [1, 5, 8]
data : numpy.array, shape (n_samples, n_features)
labels : numpy.array or list of str
Returns
-------
numpy.array, list of int
"""
if not set(digit_lst) <= set(range(10)):
        raise ValueError("digit_lst may only contain digits in the range 0-9")
indices = []
new_labels = []
for i, x in enumerate(data):
for digit in digit_lst:
if labels[i] == str(digit):
indices.append(i)
new_labels.append(digit)
return data[indices], new_labels | 5,356,571 |
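# Small self-contained check of to_n_class above, using toy data in place of
# MNIST; labels are strings because the function compares against str(digit).
import numpy as np
toy_data = np.arange(12).reshape(6, 2)
toy_labels = ['0', '1', '2', '0', '1', '2']
subset, subset_labels = to_n_class([0, 2], toy_data, toy_labels)
# subset is rows 0, 2, 3, 5 of toy_data; subset_labels == [0, 2, 0, 2]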
def acosh(x: T.Tensor) -> T.Tensor:
"""
Elementwise inverse hyperbolic cosine of a tensor.
Args:
x (greater than 1): A tensor.
Returns:
tensor: Elementwise inverse hyperbolic cosine.
"""
y = numpy.clip(x,1+T.EPSILON, numpy.inf)
return ne.evaluate('arccosh(y)') | 5,356,572 |
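# Equivalent check with plain NumPy: arccosh(1) == 0 and arccosh(2) ~= 1.317;
# the clipping in acosh above keeps inputs below 1 + EPSILON from producing NaN.
import numpy as np
print(np.arccosh(np.array([1.0, 2.0])))   # [0.        1.3169579]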
def main():
"""
Load the network and parse the output.
:return: None
"""
# Grab command line args
args = build_argparser().parse_args()
# Connect to the MQTT server
client = connect_mqtt()
# Perform inference on the input stream
infer_on_stream(args, client)
#time_took = time.perf_counter() - start
#print(f"Time took: {time_took:.6f}s") | 5,356,573 |
def _output_server(host, port):
"""
Print info about the current instance
of SwampDragon
"""
print('-------- SwampDragon ------')
print('Running SwampDragon on {}:{}'.format(host, port))
print('DRAGON_URL: {}'.format(settings.DRAGON_URL))
print('Version {}'.format('.'.join([str(v) for v in VERSION])))
print('Debug: {}'.format(settings.DEBUG))
print('Quit the server with ctrl+c')
print('---------------------------') | 5,356,574 |
def get_args():
"""Get command-line arguments"""
parser = argparse.ArgumentParser(
description='Rock the Casbah',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('replace',
metavar='str',
help='The string that will be inserted')
return parser.parse_args() | 5,356,575 |
def run_unit_tests():
""" Run unit tests against installed tools rpms """
# At the time of this writing, no unit tests exist.
    # A unit test script is run so that unit tests can easily be modified.
    print("Running unit tests...")
success, output = run_cli_cmd(["/bin/sh", UNIT_TEST_SCRIPT], False)
return success, output | 5,356,576 |
def autodetect_binary(argv, config):
"""Detects the correct binary to run and sets BINARY_SSH accordingly,
if it is not already set."""
# If BINARY_SSH is set by the user, respect that and do nothing.
if config.get("BINARY_SSH"):
config.print("Will run '{0}' as ssh binary - set by user via BINARY_SSH"
.format(config.get("BINARY_SSH")), loglevel=LOG_DEBUG)
return
# If BINARY_DIR is set, look for the binary in this directory.
runtime_name = argv[0]
if config.get("BINARY_DIR"):
binary_name = os.path.basename(runtime_name)
binary_path = os.path.join(config.get("BINARY_DIR"), binary_name)
if not os.path.isfile(binary_path) or not os.access(binary_path, os.X_OK):
binary_path = os.path.join(config.get("BINARY_DIR"), "ssh")
config.set("BINARY_SSH", binary_path)
config.print("Will run '{0}' as ssh binary - detected based on BINARY_DIR"
.format(config.get("BINARY_SSH")), loglevel=LOG_DEBUG)
return
# argv[0] could be pretty much anything the caller decides to set
# it to: an absolute path, a relative path (common in older systems),
# or even something entirely unrelated.
#
# Similar is true for __file__, which might even represent a location
# that is entirely unrelated to how repassh was found.
#
# Consider also that there might be symlinks / hard links involved.
#
# The logic here is pretty straightforward:
# - Try to eliminate the path of repassh from PATH.
# - Search for a binary with the same name of repassh to run.
#
# If this fails, we may end up in some sort of loop, where repassh
# tries to run itself. This should normally be detected later on,
# where the code checks for the next binary to run.
#
# Note also that users may not be relying on having repassh in the
# PATH at all - for example, with "rsync -e '/path/to/repassh' ..."
binary_name = os.path.basename(runtime_name)
ssh_ident_path = ""
if not os.path.dirname(runtime_name):
message = textwrap.dedent("""\
argv[0] ("{0}") is a relative path. This means that repassh does
not know its own directory, and can't exclude it from searching it
in $PATH:
PATH="{1}"
This may result in a loop, with 'repassh' trying to run itself.
It is recommended that you set BINARY_SSH, BINARY_DIR, or run
repassh differently to prevent this problem.""")
config.print(message.format(runtime_name, os.environ['PATH']),
loglevel=LOG_INFO)
else:
ssh_ident_path = os.path.abspath(os.path.dirname(runtime_name))
# Remove the path containing the repassh symlink (or whatever) from
# the search path, so we do not cause an infinite loop.
# Note that:
# - paths in PATH may be not-normalized, example: "/usr/bin/../foo",
# or "/opt/scripts///". Normalize them before comparison.
# - paths in PATH may be repeated multiple times. We have to exclude
# all instances of the repassh path.
normalized_path = [
os.path.normpath(p) for p in os.environ['PATH'].split(os.pathsep)]
search_path = os.pathsep.join([
p for p in normalized_path if p != ssh_ident_path])
# Find an executable with the desired name.
binary_path = shutil.which(binary_name, path=search_path)
if not binary_path:
# Nothing found. Try to find something named 'ssh'.
binary_path = shutil.which('ssh')
if binary_path:
config.set("BINARY_SSH", binary_path)
config.print("Will run '{0}' as ssh binary - detected from argv[0] and $PATH"
.format(config.get("BINARY_SSH")), loglevel=LOG_DEBUG)
else:
message = textwrap.dedent("""\
repassh was invoked in place of the binary {0} (determined from argv[0]).
Neither this binary nor 'ssh' could be found in $PATH.
PATH="{1}"
You need to adjust your setup for repassh to work: consider setting
BINARY_SSH or BINARY_DIR in your config, or running repassh some
other way.""")
config.print(message.format(argv[0], os.environ['PATH']), loglevel=LOG_ERROR)
config.exit(255) | 5,356,577 |
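# Standalone illustration of the PATH-exclusion step used above: normalize each
# PATH entry, drop the (hypothetical) directory holding this wrapper, and
# resolve the real binary with shutil.which against the reduced search path.
import os
import shutil
own_dir = os.path.normpath("/opt/repassh/bin")   # hypothetical wrapper location
normalized = [os.path.normpath(p) for p in os.environ["PATH"].split(os.pathsep)]
search_path = os.pathsep.join(p for p in normalized if p != own_dir)
print(shutil.which("ssh", path=search_path))     # path to ssh, or None if absent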
def encode_input_descr(prm):
""" Encode process description input."""
elem = NIL("Input", *_encode_param_common(prm))
elem.attrib["minOccurs"] = ("1", "0")[bool(prm.is_optional)]
elem.attrib["maxOccurs"] = "1"
if isinstance(prm, LiteralData):
elem.append(_encode_literal(prm, True))
elif isinstance(prm, ComplexData):
elem.append(_encode_complex(prm, True))
elif isinstance(prm, BoundingBoxData):
elem.append(_encode_bbox(prm, True))
return elem | 5,356,578 |
def eval_src(encoder, classifier, data_loader):
"""Evaluate classifier for source domain."""
# set eval state for Dropout and BN layers
    encoder.eval()  # eval mode disables Dropout and BatchNormalization updates
classifier.eval()
# init loss and accuracy
# loss = 0
# acc = 0
loss_1 = 0
loss_2 = 0
loss_3 = 0
acc1 = 0
acc2 = 0
acc3 = 0
    # my addition: use MSE loss for the regression outputs
criterion = nn.MSELoss()
# evaluate network
for (images, labels) in data_loader:
images = make_variable(images, volatile=True)
labels = make_variable(labels)
        # print('labels:', labels)
        # print('labels:', labels.shape)
        preds = classifier(encoder(images))  # .squeeze()
        # print('predictions:', preds.shape)
        # print('predictions:', preds)
        # loss += criterion(preds, labels).item()
loss_1 += criterion(preds[:,0], labels[:,0]).item()
loss_2 += criterion(preds[:,1], labels[:,1]).item()
loss_3 += criterion(preds[:,2], labels[:,2]).item()
        # pred_cls = preds.data.max(1)[1]  # index of the max value in each row (not needed here: the classifier, i.e. my regressor, outputs values directly)
# acc += pred_cls.eq(labels.data).cpu().sum()
# acc += ((preds - labels) ** 2).cpu().sum()
acc1 += ((preds[:,0] - labels[:,0]) ** 2).cpu().sum()
acc2 += ((preds[:,1] - labels[:,1]) ** 2).cpu().sum()
acc3 += ((preds[:,2] - labels[:,2]) ** 2).cpu().sum()
# loss /= len(data_loader)
# acc /= len(data_loader.dataset)
loss_1 /= len(data_loader)
loss_2 /= len(data_loader)
loss_3 /= len(data_loader)
acc1 /= len(data_loader.dataset)
acc2 /= len(data_loader.dataset)
acc3 /= len(data_loader.dataset)
# print("Avg Loss = {}, Avg Accuracy = {}".format(loss, acc))
print('Avg loss1: {}, Avg loss2: {}, Avg loss3: {}'.format(loss_1,loss_2,loss_3))
print('Avg Acc1: {}, Avg Acc2: {}, Avg Acc3: {}'.format(acc1, acc2, acc3)) | 5,356,579 |
def fill_replay_buffer(
env, replay_buffer: ReplayBuffer, desired_size: int, agent: Agent
):
"""Fill replay buffer with transitions until size reaches desired_size."""
assert (
0 < desired_size and desired_size <= replay_buffer._replay_capacity
), f"It's not true that 0 < {desired_size} <= {replay_buffer._replay_capacity}."
assert replay_buffer.size < desired_size, (
f"Replay buffer already has {replay_buffer.size} elements. "
f"(more than desired_size = {desired_size})"
)
logger.info(
f" Starting to fill replay buffer using policy to size: {desired_size}."
)
post_step = add_replay_buffer_post_step(replay_buffer, env=env)
agent.post_transition_callback = post_step
max_episode_steps = env.max_steps
with tqdm(
total=desired_size - replay_buffer.size,
desc=f"Filling replay buffer from {replay_buffer.size} to size {desired_size}",
) as pbar:
mdp_id = 0
while replay_buffer.size < desired_size:
last_size = replay_buffer.size
max_steps = desired_size - replay_buffer.size
if max_episode_steps is not None:
max_steps = min(max_episode_steps, max_steps)
run_episode(env=env, agent=agent, mdp_id=mdp_id, max_steps=max_steps)
size_delta = replay_buffer.size - last_size
# The assertion below is commented out because it can't
# support input samples which has seq_len>1. This should be
# treated as a bug, and need to be fixed in the future.
# assert (
# size_delta >= 0
# ), f"size delta is {size_delta} which should be non-negative."
pbar.update(n=size_delta)
mdp_id += 1
if size_delta <= 0:
# replay buffer size isn't increasing... so stop early
break
if replay_buffer.size >= desired_size:
logger.info(f"Successfully filled replay buffer to size: {replay_buffer.size}!")
else:
logger.info(
f"Stopped early and filled replay buffer to size: {replay_buffer.size}."
) | 5,356,580 |
def pytest_runtest_call(item):
"""Before the test item is called."""
try:
request = item._request
except AttributeError:
# pytest-pep8 plugin passes Pep8Item here during tests.
return
factoryboy_request = request.getfixturevalue("factoryboy_request")
factoryboy_request.evaluate(request)
assert not factoryboy_request.deferred
request.config.hook.pytest_factoryboy_done(request=request) | 5,356,581 |
def poly_quo(f, g, *symbols):
"""Returns polynomial quotient. """
return poly_div(f, g, *symbols)[0] | 5,356,582 |
def preprocess_data(dataset, encoder, config):
"""
Function to perform 4 preprocessing steps:
1. Exclude classes below minimum threshold defined in config.threshold
2. Exclude all classes that are not referenced in encoder.classes
3. Encode and normalize data into (path: str, label: int) tuples
4. Partition data samples into fractional splits defined in config.data_splits_meta
Parameters
----------
dataset : BaseDataset
Any instance of BaseDataset or its subclasses
encoder : LabelEncoder
        Label encoder used to map class (family) names to integer labels.
config : Namespace or stuf.stuf
Config object containing the attributes/properties:
config.threshold
config.data_splits_meta
Returns
-------
dict
Dictionary mapping from keys defined in config.data_splits_meta.keys(), to lists of tuples representing each sample.
Examples
-------
Examples should be written in doctest format, and
should illustrate how to use the function/class.
>>> dataset = LeavesDataset()
... encoder = LabelEncoder(dataset.data.family)
... data_splits = preprocess_data(dataset, encoder, config)
"""
dataset.exclude_rare_classes(threshold=config.threshold)
encoder.encoder = dataset.classes
dataset, _ = dataset.enforce_class_whitelist(class_names=encoder.classes)
x = list(dataset.data['path'].values)#.reshape((-1,1))
y = np.array(encoder.encode(dataset.data['family']))
# import pdb;pdb.set_trace()
shuffled_data = list(zip(x,y))
random.shuffle(shuffled_data)
partitioned_data = partition_data(data=shuffled_data,
partitions=OrderedDict(config.data_splits_meta)
)
return {k:v for k,v in partitioned_data.items() if len(v)>0} | 5,356,583 |
def connect(ip,
_initialize=True,
wait_ready=None,
timeout=30,
still_waiting_callback=default_still_waiting_callback,
still_waiting_interval=1,
status_printer=None,
vehicle_class=None,
rate=4,
baud=115200,
heartbeat_timeout=30,
source_system=255,
source_component=0,
use_native=False):
"""
Returns a :py:class:`Vehicle` object connected to the address specified by string parameter ``ip``.
Connection string parameters (``ip``) for different targets are listed in the :ref:`getting started guide <get_started_connecting>`.
The method is usually called with ``wait_ready=True`` to ensure that vehicle parameters and (most) attributes are
available when ``connect()`` returns.
.. code:: python
from dronekit import connect
# Connect to the Vehicle using "connection string" (in this case an address on network)
vehicle = connect('127.0.0.1:14550', wait_ready=True)
:param String ip: :ref:`Connection string <get_started_connecting>` for target address - e.g. 127.0.0.1:14550.
:param Bool/Array wait_ready: If ``True`` wait until all default attributes have downloaded before
the method returns (default is ``None``).
The default attributes to wait on are: :py:attr:`parameters`, :py:attr:`gps_0`,
:py:attr:`armed`, :py:attr:`mode`, and :py:attr:`attitude`.
You can also specify a named set of parameters to wait on (e.g. ``wait_ready=['system_status','mode']``).
For more information see :py:func:`Vehicle.wait_ready <Vehicle.wait_ready>`.
:param status_printer: (deprecated) method of signature ``def status_printer(txt)`` that prints
STATUS_TEXT messages from the Vehicle and other diagnostic information.
By default the status information is handled by the ``autopilot`` logger.
:param Vehicle vehicle_class: The class that will be instantiated by the ``connect()`` method.
This can be any sub-class of ``Vehicle`` (and defaults to ``Vehicle``).
:param int rate: Data stream refresh rate. The default is 4Hz (4 updates per second).
:param int baud: The baud rate for the connection. The default is 115200.
:param int heartbeat_timeout: Connection timeout value in seconds (default is 30s).
If a heartbeat is not detected within this time an exception will be raised.
:param int source_system: The MAVLink ID of the :py:class:`Vehicle` object returned by this method (by default 255).
:param int source_component: The MAVLink Component ID fo the :py:class:`Vehicle` object returned by this method (by default 0).
:param bool use_native: Use precompiled MAVLink parser.
.. note::
The returned :py:class:`Vehicle` object acts as a ground control station from the
perspective of the connected "real" vehicle. It will process/receive messages from the real vehicle
if they are addressed to this ``source_system`` id. Messages sent to the real vehicle are
automatically updated to use the vehicle's ``target_system`` id.
It is *good practice* to assign a unique id for every system on the MAVLink network.
It is possible to configure the autopilot to only respond to guided-mode commands from a specified GCS ID.
The ``status_printer`` argument is deprecated. To redirect the logging from the library and from the
autopilot, configure the ``dronekit`` and ``autopilot`` loggers using the Python ``logging`` module.
:returns: A connected vehicle of the type defined in ``vehicle_class`` (a superclass of :py:class:`Vehicle`).
"""
from dronekit.mavlink import MAVConnection
if not vehicle_class:
vehicle_class = Vehicle
handler = MAVConnection(ip, baud=baud, source_system=source_system, source_component=source_component, use_native=use_native)
vehicle = vehicle_class(handler)
if status_printer:
vehicle._autopilot_logger.addHandler(ErrprinterHandler(status_printer))
if _initialize:
vehicle.initialize(rate=rate, heartbeat_timeout=heartbeat_timeout)
if wait_ready:
if wait_ready is True:
vehicle.wait_ready(still_waiting_interval=still_waiting_interval,
still_waiting_callback=still_waiting_callback,
timeout=timeout)
else:
vehicle.wait_ready(*wait_ready)
return vehicle | 5,356,584 |
def load_test_environment(skill):
"""Load skill's test environment if present
Arguments:
skill (str): path to skill root folder
Returns:
Module if a valid test environment module was found else None
"""
test_env = None
test_env_path = os.path.join(skill, 'test/__init__.py')
if exists(test_env_path):
skill_env = skill + '.test_env'
spec = importlib.util.spec_from_file_location(skill_env, test_env_path)
module = importlib.util.module_from_spec(spec)
sys.modules[skill_env] = module
spec.loader.exec_module(module)
if (hasattr(module, 'test_runner') and
callable(module.test_runner) or
hasattr(module, 'test_setup') and
callable(module.test_setup)):
test_env = module
return test_env | 5,356,585 |
async def ready(request):
"""
    Kubernetes readiness probe: verifies that Redis and MySQL are reachable.
"""
try:
        # check that Redis is reachable.
if app.redis_pool:
await app.redis_pool.save('health', 'ok', 1)
        # check that MySQL is reachable.
if app.mysql_pool:
sql = "SELECT 666"
result = await app.mysql_pool.fetchone(sql)
if result is None:
            raise ServerError(error='internal error', code='10500', message="msg")
except Exception as e:
        raise ServerError(error='internal error', code='10500', message="msg")
return json({
'pong': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
'version': app.config['API_VERSION']
}) | 5,356,586 |
def evaluate_partition(graph: Graph, true_b: np.ndarray, alg_partition: BlockState, evaluation: Evaluation):
"""Evaluate the output partition against the truth partition and report the correctness metrics.
Compare the partitions using only the nodes that have known truth block assignment.
Parameters
----------
graph : Graph
the graph which was partitioned.
true_b : ndarray (int)
array of truth block assignment for each node. If the truth block is not known for a node, -1 is used
to indicate unknown blocks.
alg_partition : BlockState
the partition result returned by stochastic block partitioning.
evaluation : Evaluation
stores evaluation results
Returns
------
evaluation : Evaluation
the evaluation results, filled in with goodness of partitioning measures
"""
alg_b = alg_partition.get_blocks().get_array()
evaluation.full_graph_description_length = alg_partition.entropy()
evaluation.max_full_graph_description_length = BlockState(
graph, graph.new_vertex_property("int", np.arange(graph.num_vertices()))).entropy()
evaluation.full_graph_modularity = modularity(graph, alg_partition.get_blocks())
if np.unique(true_b).size != 1:
contingency_table, N = create_contingency_table(true_b, alg_b, evaluation)
evaluation.contingency_table = contingency_table
joint_prob = evaluate_accuracy(contingency_table, evaluation)
evaluate_pairwise_metrics(contingency_table, N, evaluation)
evaluate_entropy_metrics(joint_prob, evaluation)
else:
evaluation.num_blocks_algorithm = max(alg_b) + 1
evaluation.save() | 5,356,587 |
def surface_area(polygon_mesh):
""" Computes the surface area for a polygon mesh.
Parameters
----------
polygon_mesh : ``PolygonMesh`` object
Returns
-------
result : surface area
"""
if isinstance(polygon_mesh, polygonmesh.FaceVertexMesh):
print("A FaceVertex Mesh")
result = 0.0
for face in polygon_mesh.faces:
v1, v2, v3 = face
result += 0.5 * abs(np.linalg.norm(
np.cross(
polygon_mesh.vertices[v2]-polygon_mesh.vertices[v1],
polygon_mesh.vertices[v3]-polygon_mesh.vertices[v1] )))
return result
return None | 5,356,588 |
def main():
""" main program """
# setup the argument parsing
parser = argparse.ArgumentParser(
description='Program to optimize receptors for the given parameters. '
'Note that most parameters can take multiple values, in '
'which case all parameter combinations are computed.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('-Ns', nargs='+', type=int, required=True,
default=argparse.SUPPRESS, help='number of substrates')
parser.add_argument('-Nr', nargs='+', type=int, required=True,
default=argparse.SUPPRESS, help='number of receptors')
parser.add_argument('--mixture-scheme', type=str,
default='random_uniform',
choices=['const', 'linear', 'random_uniform'],
help='scheme for picking substrate probabilities')
parser.add_argument('-m', '--mixture-size', metavar='M', nargs='+',
type=float, required=True, default=argparse.SUPPRESS,
help='average number of substrates per mixture')
parser.add_argument('--correlation-scheme', type=str, default='const',
choices=['const', 'random_binary', 'random_uniform',
'random_normal'],
help='scheme for picking substrate correlations')
parser.add_argument('--correlation-magnitude', '-corr', metavar='C',
nargs='+', type=float, default=[0],
help='magnitude of the substrate correlations')
conc_dists = LibrarySparseNumeric.concentration_distributions
parser.add_argument('--concentration-distribution', type=str,
default='exponential', choices=conc_dists,
help='concentration distribution of ligands')
parser.add_argument('--concentration-scheme', type=str, default='const',
choices=['const', 'random_uniform'],
help='scheme for picking substrate concentrations')
parser.add_argument('--concentration-mean', '-conc', metavar='c',
nargs='+', type=float, default=[1],
help='mean concentration when ligand is present')
parser.add_argument('--concentration-var', '-conc-var', metavar='v',
nargs='+', type=float, default=[1],
help='variance of concentration when ligand is present')
parser.add_argument('--MI-method', type=str, default='numeric',
choices=['numeric', 'approx', 'approx-gaussian',
'approx-linear', 'fast'],
help='method for estimating the mutual information')
parser.add_argument('--optimization-scheme', type=str,
default='cma',
choices=['cma', 'cma-parallel', 'Nelder-Mead', 'BFGS'],
help='optimization scheme to use')
parser.add_argument('--optimization-info', action='store_true',
default=False,
help='store extra information about the optimization')
parser.add_argument('-s', '--steps', nargs='+', type=int, default=[100000],
help='steps in simulated annealing')
parser.add_argument('-r', '--repeat', type=int, default=1,
help='number of repeats for each parameter set')
cpus = mp.cpu_count()
parser.add_argument('-p', '--parallel', action='store', nargs='?',
default=1, const=cpus, type=int,
help='use multiple processes. %d processes are used if '
'only -p is given, without the number.' % cpus)
parser.add_argument('-q', '--quite', action='store_true',
default=False, help='silence the output')
parser.add_argument('--progress', action='store_true',
help='display some progress output', default=False)
parser.add_argument('-f', '--filename', default='result.pkl',
help='filename of the result file')
# fetch the arguments and build the parameter list
args = parser.parse_args()
arg_list = (args.Ns, args.Nr, args.mixture_size, args.concentration_mean,
args.concentration_var, args.correlation_magnitude, args.steps,
range(args.repeat))
# determine the number of jobs
job_count = 1
for arg in arg_list:
job_count *= len(arg)
# build a list with all the jobs
job_list = [{'Ns': Ns, 'Nr': Nr,
'mixture-scheme': args.mixture_scheme,
'mixture-size': m,
'concentration-distribution': args.concentration_distribution,
'concentration-scheme': args.concentration_scheme,
'concentration-mean': conc_mean,
'concentration-var': conc_var,
'correlation-scheme': args.correlation_scheme,
'correlation-magnitude': corr,
'MI-method': args.MI_method,
'optimization-scheme': args.optimization_scheme,
'optimization-info': args.optimization_info,
'steps': steps,
'quite': args.quite,
'job_count': job_count, 'progress': args.progress}
for Ns, Nr, m, conc_mean, conc_var, corr, steps, _
in itertools.product(*arg_list)]
# do the optimization
if args.parallel > 1 and len(job_list) > 1:
results = mp.Pool(args.parallel).map(optimize_library, job_list)
else:
results = list(map(optimize_library, job_list))
# write the pickled result to file
with open(args.filename, 'wb') as fp:
pickle.dump(results, fp, pickle.HIGHEST_PROTOCOL) | 5,356,589 |
def add(value, next):
""" Adds specified ``value`` to each item passed to pipeline """
while True:
item = yield
next.send(item + value) | 5,356,590 |
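# Usage sketch for the `add` pipeline stage above: generator-based coroutines
# must be primed with next() before values can be pushed in with send().
def printer():
    while True:
        item = yield
        print(item)
sink = printer()
next(sink)            # prime the sink
stage = add(10, sink)
next(stage)           # prime the adder
for x in (1, 2, 3):
    stage.send(x)     # prints 11, 12, 13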
def gas_zfactor(T_pr, P_pr):
"""
Calculate Gas Compressibility Factor
For range: 0.2 < P_pr < 30; 1 < T_pr < 3 (error 0.486%)
(Dranchuk and Aboukassem, 1975)
"""
# T_pr : calculated pseudoreduced temperature
# P_pr : calculated pseudoreduced pressure
from scipy.optimize import fsolve # non-linear solver
import numpy as np
if T_pr > 1 and T_pr < 3 and P_pr > 0.2 and P_pr < 30:
a1 = 0.3265; a2 = -1.0700; a3 = -0.5339; a4 = 0.01569; a5 = -0.05165; a6 = 0.5475
a7 = -0.7361; a8 = 0.1844; a9 = 0.1056; a10 = 0.6134; a11 = 0.7210
def f(y):
rho_pr, z = y
c1 = a1 + (a2/T_pr) + (a3/(T_pr**3))+ (a4/(T_pr**4))+ (a5/(T_pr**5))
c2 = a6 + (a7/T_pr) + (a8/(T_pr**2))
c3 = a9*((a7/T_pr) + (a8/(T_pr**2)))
c4 = (a10)*(1+(a11*(rho_pr**2)))*((rho_pr**2)/(T_pr**3))*(np.exp(-a11*(rho_pr**2)))
f1 = z + (c3*(rho_pr**5)) - (c2*(rho_pr**2)) - (c1*(rho_pr**1)) - c4 - 1
f2 = rho_pr - ((0.27 * P_pr) / (z * T_pr))
return[f1, f2]
pseudo_rho, z_factor = fsolve(f, [1, 1]) # initial guess
else:
pseudo_rho, z_factor = np.nan, np.nan
return(pseudo_rho, z_factor) | 5,356,591 |
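# Usage sketch for gas_zfactor above; the pseudo-reduced properties are assumed
# to have been computed already (T_pr = T / T_pc, P_pr = P / P_pc).
rho_pr, z = gas_zfactor(T_pr=1.5, P_pr=2.0)
print(rho_pr, z)   # z is roughly 0.8 here; NaN is returned outside the valid range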
def download_huc4(HUC4, filename):
"""Download HUC4 geodatabase (flowlines and boundaries) from NHD Plus HR data distribution site
Parameters
----------
HUC4 : str
HUC4 ID code
filename : str
output filename. Will always overwrite this filename.
"""
with requests.get(DATA_URL.format(HUC4=HUC4), stream=True) as r:
if not r.status_code == 200:
raise HTTPError("Could not download {}".format(HUC4))
with open(filename, "wb") as out:
print(
"Downloading HUC4: {HUC4} ({size:.2f} MB)".format(
HUC4=HUC4, size=int(r.headers["Content-Length"]) / 1024 ** 2
)
)
# Use a streaming copy to download the bytes of this file
copyfileobj(r.raw, out) | 5,356,592 |
def func_complex():
"""
    complex(real, imag) returns a complex number with value real + imag*1j,
    or converts a string or number to a complex number.
Examples: complex(4), complex(-1, -4), complex("1+3j")
"""
print '4 = {}'.format(complex(4))
print '"5+3j" = {}'.format(complex('5+3j'))
print '-6, -3 = {}'.format(complex(-6, -3)) | 5,356,593 |
def format_value_with_percentage(original_value):
"""
Return a value in percentage format from
an input argument, the original value
"""
percentage_value = "{0:.2%}".format(original_value)
return percentage_value | 5,356,594 |
def get_Z_and_extent(topofile):
"""Get data from an ESRI ASCII file."""
f = open(topofile, "r")
ncols = int(f.readline().split()[1])
nrows = int(f.readline().split()[1])
xllcorner = float(f.readline().split()[1])
yllcorner = float(f.readline().split()[1])
cellsize = float(f.readline().split()[1])
nodatavalue = float(f.readline().split()[1])
data = numpy.zeros((nrows, ncols), dtype=numpy.float64)
for i in range(nrows):
data[i, :] = f.readline().strip().split()
f.close()
extent = [xllcorner, xllcorner+ncols*cellsize,
yllcorner, yllcorner+nrows*cellsize]
return data, extent | 5,356,595 |
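# Minimal ESRI ASCII grid matching the header layout parsed above, written to a
# temporary file and read back; the values and extent are arbitrary.
sample = ("ncols 3\n" "nrows 2\n" "xllcorner 0.0\n" "yllcorner 0.0\n"
          "cellsize 1.0\n" "NODATA_value -9999\n" "1 2 3\n" "4 5 6\n")
with open("sample_topo.asc", "w") as f:
    f.write(sample)
Z, extent = get_Z_and_extent("sample_topo.asc")
# Z.shape == (2, 3); extent == [0.0, 3.0, 0.0, 2.0]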
def modified_config(
file_config: submanager.models.config.ConfigPaths,
request: pytest.FixtureRequest,
) -> submanager.models.config.ConfigPaths:
"""Modify an existing config file and return the path."""
# Get and check request params
request_param = getattr(request, PARAM_ATTR, None)
if request_param is None:
raise ValueError("Update dict must be passed via request param")
if isinstance(request_param, Sequence):
update_dict, disable_all = request_param
else:
update_dict = request_param
disable_all = False
if not isinstance(update_dict, MutableMapping):
raise TypeError(
f"Update dict {update_dict!r} must be a mapping, "
f"not {type(update_dict)!r}",
)
# Disable all items if requested
config_data = submanager.config.utils.load_config(file_config.static)
if disable_all:
config_data_modified = (
submanager.utils.dicthelpers.process_items_recursive(
dict(config_data),
fn_torun=lambda value: False,
keys_match={"enabled"},
inplace=False,
)
)
if isinstance(disable_all, str):
config_data_level = config_data_modified
for key in disable_all.split("."):
config_data_level = config_data_level[key]
if config_data_level.get("enabled", None) is not None:
config_data_level["enabled"] = True
else:
config_data_modified = copy.deepcopy(dict(config_data))
# Modify config and write it back
config_data_modified = submanager.utils.dicthelpers.update_recursive(
base=config_data_modified,
update=dict(update_dict),
inplace=False,
)
submanager.config.utils.write_config(
config_data_modified,
config_path=file_config.static,
)
return file_config | 5,356,596 |
def train_discrim(discrim, state_features, actions, optim, demostrations,
settings):
"""demostractions: [state_features|actions]
"""
criterion = torch.nn.BCELoss()
for _ in range(settings.VDB_UPDATE_NUM):
learner = discrim(torch.cat([state_features, actions], dim=-1))
expert = discrim(demostrations)
discrim_loss = criterion(learner, torch.ones(
[len(state_features), 1])) + criterion(
expert, torch.zeros(len(demostrations), 1))
optim.zero_grad()
discrim_loss.backward()
optim.step()
expert_acc = ((discrim(demostrations) < 0.5).float()).mean()
learner_acc = ((discrim(torch.cat([state_features, actions], dim=1)) >
0.5).float()).mean()
return expert_acc, learner_acc | 5,356,597 |
def task_install():
"""install the packages into the sys.packages"""
def install(pip):
if pip:
name = get_name()
assert not doit.tools.CmdAction(
f"python -m pip install --find-links=dist --no-index --ignore-installed --no-deps {name}"
).execute(sys.stdout, sys.stderr)
elif PYPROJECT_TOML.exists():
backend = build_backend()
if backend == "flit_core":
needs("flit")
assert not doit.tools.CmdAction("flit install").execute(
sys.stdout, sys.stderr
)
elif backend == "poetry":
needs("poetry")
assert not doit.tools.CmdAction("poetry install").execute(
sys.stdout, sys.stderr
)
else:
assert not doit.tools.CmdAction("pip install . --no-deps").execute(
sys.stdout, sys.stderr
)
name, version = get_name(), get_version()
return Task(
file_dep=[
PYPROJECT_TOML,
to_whl(Path(), name, version),
to_sdist(Path(), name, version),
],
actions=[install],
task_dep=["build"],
params=[_DEVELOP, _PIP],
) | 5,356,598 |
def filter_column(text, column, start=0, sep=None, **kwargs):
""" Filters (like grep) lines of text according to a specified column and operator/value
:param text: a string
:param column: integer >=0
:param sep: optional separator between words (default is arbitrary number of blanks)
:param kwargs: operator=value eg eq='exact match', contains='substring', startswith='prefix' etc...
    :return: list of matching lines, stripped of surrounding whitespace
"""
if len(kwargs) != 1:
raise TypeError("Missing or too many keyword parameter in filter_column")
    op, value = next(iter(kwargs.items()))
if op in ('eq', 'equals'):
op = '__eq__'
elif op in ('contains', 'includes'):
op = '__contains__'
    elif op not in ('startswith', 'endswith'):
raise ValueError("Unknown filter_column operator: {}".format(op))
    lines = text.splitlines() if isinstance(text, str) else text
if start:
lines = lines[start:]
values = []
for line in lines:
elts = line.split(sep) if sep else line.split()
if elts and column < len(elts):
elt = elts[column]
if getattr(elt, op)(value):
values.append(line.strip())
return values | 5,356,599 |
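# Usage sketch: keep only the lines whose third whitespace-separated column
# (zero-based index 2) equals "running".
procs = """webapp 1201 running
worker 1302 stopped
cron   1403 running"""
print(filter_column(procs, 2, eq="running"))
# ['webapp 1201 running', 'cron   1403 running']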