content (stringlengths 35–762k) | sha1 (stringlengths 40) | id (int64 0–3.66M) |
---|---|---|
import math
import random
def naive_scheduler(task_qs, max_workers, old_worker_map, to_die_list, logger):
""" Return two items (as one tuple) dict kill_list :: KILL [(worker_type, num_kill), ...]
dict create_list :: CREATE [(worker_type, num_create), ...]
In this scheduler model, there is minimum 1 instance of each nonempty task queue.
"""
logger.debug("Entering scheduler...")
q_sizes = {}
q_types = []
new_worker_map = {}
# ## Added to disallow rescheduling workers we're waiting to spin down ## #
blocked_workers = 0
blocked_types = []
for w_type in to_die_list:
if to_die_list[w_type] > 0:
if old_worker_map is not None:
blocked_workers += old_worker_map[w_type] # These workers cannot be replaced.
blocked_types.append(w_type)
new_worker_map[w_type] = old_worker_map[w_type] # Keep the same.
# ## ****************************************************************# ## #
# Remove blocked workers from max workers.
max_workers -= blocked_workers
# Sum the size of each *available* (unblocked) task queue
sum_q_size = 0
for q_type in task_qs:
if q_type not in blocked_types:
q_types.append(q_type)
q_size = task_qs[q_type].qsize()
sum_q_size += q_size
q_sizes[q_type] = q_size
if sum_q_size > 0:
logger.info("[SCHEDULER] Total number of tasks is {}".format(sum_q_size))
# Set proportions of workers equal to the proportion of queue size.
for q_type in q_sizes:
ratio = q_sizes[q_type] / sum_q_size
new_worker_map[q_type] = int(math.floor(ratio * max_workers))
# CLEANUP: Assign the difference here to any random worker. Should be small.
difference = round(max_workers - sum(new_worker_map.values()))
logger.info("[SCHEDULER] Offset difference: {}".format(difference))
logger.info("[SCHEDULER] Queue Types: {}".format(q_types))
if len(q_types) > 0:
for i in range(difference):
win_q = random.choice(q_types)
new_worker_map[win_q] += 1
logger.debug(new_worker_map)
return new_worker_map
else:
return None | 801bfbf3dc9071b1e1f202f63452634e562edb14 | 3,655,100 |
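A minimal usage sketch for the scheduler above, assuming standard-library queue.Queue task queues and a stdlib logging logger; the queue names and worker counts are purely illustrative.

import logging
import queue

logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger("scheduler-demo")

# Two task queues with different backlogs; neither worker type is spinning down.
task_qs = {"cpu": queue.Queue(), "gpu": queue.Queue()}
for _ in range(30):
    task_qs["cpu"].put("job")
for _ in range(10):
    task_qs["gpu"].put("job")

new_map = naive_scheduler(task_qs,
                          max_workers=8,
                          old_worker_map={"cpu": 4, "gpu": 4},
                          to_die_list={"cpu": 0, "gpu": 0},
                          logger=log)
print(new_map)  # e.g. {'cpu': 6, 'gpu': 2}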
def run(ceph_cluster, **kwargs) -> int:
"""
Method that executes the external test suite.
Args:
ceph_cluster The storage cluster participating in the test.
kwargs The supported keys are
config contains the test configuration
Returns:
0 - Success
1 - Failure
"""
LOG.info("Running RBD Sanity tests.")
config = kwargs["config"]
script_dir = config["script_path"]
script = config["script"]
branch = config.get("branch", "pacific")
nodes = config.get("nodes", [])
if nodes:
nodes = get_nodes_by_ids(ceph_cluster, nodes)
else:
# By default, tests would be executed on a single client node
nodes = [ceph_cluster.get_nodes(role="client")[0]]
for node in nodes:
one_time_setup(node, branch=branch)
cmd = f"cd ceph/{script_dir}; sudo bash {script}"
if script == "*":
cmd = f"cd ceph/{script_dir}; for test in $(ls); do sudo bash $test; done"
node.exec_command(cmd=cmd, check_ec=True, timeout=1200)
return 0 | a8107c35049d2edcb5d8f4f844e287ea7fcd1c81 | 3,655,101 |
import requests
import urllib
from bs4 import BeautifulSoup
def search_item(search_term, next=False, page=0, board=0):
"""function to search and return comments"""
if next == False:
page = requests.get("https://www.nairaland.com/search?q=" + urllib.parse.quote_plus(str(search_term)) + "&board="+str(board))
else:
page = requests.get("https://www.nairaland.com/search/"
+ str(search_term) + "/0/"+str(board)+"/0/1" + str(page))
soup = BeautifulSoup(page.content, 'html.parser')
comments = soup.findAll("div", {"class": "narrow"})
return comments | 7e2a72c9df82f204ac852b1c3028c6de8906594b | 3,655,102 |
def is_valid_action(state, x, y, direction):
"""
Checks if moving the piece at given x, y coordinates in the given direction is valid, given the current state.
:param state: the current state
:param x: the x coordinate of the piece
:param y: the y coordinate of the piece
:param direction: the direction to travel with this action
:return: True if the action is valid, False otherwise
"""
new_x = x + X_MOVEMENT_DIFFS[direction]
new_y = y + Y_MOVEMENT_DIFFS[direction]
return is_within_bounds(new_x, new_y) and is_free_square(state, new_x, new_y) | 9be9d6a16d6ec3f766ee7f91c08d3ced7d5ff6b8 | 3,655,103 |
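is_valid_action depends on module-level movement tables and board helpers that are not shown; the sketch below supplies hypothetical stand-ins (X_MOVEMENT_DIFFS, Y_MOVEMENT_DIFFS, GRID_SIZE, is_within_bounds, is_free_square) purely for illustration, not the project's real definitions.

# Hypothetical helpers: direction -> coordinate deltas on a small square board.
X_MOVEMENT_DIFFS = {"up": 0, "down": 0, "left": -1, "right": 1}
Y_MOVEMENT_DIFFS = {"up": -1, "down": 1, "left": 0, "right": 0}
GRID_SIZE = 4

def is_within_bounds(x, y):
    return 0 <= x < GRID_SIZE and 0 <= y < GRID_SIZE

def is_free_square(state, x, y):
    return state[y][x] is None  # None marks an empty square

state = [[None] * GRID_SIZE for _ in range(GRID_SIZE)]
state[0][1] = "piece"
print(is_valid_action(state, 1, 0, "right"))  # True: (2, 0) is in bounds and free
print(is_valid_action(state, 1, 0, "up"))     # False: would leave the board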
def range_(minimum, maximum):
"""
A validator that raises a :exc:`ValueError` if the initializer is called
with a value that does not belong in the [minimum, maximum] range. The
check is performed using ``minimum <= value and value <= maximum``
"""
return _RangeValidator(minimum, maximum) | 27dc9c9c814371eb03b25f99be874e39d48c1a52 | 3,655,104 |
def sigmoid_prime(z):
"""Helper function for backpropagation"""
return sigmoid(z) * (1 - sigmoid(z)) | 13541050982152668cdcec728f3a913298f2aad8 | 3,655,105 |
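sigmoid_prime assumes a sigmoid defined elsewhere in the module; a self-contained sketch using the usual logistic function:

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

# The derivative peaks at 0.25 for z = 0 and is symmetric around it.
print(sigmoid_prime(0.0))                        # 0.25
print(sigmoid_prime(2.0), sigmoid_prime(-2.0))   # equal values, ~0.105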
def register_widget_util(ui_name, some_type, gen_widgets, apply_with_params):
"""
ui_name: the name of this utility in the UI
some_type: this utility will appear in the sidebar whenever your view function
returns a value of type ``some_type``
gen_widgets(val): a function that takes the report value (of the specified type), and
returns a list of widgets. These widget values will be passed like:
``apply_with_params(val, *widget_values)``.
apply_with_params: a function that takes the report value (of the specified type) as
its first parameter, followed by a list of arguments that are given by widgets. The function must
return the result of a call to ``file_response``
"""
def gen_html(val):
widgets = gen_widgets(val)
widget_data = widgets_template_data(widgets)
return render_template('utility_button.html', name=ui_name, widgets=widget_data)
def apply_util(val, data):
widgets = gen_widgets(val)
validate_widget_form_data(widgets, data)
inputs = parse_widget_form_data(widgets, data)
return apply_with_params(val, *inputs)
register_util_for_type(some_type, gen_html, apply_util) | 25273753e0e31472cc44ef3527200e9dfa797de2 | 3,655,106 |
from datetime import datetime
def _CreateSamplePostsubmitReport(manifest=None,
builder='linux-code-coverage',
modifier_id=0):
"""Returns a sample PostsubmitReport for testing purpose.
Note: only use this method if the exact values don't matter.
"""
manifest = manifest or _CreateSampleManifest()
return PostsubmitReport.Create(
server_host='chromium.googlesource.com',
project='chromium/src',
ref='refs/heads/main',
revision='aaaaa',
bucket='coverage',
builder=builder,
commit_timestamp=datetime(2018, 1, 1),
manifest=manifest,
summary_metrics=_CreateSampleCoverageSummaryMetric(),
build_id=123456789,
modifier_id=modifier_id,
visible=True) | 5c7bccda2648f4d8d26725e983a567d5b011dbb6 | 3,655,107 |
import os
import nltk
import pandas as pd
def create_doc_term_matrix():
"""
Load document-term matrix from disk into memory
"""
df = None
if os.path.isfile(DOCTERM_PICKLE):
print('Saved dataframe found! Loading saved document-term matrix...')
df = pd.read_pickle(DOCTERM_PICKLE)
else:
print('Could not find saved document-term matrix, loading from scratch...')
df = pd.read_csv(DOCTERM_FPATH, index_col=0, keep_default_na=False)
# Re-map original doc-term words to stemmed words
stemmer = nltk.stem.porter.PorterStemmer()
row_names = df.index.tolist()
stem_queries = [stemmer.stem(name) for name in row_names]
df.index = pd.Index(stem_queries)
# Collapse duplicate rows
df = df.groupby(df.index).sum()
print('Saving as pickle file...')
df.to_pickle(DOCTERM_PICKLE)
return df | 3583dafc056397766e6c2491d4395cb35eb8ef84 | 3,655,108 |
import typing
import niscope
def _fetch_measurement_stats_arrays(
ssc_s: typing.List[_NIScopeSSC],
scalar_measurements: typing.List[niscope.ScalarMeasurement],
):
"""
    private function for fetching statistics for the selected measurement functions.
Obtains a waveform measurement and returns the measurement value. This
method may return multiple statistical results depending on the number
of channels, the acquisition type, and the number of records you
specify.
You specify a particular measurement type, such as rise time, frequency,
or voltage peak-to-peak. The waveform on which the digitizer calculates
the waveform measurement is from an acquisition that you previously
initiated. The statistics for the specified measurement method are
returned, where the statistics are updated once every acquisition when
the specified measurement is fetched by any of the Fetch Measurement
methods. If a Fetch Measurement method has not been called, this
method fetches the data on which to perform the measurement. The
statistics are cleared by calling
clear_waveform_measurement_stats.
Many of the measurements use the low, mid, and high reference levels.
You configure the low, mid, and high references with
meas_chan_low_ref_level,
meas_chan_mid_ref_level, and
meas_chan_high_ref_level to set each channel
differently.
Args:
ssc_s (typing.List[_NIScopeSSC]): List of sessions for various channels in groups.
scalar_measurements (typing.List[niscope.ScalarMeasurement]): The list of scalar
measurement to be performed on each fetched waveform.
Returns:
list of measurement_stats (list of MeasurementStats): Returns a list of class instances
with the following measurement statistics about the specified measurement:
- **result** (float): the resulting measurement
- **mean** (float): the mean scalar value, which is obtained by
averaging each fetch_measurement_stats call
- **stdev** (float): the standard deviations of the most recent
**numInStats** measurements
- **min_val** (float): the smallest scalar value acquired (the minimum
of the **numInStats** measurements)
- **max_val** (float): the largest scalar value acquired (the maximum
of the **numInStats** measurements)
- **num_in_stats** (int): the number of times fetch_measurement_stats has been called
- **channel** (str): channel name this result was acquired from
- **record** (int): record number of this result
"""
stats: typing.List[niscope.MeasurementStats] = []
for ssc, scalar_meas_function in zip(ssc_s, scalar_measurements):
stats.append(
ssc.session.channels[ssc.channels].fetch_measurement_stats(scalar_meas_function)
) # function with unknown type
return stats | 4acad87bc9b0cd682725ea5edcade10d996653a1 | 3,655,109 |
def nativeMouseY(self):
"""
TOWRITE
:rtype: qreal
"""
scene = self.activeScene() # QGraphicsScene*
if scene:
qDebug("mouseY: %.50f" % -scene.property("SCENE_MOUSE_POINT").y()) # .toPointF().y())
if scene:
return -scene.property("SCENE_MOUSE_POINT").y() # .toPointF().y()
return 0.0 | dc0d4c1f0ff4ab1611ee68a35fd5dfb254a31566 | 3,655,110 |
def generate_repository_dependencies_folder_label_from_key( repository_name, repository_owner, changeset_revision, key ):
"""Return a repository dependency label based on the repository dependency key."""
if key_is_current_repositorys_key( repository_name, repository_owner, changeset_revision, key ):
label = 'Repository dependencies'
else:
label = "Repository <b>%s</b> revision <b>%s</b> owned by <b>%s</b>" % ( repository_name, changeset_revision, repository_owner )
return label | 5654b29354b07f9742ef1cdf20c313ecbcfec02f | 3,655,111 |
import numpy as np
def weighted_characteristic_path_length(matrix):
"""Calculate the characteristic path length for weighted graphs."""
n_nodes = len(matrix)
min_distances = weighted_shortest_path(matrix)
sum_vector = np.empty(n_nodes)
for i in range(n_nodes):
# calculate the inner sum
sum_vector[i] = (1/(n_nodes-1)) * np.sum([min_distances[i, j] for j in range(n_nodes) if j != i])
return (1/n_nodes) * np.sum(sum_vector) | ff171ad9bf7a6968ebf9d41dd5c508bb8b39b16a | 3,655,112 |
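weighted_characteristic_path_length relies on an external weighted_shortest_path; the sketch below uses a simple Floyd-Warshall stand-in (an assumption, not the project's implementation) and checks the result on a 3-node path graph.

import numpy as np

def weighted_shortest_path(matrix):
    """Illustrative stand-in: all-pairs shortest paths via Floyd-Warshall,
    treating entries as edge lengths (0 = no direct edge, except on the diagonal)."""
    n = len(matrix)
    dist = np.where(matrix > 0, matrix, np.inf).astype(float)
    np.fill_diagonal(dist, 0.0)
    for k in range(n):
        dist = np.minimum(dist, dist[:, [k]] + dist[[k], :])
    return dist

# Path graph 0 -- 1 -- 2 with unit weights: L = (1.5 + 1.0 + 1.5) / 3 = 4/3
W = np.array([[0, 1, 0],
              [1, 0, 1],
              [0, 1, 0]], dtype=float)
print(weighted_characteristic_path_length(W))  # ~1.333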
import threading
import sqlite3
def execute_timeout(cnx, command, **kwargs):
"""Perform Sqlite3 command to be interrupted if running too long.
If the given command is a string, it is executed as SQL.
If the command is a callable, call it with the cnx and any given
keyword arguments.
Raises SystemError if interrupted by timeout.
"""
config = flask.current_app.config
event = threading.Event()
timeout = config["EXECUTE_TIMEOUT"]
args = (
cnx,
event,
timeout,
config["EXECUTE_TIMEOUT_INCREMENT"],
config["EXECUTE_TIMEOUT_BACKOFF"],
)
thread = threading.Thread(target=_timeout_interrupt, args=args)
thread.start()
event.set()
try:
if isinstance(command, str): # SQL
result = cnx.execute(command)
elif callable(command):
result = command(cnx, **kwargs)
except sqlite3.ProgrammingError:
raise
except sqlite3.OperationalError as error:
# This looks like a bug in the sqlite3 module:
# SQL syntax error should raise sqlite3.ProgrammingError,
# not sqlite3.OperationalError, which is what it does.
# That's why the error message has to be checked.
if str(error) == "interrupted":
raise SystemError(f"execution exceeded {timeout} seconds; interrupted")
else:
raise
event.clear()
thread.join()
return result | 57b0cc43fc5e9790a0cada0ca9fdd075e652bf41 | 3,655,113 |
def mean_IoU(threshold=0.5, center_crop=0, get_batch_mean=True):
"""
- y_true is a 3D array. Each channel represents the ground truth BINARY channel
- y_pred is a 3D array. Each channel represents the predicted BINARY channel
"""
def _f(y_true, y_pred):
y_true = fix_input(y_true)
y_pred = fix_input(y_pred)
y_true = get_binary_img(
y_true,
threshold=threshold,
center_crop=center_crop
)
y_pred = get_binary_img(
y_pred,
threshold=threshold,
center_crop=center_crop
)
inter = get_intersection(y_true, y_pred)
union = get_alls(y_true, y_pred) - inter
batch_metric = eps_divide(inter, union)
if get_batch_mean:
return K.mean(batch_metric, axis=-1)
return batch_metric
_f.__name__ = 'attila_metrics_{}'.format('mean_IoU')
return _f | 0c9ee55b694e11615bd6ab023ce1f43354b986b1 | 3,655,114 |
import csv
def ConvertCSVStringToList(csv_string):
"""Helper to convert a csv string to a list."""
reader = csv.reader([csv_string])
return list(reader)[0] | fa244d2a1c8c50b2b097883f964f1b5bb7ccf393 | 3,655,115 |
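Quick usage check: the csv module handles quoted fields, and every value comes back as a string.

print(ConvertCSVStringToList('a,b,"c,d"'))  # ['a', 'b', 'c,d']
print(ConvertCSVStringToList('1,2,3'))      # ['1', '2', '3'] (values stay strings)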
def get_section_range_pairs(orig_section, new_pdf):
"""Return MatchingSection for a section."""
other_section = new_pdf.find_corresponding_section(orig_section)
if not other_section:
print("Skipping section {} - no match in the other doc!".format(
orig_section.title))
return None
return MatchingSection(
title=orig_section.title,
orig_range=orig_section.pdf_diff_options,
new_range=other_section.pdf_diff_options) | ff1ef7bedcc0a1264a8cd191267d35a75c302eac | 3,655,116 |
import sqlite3
from typing import Any
import logging
def atomic_transaction(conn: sqlite3.Connection,
sql: str, *args: Any) -> sqlite3.Cursor:
"""Perform an **atomic** transaction.
The transaction is committed if there are no exceptions else the
transaction is rolled back.
Args:
conn: database connection
sql: formatted string
*args: arguments to use for parameter substitution
Returns:
sqlite cursor
"""
try:
c = transaction(conn, sql, *args)
except Exception as e:
logging.exception("Could not execute transaction, rolling back")
conn.rollback()
raise e
conn.commit()
return c | 9748a6e315521278c4dc60df891081dcd77c98b9 | 3,655,117 |
import tensorflow as tf
def convert_to_tensor(narray, device):
"""Convert numpy to tensor."""
return tf.convert_to_tensor(narray, tf.float32) | 699f4cbdad83bc72525d237549420e67d0d464f8 | 3,655,118 |
import logging
def get_config_origin(c):
"""Return appropriate configuration origin
Parameters
----------
c: Configuration
configuration to be examined
Returns
-------
origin: str
origin of configuration (e.g. "Local", "Random", etc.)
"""
if not c.origin:
origin = "Unknown"
elif c.origin.startswith("Local") or c.origin == 'Model based pick' or "sorted" in c.origin:
origin = "Acquisition Function"
elif c.origin.startswith("Random"):
origin = "Random"
else:
logging.getLogger("cave.utils.helpers").debug("Cannot interpret origin: %s", c.origin)
origin = "Unknown"
return origin | a52be755fe37f128c0e629fecfe3307d3bb69fff | 3,655,119 |
import boto3
def get_instance_ip() -> str:
"""
For a given identifier for a deployment (env var of IDENTIFIER), find the cluster
that was deployed, find the tasks within the cluster (there should only be one),
find the network interfaces on that task, and return the public IP of the instance
:returns: str The public ip of the remote instance
"""
ecs_c = boto3.client("ecs")
task_arns = ecs_c.list_tasks(
cluster=f"remote-cluster-{IDENTIFIER}", desiredStatus="RUNNING"
)["taskArns"]
if task_arns:
tasks = ecs_c.describe_tasks(
cluster=f"remote-cluster-{IDENTIFIER}", tasks=task_arns
)["tasks"]
# Should only ever be one task and network interface on deployment
task_details = {
d["name"]: d["value"] for d in tasks[0]["attachments"][0]["details"]
}
interface_id = task_details["networkInterfaceId"]
ec2_c = boto3.client("ec2")
network_interfaces = ec2_c.describe_network_interfaces(
NetworkInterfaceIds=[interface_id]
)["NetworkInterfaces"]
return network_interfaces[0]["Association"]["PublicIp"]
else:
return None | 6b52f6c385e2d96e500458397465f67550a0deeb | 3,655,120 |
def is_hign_level_admin():
"""超级管理员"""
return is_admin() and request.user.level == 1 | 7faa7872f556307b67afd1e5604c104aa6aa242d | 3,655,121 |
def object_metadata(save_path):
"""Retrieves information about the objects in a checkpoint.
Example usage:
```python
object_graph = tf.contrib.checkpoint.object_metadata(
tf.train.latest_checkpoint(checkpoint_directory))
ckpt_variable_names = set()
for node in object_graph.nodes:
for attribute in node.attributes:
ckpt_variable_names.add(attribute.full_name)
```
Args:
save_path: The path to the checkpoint, as returned by `save` or
`tf.train.latest_checkpoint`.
Returns:
A parsed `tf.contrib.checkpoint.TrackableObjectGraph` protocol buffer.
Raises:
ValueError: If an object graph was not found in the checkpoint.
"""
reader = pywrap_tensorflow.NewCheckpointReader(save_path)
try:
object_graph_string = reader.get_tensor(base.OBJECT_GRAPH_PROTO_KEY)
except errors_impl.NotFoundError:
raise ValueError(
('The specified checkpoint "%s" does not appear to be object-based (it '
'is missing the key "%s"). Likely it was created with a name-based '
"saver and does not contain an object dependency graph.") %
(save_path, base.OBJECT_GRAPH_PROTO_KEY))
object_graph_proto = (trackable_object_graph_pb2.TrackableObjectGraph())
object_graph_proto.ParseFromString(object_graph_string)
return object_graph_proto | 8a01cc2a60298a466921c81144bfbd9c4e43aa97 | 3,655,122 |
async def login(_request: Request, _user: User) -> response.HTTPResponse:
"""
Login redirect
"""
return redirect(app.url_for("pages.portfolios")) | 375452df081f619db9c887359b6bb0217aa8e802 | 3,655,123 |
def delete_source(source_uuid: SourceId, database: Database):
"""Delete a source."""
data_model = latest_datamodel(database)
reports = latest_reports(database)
data = SourceData(data_model, reports, source_uuid)
delta_description = (
f"{{user}} deleted the source '{data.source_name}' from metric "
f"'{data.metric_name}' of subject '{data.subject_name}' in report '{data.report_name}'."
)
uuids = [data.report_uuid, data.subject_uuid, data.metric_uuid, source_uuid]
del data.metric["sources"][source_uuid]
return insert_new_report(database, delta_description, (data.report, uuids)) | f160a8304df54a20026155f1afb763c0077d05a9 | 3,655,124 |
def find_object_with_matching_attr(iterable, attr_name, value):
"""
Finds the first item in an iterable that has an attribute with the given name and value. Returns
None otherwise.
Returns:
Matching item or None
"""
for item in iterable:
try:
if getattr(item, attr_name) == value:
return item
except AttributeError:
pass
return None | e37b7620bf484ce887e6a75f31592951ed93ac74 | 3,655,125 |
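A small usage example, with types.SimpleNamespace standing in for arbitrary objects:

from types import SimpleNamespace

users = [SimpleNamespace(name="ada", id=1), SimpleNamespace(name="grace", id=2)]
print(find_object_with_matching_attr(users, "name", "grace"))  # namespace(name='grace', id=2)
print(find_object_with_matching_attr(users, "name", "linus"))  # None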
def send_message(token, message: str) -> str:
"""
A function that notifies LINENotify of the character string given as an argument
:param message:
A string to be notified
:param token:
LineNotify Access Token
:return response:
        the server response (e.g. an HTTP status such as 200)
"""
notify = Notifer(token)
return notify.send_message(message) | 995abdc61398d9e977323e18f3a3e008fbaa1f3b | 3,655,126 |
def _fix(node):
"""Fix the naive construction of the adjont.
See `fixes.py` for details.
This function also returns the result of reaching definitions analysis so
that `split` mode can use this to carry over the state from primal to
adjoint.
Args:
node: A module with the primal and adjoint function definitions as returned
by `reverse_ad`.
Returns:
node: A module with the primal and adjoint function with additional
variable definitions and such added so that pushes onto the stack and
gradient accumulations are all valid.
defined: The variables defined at the end of the primal.
reaching: The variable definitions that reach the end of the primal.
"""
# Do reaching definitions analysis on primal and adjoint
pri_cfg = cfg.CFG.build_cfg(node.body[0])
defined = cfg.Defined()
defined.visit(pri_cfg.entry)
reaching = cfg.ReachingDefinitions()
reaching.visit(pri_cfg.entry)
cfg.forward(node.body[1], cfg.Defined())
cfg.forward(node.body[1], cfg.ReachingDefinitions())
# Remove pushes of variables that were never defined
fixes.CleanStack().visit(node)
fixes.FixStack().visit(node.body[0])
# Change accumulation into definition if possible
fixes.CleanGrad().visit(node.body[1])
# Define gradients that might or might not be defined
fixes.FixGrad().visit(node.body[1])
return node, defined.exit, reaching.exit | 27c6836366afc033e12fea254d9cf13a902d1ee7 | 3,655,127 |
import numpy as np
def greyscale(state):
"""
Preprocess state (210, 160, 3) image into
a (80, 80, 1) image in grey scale
"""
state = np.reshape(state, [210, 160, 3]).astype(np.float32)
# grey scale
state = state[:, :, 0] * 0.299 + state[:, :, 1] * 0.587 + state[:, :, 2] * 0.114
# karpathy
state = state[35:195] # crop
state = state[::2,::2] # downsample by factor of 2
state = state[:, :, np.newaxis]
return state.astype(np.uint8) | 446651be7573eb1352a84e48780b908b0383e0ca | 3,655,128 |
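Usage sketch with a synthetic frame; in practice the input would be an Atari observation from an ALE/Gym environment.

import numpy as np

frame = np.random.randint(0, 256, size=(210, 160, 3), dtype=np.uint8)
processed = greyscale(frame)
print(processed.shape, processed.dtype)  # (80, 80, 1) uint8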
import numpy as np
def functional_common_information(dist, rvs=None, crvs=None, rv_mode=None):
"""
Compute the functional common information, F, of `dist`. It is the entropy
of the smallest random variable W such that all the variables in `rvs` are
rendered independent conditioned on W, and W is a function of `rvs`.
Parameters
----------
dist : Distribution
The distribution from which the functional common information is
computed.
rvs : list, None
A list of lists. Each inner list specifies the indexes of the random
variables used to calculate the total correlation. If None, then the
total correlation is calculated over all random variables, which is
equivalent to passing `rvs=dist.rvs`.
crvs : list, None
A single list of indexes specifying the random variables to condition
on. If None, then no variables are conditioned on.
rv_mode : str, None
Specifies how to interpret `rvs` and `crvs`. Valid options are:
{'indices', 'names'}. If equal to 'indices', then the elements of
`crvs` and `rvs` are interpreted as random variable indices. If equal
        to 'names', the elements are interpreted as random variable names.
If `None`, then the value of `dist._rv_mode` is consulted, which
defaults to 'indices'.
Returns
-------
F : float
The functional common information.
"""
rvs, crvs, rv_mode = normalize_rvs(dist, rvs, crvs, rv_mode)
dtc = dual_total_correlation(dist, rvs, crvs, rv_mode)
ent = entropy(dist, rvs, crvs, rv_mode)
if np.isclose(dtc, ent):
return dtc
d = functional_markov_chain(dist, rvs, crvs, rv_mode)
return entropy(d, [dist.outcome_length()]) | cbeef4f042fc5a28d4d909419c1991c0501c0522 | 3,655,129 |
from kubernetes import config
from kubernetes.client import BatchV1Api
def kubernetes_client() -> BatchV1Api:
"""
returns a kubernetes client
"""
config.load_config()
return BatchV1Api() | 6323d5074f1af02f52eb99b62f7109793908c549 | 3,655,130 |
import types
def admin_only(func):
"""[TODO summary of func]
args:
[TODO insert arguments]
returns:
[TODO insert returns]
"""
def isadmin(invoker, chatadmins):
adminids = []
for admin in chatadmins:
adminids.append(admin.user.id)
return invoker.id in adminids
async def wrapper(message: types.Message):
invoker = message.from_user
chatadmins = await message.chat.get_administrators()
if isadmin(invoker, chatadmins):
await func(message)
# print('isadmin')
#TODO tell that an admin thing is performed
else:
# print('notadmin')
#TODO tell that an admin thing is denied
pass
return wrapper | 4e3cd62e8045b052e556b233d1a7e73adf473bfc | 3,655,131 |
def create_simple():
"""Create an instance of the `Simple` class."""
return Simple() | 98180d64e264c7842596c8f74ce28574459d2648 | 3,655,132 |
def contains_rep_info(line):
"""
    Checks whether the line contains a link to a GitHub repo (pretty simple 'algorithm' at the moment)
    :param line: string from a readme file
    :return: true if it has a link to the github repository
    :type line: string
:rtype: boolean
"""
return True if line.find("https://github.com/") != -1 else False | 335e10a654510a4eda7d28d8df71030f31f98ff1 | 3,655,133 |
def GetAtomPairFingerprintAsBitVect(mol):
""" Returns the Atom-pair fingerprint for a molecule as
a SparseBitVect. Note that this doesn't match the standard
definition of atom pairs, which uses counts of the
pairs, not just their presence.
**Arguments**:
- mol: a molecule
**Returns**: a SparseBitVect
>>> from rdkit import Chem
>>> m = Chem.MolFromSmiles('CCC')
>>> v = [ pyScorePair(m.GetAtomWithIdx(0),m.GetAtomWithIdx(1),1),
... pyScorePair(m.GetAtomWithIdx(0),m.GetAtomWithIdx(2),2),
... ]
>>> v.sort()
>>> fp = GetAtomPairFingerprintAsBitVect(m)
>>> list(fp.GetOnBits())==v
True
"""
res = DataStructs.SparseBitVect(fpLen)
fp = rdMolDescriptors.GetAtomPairFingerprint(mol)
for val in fp.GetNonzeroElements():
res.SetBit(val)
return res | 9a63aa57f25d9a856d5628ec53bab0378e8088d1 | 3,655,134 |
import sqlite3
def get_registrations_by_player_id(db_cursor: sqlite3.Cursor, player_id: int) -> list[registration.Registration]:
"""
Get a list of registrations by player id.
:param db_cursor: database object to interact with database
:param player_id: player id
:return: a list of registrations
"""
db_cursor.execute("""SELECT * FROM registrations WHERE user_id = ?""", [player_id])
registration_infos = db_cursor.fetchall()
registrations = []
for registration_info in registration_infos:
registrations.append(registration.Registration.from_sqlite_table(registration_info))
return registrations | 3e87d0f7379ac7657f85225be95dd6c1d7697300 | 3,655,135 |
import sys
def main(argv=None, from_checkout=False):
"""Top-level script function to create a new Zope instance."""
if argv is None:
argv = sys.argv
try:
options = parse_args(argv, from_checkout)
except SystemExit as e:
if e.code:
return 2
else:
return 0
app = Application(options)
try:
return app.process()
except KeyboardInterrupt:
return 1
except SystemExit as e:
return e.code | b1141e580bb281b976f580f3958c3101cfea552e | 3,655,136 |
from re import L
import numpy as np
def run_sim(alpha,db,m,DELTA,game,game_constants,i):
"""run a single simulation and save interaction data for each clone"""
rates = (DEATH_RATE,DEATH_RATE/db)
rand = np.random.RandomState()
data = [get_areas_and_fitnesses(tissue,DELTA,game,game_constants)
for tissue in lib.run_simulation(simulation,L,TIMESTEP,TIMEND,rand,progress_on=False,
init_time=INIT_TIME,til_fix='exclude_final',save_areas=True,return_events=False,save_cell_histories=False,
N_limit=MAX_POP_SIZE,DELTA=DELTA,game=game,game_constants=game_constants,
mutant_num=1,domain_size_multiplier=m,rates=rates,threshold_area_fraction=alpha,generator=True)]
return data | 2201a836ff8c3da4c289908e561558c52206256b | 3,655,137 |
def IMDB(*args, **kwargs):
""" Defines IMDB datasets.
The labels includes:
- 0 : Negative
- 1 : Positive
Create sentiment analysis dataset: IMDB
Separately returns the training and test dataset
Arguments:
root: Directory where the datasets are saved. Default: ".data"
        ngrams: a contiguous sequence of n items from a string of text.
Default: 1
vocab: Vocabulary used for dataset. If None, it will generate a new
vocabulary based on the train data set.
removed_tokens: removed tokens from output dataset (Default: [])
tokenizer: the tokenizer used to preprocess raw text data.
The default one is basic_english tokenizer in fastText. spacy tokenizer
is supported as well. A custom tokenizer is callable
function with input of a string and output of a token list.
data_select: a string or tuple for the returned datasets
(Default: ('train', 'test'))
By default, all the three datasets (train, test, valid) are generated. Users
could also choose any one or two of them, for example ('train', 'test') or
just a string 'train'. If 'train' is not in the tuple or string, a vocab
object should be provided which will be used to process valid and/or test
data.
Examples:
>>> from torchtext.experimental.datasets import IMDB
>>> from torchtext.data.utils import get_tokenizer
>>> train, test = IMDB(ngrams=3)
>>> tokenizer = get_tokenizer("spacy")
>>> train, test = IMDB(tokenizer=tokenizer)
>>> train, = IMDB(tokenizer=tokenizer, data_select='train')
"""
return _setup_datasets(*(("IMDB",) + args), **kwargs) | 4bb55c88fc108fce350dc3c29a2ac4497ab205b1 | 3,655,138 |
from typing import List
import numpy as np
def load_multiples(image_file_list: List, method: str='mean', stretch: bool=True, **kwargs) -> ImageLike:
"""Combine multiple image files into one superimposed image.
Parameters
----------
image_file_list : list
A list of the files to be superimposed.
method : {'mean', 'max', 'sum'}
A string specifying how the image values should be combined.
stretch : bool
Whether to normalize the images being combined by stretching their high/low values to the same values across images.
kwargs :
Further keyword arguments are passed to the load function.
Examples
--------
Load multiple images::
>>> from pylinac.core.image import load_multiples
>>> paths = ['starshot1.tif', 'starshot2.tif']
>>> superimposed_img = load_multiples(paths)
"""
# load images
img_list = [load(path, **kwargs) for path in image_file_list]
first_img = img_list[0]
# check that all images are the same size and stretch if need be
for img in img_list:
if img.shape != first_img.shape:
raise ValueError("Images were not the same shape")
if stretch:
img.array = stretcharray(img.array, fill_dtype=first_img.array.dtype)
# stack and combine arrays
new_array = np.dstack(tuple(img.array for img in img_list))
if method == 'mean':
combined_arr = np.mean(new_array, axis=2)
elif method == 'max':
combined_arr = np.max(new_array, axis=2)
elif method == 'sum':
combined_arr = np.sum(new_array, axis=2)
# replace array of first object and return
first_img.array = combined_arr
first_img.check_inversion_by_histogram()
return first_img | f61f51c89f3318d17f2223640e34573282280e4a | 3,655,139 |
import numpy as np
def select_seeds(
img: np.ndarray, clust_result: np.ndarray, FN: int = 500,
TN: int = 700, n_clust_object: int = 2
):
"""
Sample seeds from the fluid and retina regions acording to the procedure
described in Rashno et al. 2017
Args:
img (np.ndarray): Image from where to sample the seeds.
        clust_result (np.ndarray): Image with the clustering labels.
FN (int, optional): Number of fluid points to sample. Defaults to 500.
TN (int, optional): Number of ratina points to sample. Defaults to 700.
n_clust_object (int, optional): number of clusters assigned to fluid.
Returns:
fluid_seeds, retina_seeds
"""
n_clust = len(np.unique(clust_result)) - 1
clusters_centers = []
for i in range(1, n_clust+1):
clusters_centers.append(np.mean(img[clust_result == i]))
clusters_centers = np.array(clusters_centers)
indices = np.flip(np.argsort(clusters_centers)) + 1
# Fluid Seeds
fluid_condition = (clust_result == indices[0])
for i in range(n_clust_object):
fluid_condition = fluid_condition | (clust_result == indices[i])
potential_seeds = np.array(np.where(fluid_condition)).T
sample_indx = np.random.randint(0, potential_seeds.shape[0], FN)
fluid_seeds = potential_seeds[sample_indx]
# Retina Seeds:
# Get sampling probabilities and number of samples per cluster
pi = 1/(2**np.arange(1, n_clust-n_clust_object+1))
Npi = np.ones((n_clust-n_clust_object))*70
pre_asigned = (n_clust-n_clust_object)*70
Npi = Npi + np.round((pi/np.sum(pi))*(700-pre_asigned))
Npi = Npi.astype('int')
# Npi = (np.ones((n_clust-n_clust_object))*(700 / n_clust)).astype('int')
# Sample seeds
retina_seeds = []
for i in range(n_clust_object, len(indices)):
bkg_condition = (clust_result == indices[i])
potential_seeds = np.array(np.where(bkg_condition)).T
sample_indx = \
np.random.randint(0, potential_seeds.shape[0], Npi[i-n_clust_object])
retina_seeds.append(potential_seeds[sample_indx])
retina_seeds = np.concatenate(retina_seeds)
return fluid_seeds, retina_seeds, clusters_centers, indices | afdfe7654b53b5269f42c6c74d07265623839d75 | 3,655,140 |
def common(list1, list2):
"""
This function is passed two lists and returns a new list containing
those elements that appear in both of the lists passed in.
"""
common_list = []
temp_list = list1.copy()
temp_list.extend(list2)
temp_list = list(set(temp_list))
temp_list.sort()
for i in temp_list:
if (i in list1) and (i in list2):
common_list.append(i)
return common_list | 021605a2aad6c939155a9a35b8845992870100f0 | 3,655,141 |
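Usage note: the result is the sorted, de-duplicated intersection of the two lists.

print(common([1, 2, 2, 3, 4], [2, 4, 4, 6]))  # [2, 4]
print(common(["a", "b"], ["b", "c"]))         # ['b']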
def create_combobox(root, values, **kwargs):
"""Creates and Grids A Combobox"""
box = ttk.Combobox(root, values=values, **kwargs)
box.set(values[0])
return box | 7406dca6ab99d9130a09a6cb25220c1e40148cc0 | 3,655,142 |
from datetime import datetime
import logging
import sys
def configure_logging_console(logger_type):
"""
Configure logger
    :param logger_type: Name of the logger to create and use across the app's modules
    :return _importer_logger: the configured logger instance
"""
_date_name = datetime.now().strftime('%Y-%m-%dT%H%M')
_importer_logger = logging.getLogger(logger_type)
_importer_logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - Module: %(module)s - Line No: %(lineno)s : %(name)s : %(levelname)s - '
'%(message)s')
sh = logging.StreamHandler(sys.stdout)
sh.setLevel(logging.DEBUG)
sh.setFormatter(formatter)
_importer_logger.addHandler(sh)
return _importer_logger | 412e0c491af07ebff0f9b7a851635017e047a216 | 3,655,143 |
from scipy.stats import chi2
def chi_x2(samples,df):
"""
Compute the central chi-squared statistics for set of chi-squared
distributed samples.
Parameters:
- - - - -
samples : chi-square random variables
df : degrees of freedom
"""
return chi2.pdf(samples,df) | 5700803396e78c7a5658c05ff1b7bd7ae3bd6722 | 3,655,144 |
def integrate(
pc2i,
eos,
initial_frac=DEFAULT_INITIAL_FRAC,
rtol=DEFAULT_RTOL,
):
"""integrate the TOV equations with central pressure "pc2i" and equation of state described by energy density "eps/c2" and pressure "p/c2"
expects eos = (logenthalpy, pressurec2, energy_densityc2, baryon_density, cs2c2)
"""
### define initial condition
logh, vec = initial_condition(pc2i, eos, frac=initial_frac)
m, r, eta, omega, mb = engine(
logh,
vec,
eos,
dvecdlogh,
rtol=rtol,
)
# compute tidal deformability
l = eta2lambda(r, m, eta)
# compute moment of inertia
i = omega2i(r, omega)
# convert to "standard" units
m /= Msun ### reported in units of solar masses, not grams
mb /= Msun
r *= 1e-5 ### convert from cm to km
i /= 1e45 ### normalize this to a common value but still in CGS
return m, r, l, i, mb | 5934da344fc6927d397dccc7c2a730436e1106d5 | 3,655,145 |
import oci.exceptions
def add_ingress_port_to_security_lists(**kwargs):
"""Checks if the given ingress port already is a security list,
if not it gets added.
Args:
**kwargs: Optional parameters
Keyword Args:
security_lists (list): A list of security_lists.
port (int): The port to check
description (str): A description for the ingress rule
compartment_id (str): The OCID of the compartment
config (object): An OCI config object or None.
config_profile (str): The name of an OCI config profile
interactive (bool): Indicates whether to execute in interactive mode
raise_exceptions (bool): If true exceptions are raised
Returns:
True on success
"""
security_lists = kwargs.get("security_lists")
port = kwargs.get("port")
description = kwargs.get("description")
compartment_id = kwargs.get("compartment_id")
config = kwargs.get("config")
config_profile = kwargs.get("config_profile")
interactive = kwargs.get("interactive", core.get_interactive_default())
raise_exceptions = kwargs.get("raise_exceptions", not interactive)
if security_lists is None:
raise ValueError("No security_lists given.")
try:
# Get the active config and compartment
config = configuration.get_current_config(
config=config, config_profile=config_profile,
interactive=interactive)
compartment_id = configuration.get_current_compartment_id(
compartment_id=compartment_id, config=config)
for sec_list in security_lists:
for rule in sec_list.ingress_security_rules:
if rule.tcp_options is not None and \
port >= rule.tcp_options.destination_port_range.min and \
port <= rule.tcp_options.destination_port_range.max and \
rule.protocol == "6" and \
rule.source == "0.0.0.0/0":
return True
if len(security_lists) == 0:
raise Exception("No security list available for this network.")
sec_list = security_lists[0]
try:
network_client = core.get_oci_virtual_network_client(
config=config)
sec_list.ingress_security_rules.append(
oci.core.models.IngressSecurityRule(
protocol="6",
source="0.0.0.0/0",
is_stateless=False,
source_type="CIDR_BLOCK",
tcp_options=oci.core.models.TcpOptions(
destination_port_range=oci.core.models.PortRange(
max=port,
min=port),
source_port_range=None),
udp_options=None,
description=description
)
)
details = oci.core.models.UpdateSecurityListDetails(
defined_tags=sec_list.defined_tags,
display_name=sec_list.display_name,
egress_security_rules=sec_list.egress_security_rules,
freeform_tags=sec_list.freeform_tags,
ingress_security_rules=sec_list.ingress_security_rules
)
network_client.update_security_list(
security_list_id=sec_list.id,
update_security_list_details=details)
return True
except oci.exceptions.ServiceError as e:
if raise_exceptions:
raise
print(f'ERROR: {e.message}. (Code: {e.code}; Status: {e.status})')
except Exception as e:
if raise_exceptions:
raise
        print(f'Could not add the ingress rule to the security list.'
              f'\nERROR: {str(e)}')
def describe_cluster_instances(stack_name, node_type):
"""Return the cluster instances optionally filtered by tag."""
instances = _describe_cluster_instances(stack_name, filter_by_node_type=str(node_type))
if not instances:
# Support for cluster that do not have aws-parallelcluster-node-type tag
LOGGER.debug("Falling back to Name tag when describing cluster instances")
instances = _describe_cluster_instances(stack_name, filter_by_name=str(node_type))
return instances | cdb09b0dd15b6c549895280dc79ea4ccb122a3d9 | 3,655,147 |
import os
def region_root(data_dir):
"""Returns the path of test regions."""
return os.path.join(data_dir, 'regions') | a49385b4cdb550e88b5016783c09b4f9dbebd216 | 3,655,148 |
def list_statistics_keys():
"""ListStatistics definition"""
return ["list", "counts"] | 39521910b4dbde3fc6c9836460c73945561be731 | 3,655,149 |
def forecast_handler(req, req_body, res, res_body, zip):
"""Handles forecast requests"""
return True | a2e35eaad472cfd52dead476d18d18ee2bcd3f6f | 3,655,150 |
def _configure_output(args):
"""
Configures the output. Loads templates and applies the specified formatter if any.
If none of these configurations are specified, it will return the default output
which is to print each value to standard out.
"""
writer = _get_writer(args)
if args.template:
log.debug('Using template: %s', args.template)
if '{{' in args.template:
engine = template_engines.string(args.template)
else:
engine = template_engines.for_file(args.template)
return outputs.RecordLevelOutput(engine, writer)
if args.format:
log.debug('Using %s formatter for output', args.format)
formatter = outputs.FormatProcessor(args.format)
return outputs.RecordLevelOutput(formatter, writer)
# default
return outputs.SingleFieldOutput(writer, args.printkey) | 69a279a63f858ef1124533720179eea9b7c3589a | 3,655,151 |
import numpy as np
def refToMastoidsNP(data, M1, M2):
"""
"""
mastoidsMean = np.mean([M1, M2], axis=0)
mastoidsMean = mastoidsMean.reshape(mastoidsMean.shape[0], 1)
newData = data - mastoidsMean
return newData | 15f50718fb1ea0d7b0fc2961a3a9b8d1baa98636 | 3,655,152 |
from typing import Dict
from typing import Callable
from typing import Any
def override_kwargs(
kwargs: Dict[str, str],
func: Callable[..., Any],
filter: Callable[..., Any] = lambda _: True,
) -> Dict[str, str]:
"""Override the kwargs of a function given a function to apply and an optional filter.
Parameters
----------
    kwargs : Dict
        The function kwargs input.
func : Callable
A function to apply on the kwargs.
filter : Callable
An optional filter to apply the function only on some kwargs. (Default value = lambda _: True).
Returns
-------
Dict
The changed kwargs as a Dict.
"""
return {
key: func(value) if filter(value) else value for key, value in kwargs.items()
} | 31c689a1e2df1e5168f784011fbac6cf4a86bf13 | 3,655,153 |
from urllib import parse
from botocore.exceptions import ClientError
def prepare_for_revival(bucket, obj_prefix):
"""
Makes a manifest for reviving any deleted objects in the bucket. A deleted
object is one that has a delete marker as its latest version.
:param bucket: The bucket that contains the stanzas.
:param obj_prefix: The prefix of the uploaded stanzas.
:return: The manifest as a list of lines in CSV format.
"""
try:
response = s3.meta.client.list_object_versions(
Bucket=bucket.name, Prefix=f'{obj_prefix}stanza')
manifest_lines = [
f"{bucket.name},{parse.quote(marker['Key'])},{marker['VersionId']}"
for marker in response['DeleteMarkers']
if marker['IsLatest']
]
except ClientError:
logger.exception("Couldn't get object versions from %s.", bucket.name)
raise
return manifest_lines | 879182e354d94f1c24cddd233e7c004939c4d0c0 | 3,655,154 |
import ast
import json
import os
def make_drive_resource() -> Resource:
"""
Authenticates and returns a google drive resource.
"""
google_oauth_creds = ast.literal_eval(
credstash.getSecret("IA_PIPELINE_GLOBAL_GOOGLE_SHEETS_API_KEY")
)
with open("key.json", "w") as fp:
json.dump(google_oauth_creds, fp)
creds = service_account.Credentials.from_service_account_file(
"key.json", scopes=SCOPES
)
os.remove("key.json")
scoped_creds = creds.with_subject(DELEGATE_EMAIL)
http = AuthorizedHttp(scoped_creds)
return discovery.build("drive", "v3", http=http) | 7cf2d67791d63f6d3995dce3702775fad1f52a45 | 3,655,155 |
def make_subparser(sub, command_name, help, command_func=None, details=None, **kwargs):
"""
Create the "sub-parser" for our command-line parser.
This facilitates having multiple "commands" for a single script,
for example "norm_yaml", "make_rest", etc.
"""
if command_func is None:
command_func_name = "command_{0}".format(command_name)
command_func = globals()[command_func_name]
# Capitalize the first letter for the long description.
desc = help[0].upper() + help[1:]
if details is not None:
desc += "\n\n{0}".format(details)
desc = _wrap(desc)
parser = sub.add_parser(command_name, formatter_class=FORMATTER_CLASS,
help=help, description=desc, **kwargs)
parser.set_defaults(run_command=command_func)
return parser | acd2467c78f2ff477a5f0412cf48cbef19882f2c | 3,655,156 |
import os
def _get_static_settings():
"""Configuration required for Galaxy static middleware.
Returns dictionary of the settings necessary for a galaxy App
to be wrapped in the static middleware.
This mainly consists of the filesystem locations of url-mapped
static resources.
"""
static_dir = os.path.join(galaxy_root, "static")
# TODO: these should be copied from config/galaxy.ini
return dict(
static_enabled=True,
static_cache_time=360,
static_dir=static_dir,
static_images_dir=os.path.join(static_dir, 'images', ''),
static_favicon_dir=os.path.join(static_dir, 'favicon.ico'),
static_scripts_dir=os.path.join(static_dir, 'scripts', ''),
static_style_dir=os.path.join(static_dir, 'style'),
static_robots_txt=os.path.join(static_dir, 'robots.txt'),
) | cc4cc77d2bac7b2ca2718135f0ecfd31d5ce7f1e | 3,655,157 |
from flask import Flask
def application():
""" Flask application fixture. """
def _view():
return 'OK', 200
application = Flask('test-application')
application.testing = True
application.add_url_rule('/', 'page', view_func=_view)
return application | bd9168a66cb8db8c7a2b816f1521d5633fbdcbf8 | 3,655,158 |
def get_qe_specific_fp_run_inputs(
configure, code_pw, code_wannier90, code_pw2wannier90,
get_repeated_pw_input, get_metadata_singlecore
):
"""
Creates the InSb inputs for the QE fp_run workflow. For the
higher-level workflows (fp_tb, optimize_*), these are passed
in the 'fp_run' namespace.
"""
def inner():
return {
'scf': get_repeated_pw_input(),
'bands': {
'pw': get_repeated_pw_input()
},
'to_wannier': {
'nscf': get_repeated_pw_input(),
'wannier': {
'code': code_wannier90,
'metadata': get_metadata_singlecore()
},
'pw2wannier': {
'code': code_pw2wannier90,
'metadata': get_metadata_singlecore()
}
}
}
return inner | b0f8fd6536a237ade55139ef0ec6daaad8c0fb08 | 3,655,159 |
def _get_cohort_representation(cohort, course):
"""
Returns a JSON representation of a cohort.
"""
group_id, partition_id = cohorts.get_group_info_for_cohort(cohort)
assignment_type = cohorts.get_assignment_type(cohort)
return {
'name': cohort.name,
'id': cohort.id,
'user_count': cohort.users.filter(courseenrollment__course_id=course.location.course_key,
courseenrollment__is_active=1).count(),
'assignment_type': assignment_type,
'user_partition_id': partition_id,
'group_id': group_id,
} | aaa2c7b9c53e3a49ebc97e738077a0f6873d0559 | 3,655,160 |
import json
def config_string(cfg_dict):
""" Pretty-print cfg_dict with one-line queries """
upper_level = ["queries", "show_attributes", "priority", "gtf", "bed", "prefix", "outdir", "threads", "output_by_query"]
query_level = ["feature", "feature_anchor", "distance", "strand", "relative_location", "filter_attribute", "attribute_values", "internals", "name"]
upper_lines = []
for upper_key in upper_level:
if upper_key == "queries":
query_lines = "\"queries\":[\n"
#Convert sets to lists
for query in cfg_dict["queries"]:
for key in query:
if type(query[key]) == set:
query[key] = list(query[key])
query_strings = [json.dumps(query, sort_keys=True) for query in cfg_dict["queries"]]
query_lines += " " + ",\n ".join(query_strings) + "\n ]"
upper_lines.append(query_lines)
elif upper_key == "show_attributes" and upper_key in cfg_dict:
upper_lines.append("\"{0}\": {1}".format(upper_key, json.dumps(cfg_dict[upper_key])))
else:
if upper_key in cfg_dict:
upper_lines.append("\"{0}\": \"{1}\"".format(upper_key, cfg_dict[upper_key]))
config_string = "{\n" + ",\n".join(upper_lines) + "\n}\n"
return(config_string) | c6533512b6f87fea1726573c0588bbd3ddd54e41 | 3,655,161 |
import geopandas as gpd
import numpy as np
def area_km2_per_grid(infra_dataset, df_store):
"""Total area in km2 per assettype per grid, given in geographic coordinates
Arguments:
*infra_dataset* : a shapely object with WGS-84 coordinates
*df_store* : (empty) geopandas dataframe containing coordinates per grid for each grid
Returns:
area in km2 per assettype per grid in dataframe (with column = {asset}_km2 and row = the grid)
"""
asset_list = []
for asset in infra_dataset.asset.unique():
if not "{}_count".format(asset) in df_store.columns: df_store.insert(1, "{}_count".format(asset), "") #add assettype as column after first column for count calculations
if not "{}_km2".format(asset) in df_store.columns: df_store.insert(1, "{}_km2".format(asset), "") #add assettype as column after first column for area calculations
asset_list.append(asset)
for grid_row in df_store.itertuples():
grid_cell = grid_row.geometry #select grid
try:
asset_clip = gpd.clip(infra_dataset, grid_cell) #clip infra data using GeoPandas clip
#count per asset type
count = asset_clip.asset.value_counts() #count number of assets per asset type
for asset_type in asset_list:
if asset_type in count.index:
df_store.loc[grid_row.Index, "{}_count".format(asset_type)] = count.get(key = asset_type)
else:
df_store.loc[grid_row.Index, "{}_count".format(asset_type)] = 0
#calculate area for each asset in clipped infrastructure grid
asset_clip.insert(1, "area_km2", "") #add assettype as column after first column for length calculations
for polygon_object in asset_clip['index']:
asset_clip.loc[polygon_object, "area_km2"] = polygon_area((asset_clip.loc[asset_clip['index']==polygon_object].geometry.item())) #calculate area per object and put in dataframe
area_per_type = asset_clip.groupby(['asset'])['area_km2'].sum() #get total length per asset_type in grid
for asset_type in asset_list:
if asset_type in area_per_type.index:
df_store.loc[grid_row.Index, "{}_km2".format(asset_type)] = area_per_type.get(key = asset_type)
else:
df_store.loc[grid_row.Index, "{}_km2".format(asset_type)] = 0
except:
print("Grid number {} raises a ValueError, area has not been clipped".format(grid_row.index))
for asset_type in asset_list:
df_store.loc[grid_row.Index, "{}_count".format(asset_type)] = np.nan
df_store.loc[grid_row.Index, "{}_km2".format(asset_type)] = np.nan
return df_store | 3d4b516429235f6b20a56801b0ef98e3fd80306d | 3,655,162 |
from click.testing import CliRunner
def cli_runner(script_info):
"""Create a CLI runner for testing a CLI command.
Scope: module
.. code-block:: python
def test_cmd(cli_runner):
result = cli_runner(mycmd)
assert result.exit_code == 0
"""
def cli_invoke(command, input=None, *args):
return CliRunner().invoke(command, args, input=input, obj=script_info)
return cli_invoke | 3593354dd190bcc36f2099a92bad247c9f7c7cf1 | 3,655,163 |
def sgf_to_gamestate(sgf_string):
"""
Creates a GameState object from the first game in the given collection
"""
# Don't Repeat Yourself; parsing handled by sgf_iter_states
for (gs, move, player) in sgf_iter_states(sgf_string, True):
pass
# gs has been updated in-place to the final state by the time
# sgf_iter_states returns
return gs | 1c1a6274769abb654d51dc02d0b3182e7a9fd1f6 | 3,655,164 |
def get_titlebar_text():
"""Return (style, text) tuples for startup."""
return [
("class:title", "Hello World!"),
("class:title", " (Press <Exit> to quit.)"),
] | 947b94f2e85d7a172f5c0ba84db0ec78045a0f6c | 3,655,165 |
import json
def image_fnames_captions(captions_file, images_dir, partition):
"""
Loads annotations file and return lists with each image's path and caption
Arguments:
partition: string
either 'train' or 'val'
Returns:
all_captions: list of strings
list with each image caption
all_img_paths: list of paths as strings
list with each image's path to file
"""
with open(captions_file, 'r') as f:
annotations = json.load(f)
all_captions = []
all_img_paths = []
for annot in annotations['annotations']:
caption = '<start> ' + annot['caption'] + ' <end>'
image_id = annot['image_id']
full_coco_image_path = images_dir / ('COCO_{}2014_'.format(partition) + \
'{:012d}.jpg'.format(image_id))
all_img_paths.append(full_coco_image_path)
all_captions.append(caption)
return all_captions, all_img_paths | f592decefaded079fca92091ad795d67150b4ca8 | 3,655,166 |
def build_menu(
buttons: list,
columns: int = 3,
header_button=None,
footer_button=None,
resize_keyboard: bool = True
):
"""Хелпер для удобного построения меню."""
menu = [buttons[i:i + columns] for i in range(0, len(buttons), columns)]
if header_button:
menu.insert(0, [header_button])
if footer_button:
menu.append([footer_button])
return ReplyKeyboardMarkup(menu, resize_keyboard=resize_keyboard) | d375a7af5f5e45e4b08520561c70d8c2664af4ef | 3,655,167 |
def pandas_dataframe_to_unit_arrays(df, column_units=None):
"""Attach units to data in pandas dataframes and return united arrays.
Parameters
----------
df : `pandas.DataFrame`
Data in pandas dataframe.
column_units : dict
Dictionary of units to attach to columns of the dataframe. Overrides
the units attribute if it is attached to the dataframe.
Returns
-------
Dictionary containing united arrays with keys corresponding to the dataframe
column names.
"""
if not column_units:
try:
column_units = df.units
except AttributeError:
raise ValueError('No units attribute attached to pandas '
'dataframe and col_units not given.')
# Iterate through columns attaching units if we have them, if not, don't touch it
res = {}
for column in df:
if column in column_units and column_units[column]:
res[column] = df[column].values * units(column_units[column])
else:
res[column] = df[column].values
return res | 41aff3bd785139f4d99d677e09def2764448acf2 | 3,655,168 |
from typing import Any
def is_empty(value: Any) -> bool:
"""
empty means given value is one of none, zero length string, empty list, empty dict
"""
if value is None:
return True
elif isinstance(value, str):
return len(value) == 0
elif isinstance(value, list):
return len(value) == 0
elif isinstance(value, dict):
return len(value) == 0
else:
return False | fd4c68dd5f0369e0836ab775d73424360bad9219 | 3,655,169 |
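A few illustrative calls; note that values such as 0 or a whitespace-only string are not considered empty.

print(is_empty(None), is_empty(""), is_empty([]), is_empty({}))  # True True True True
print(is_empty(0), is_empty("  "), is_empty([None]))             # False False False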
import os
def get_worksheets (path, **kwargs):
"""
Gets all available worksheets within a xlsx-file and returns a list
:param path: Path to excel file
:type path: str
:return: Returns a list with all worksheets within the excel-file
:rtype: list
"""
if not os.path.isabs(path):
path = os.path.join(_jinjamator.task_base_dir, path)
xlsx = XLSXReader(
path, "Sheet1", kwargs.get("cache", True)
)
return xlsx.get_worksheets() | 17b2f72cbda1c2abfb190d9592d3f7c7520ccc83 | 3,655,170 |
import caffe_parser
import numpy as np
def read_caffe_mean(caffe_mean_file):
"""
Reads caffe formatted mean file
:param caffe_mean_file: path to caffe mean file, presumably with 'binaryproto' suffix
:return: mean image, converted from BGR to RGB format
"""
mean_blob = caffe_parser.caffe_pb2.BlobProto()
with open(caffe_mean_file, 'rb') as f:
mean_blob.ParseFromString(f.read())
img_mean_np = np.array(mean_blob.data)
img_mean_np = img_mean_np.reshape(mean_blob.channels, mean_blob.height, mean_blob.width)
# swap channels from Caffe BGR to RGB
img_mean_np[[0, 2], :, :] = img_mean_np[[2, 0], :, :]
return img_mean_np | 6835bf429f3caca6308db450bb18e9254ff2b9a0 | 3,655,171 |
import numpy as np
def estimate_pauli_sum(pauli_terms,
basis_transform_dict,
program,
variance_bound,
quantum_resource,
commutation_check=True,
symmetrize=True,
rand_samples=16):
"""
Estimate the mean of a sum of pauli terms to set variance
The sample variance is calculated by
.. math::
\begin{align}
\mathrm{Var}[\hat{\langle H \rangle}] = \sum_{i, j}h_{i}h_{j}
\mathrm{Cov}(\hat{\langle P_{i} \rangle}, \hat{\langle P_{j} \rangle})
\end{align}
The expectation value of each Pauli operator (term and coefficient) is
also returned. It can be accessed through the named-tuple field
`pauli_expectations'.
:param pauli_terms: list of pauli terms to measure simultaneously or a
PauliSum object
:param basis_transform_dict: basis transform dictionary where the key is
the qubit index and the value is the basis to
rotate into. Valid basis is [I, X, Y, Z].
:param program: program generating a state to sample from. The program
is deep copied to ensure no mutation of gates or program
is perceived by the user.
:param variance_bound: Bound on the variance of the estimator for the
PauliSum. Remember this is the SQUARE of the
standard error!
:param quantum_resource: quantum abstract machine object
:param Bool commutation_check: Optional flag toggling a safety check
ensuring all terms in `pauli_terms`
commute with each other
:param Bool symmetrize: Optional flag toggling symmetrization of readout
:param Int rand_samples: number of random realizations for readout symmetrization
:return: estimated expected value, expected value of each Pauli term in
the sum, covariance matrix, variance of the estimator, and the
number of shots taken. The objected returned is a named tuple with
field names as follows: expected_value, pauli_expectations,
covariance, variance, n_shots.
`expected_value' == coef_vec.dot(pauli_expectations)
:rtype: EstimationResult
"""
if not isinstance(pauli_terms, (list, PauliSum)):
raise TypeError("pauli_terms needs to be a list or a PauliSum")
if isinstance(pauli_terms, PauliSum):
pauli_terms = pauli_terms.terms
# check if each term commutes with everything
if commutation_check:
if len(commuting_sets(sum(pauli_terms))) != 1:
raise CommutationError("Not all terms commute in the expected way")
program = program.copy()
pauli_for_rotations = PauliTerm.from_list(
[(value, key) for key, value in basis_transform_dict.items()])
program += get_rotation_program(pauli_for_rotations)
qubits = sorted(list(basis_transform_dict.keys()))
if symmetrize:
theta = program.declare("ro_symmetrize", "REAL", len(qubits))
for (idx, q) in enumerate(qubits):
program += [RZ(np.pi/2, q), RY(theta[idx], q), RZ(-np.pi/2, q)]
ro = program.declare("ro", "BIT", memory_size=len(qubits))
for num, qubit in enumerate(qubits):
program.inst(MEASURE(qubit, ro[num]))
coeff_vec = np.array(
list(map(lambda x: x.coefficient, pauli_terms))).reshape((-1, 1))
# upper bound on samples given by IV of arXiv:1801.03524
num_sample_ubound = 10 * int(np.ceil(np.sum(np.abs(coeff_vec))**2 / variance_bound))
if num_sample_ubound <= 2:
raise ValueError("Something happened with our calculation of the max sample")
if symmetrize:
if min(STANDARD_NUMSHOTS, num_sample_ubound)//rand_samples == 0:
raise ValueError(f"The number of shots must be larger than {rand_samples}.")
program = program.wrap_in_numshots_loop(min(STANDARD_NUMSHOTS, num_sample_ubound)//rand_samples)
else:
program = program.wrap_in_numshots_loop(min(STANDARD_NUMSHOTS, num_sample_ubound))
binary = quantum_resource.compiler.native_quil_to_executable(basic_compile(program))
results = None
sample_variance = np.infty
number_of_samples = 0
tresults = np.zeros((0, len(qubits)))
while (sample_variance > variance_bound and number_of_samples < num_sample_ubound):
if symmetrize:
# for some number of times sample random bit string
for r in range(rand_samples):
rand_flips = np.random.randint(low=0, high=2, size=len(qubits))
temp_results = quantum_resource.run(binary, memory_map={'ro_symmetrize': np.pi * rand_flips})
tresults = np.vstack((tresults, rand_flips ^ temp_results))
else:
tresults = quantum_resource.run(binary)
number_of_samples += len(tresults)
parity_results = get_parity(pauli_terms, tresults)
# Note: easy improvement would be to update mean and variance on the fly
# instead of storing all these results.
if results is None:
results = parity_results
else:
results = np.hstack((results, parity_results))
# calculate the expected values....
covariance_mat = np.cov(results, ddof=1)
sample_variance = coeff_vec.T.dot(covariance_mat).dot(coeff_vec) / (results.shape[1] - 1)
return EstimationResult(expected_value=coeff_vec.T.dot(np.mean(results, axis=1)),
pauli_expectations=np.multiply(coeff_vec.flatten(), np.mean(results, axis=1).flatten()),
covariance=covariance_mat,
variance=sample_variance,
n_shots=results.shape[1]) | 8dc63069229cf83164196c1ca2e29d20c5be2756 | 3,655,172 |
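# A minimal sketch of the shot-budget bound used by the estimator above
# (hypothetical coefficients h_i; numpy is the only dependency):
import numpy as np
coeffs = np.array([0.5, -0.25, 1.0])   # hypothetical Pauli coefficients h_i
variance_bound = 1e-3                  # desired squared standard error
num_sample_ubound = 10 * int(np.ceil(np.sum(np.abs(coeffs))**2 / variance_bound))
# (0.5 + 0.25 + 1.0)**2 / 1e-3 = 3062.5, so at most 10 * 3063 = 30630 shots are taken.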
def GetQuasiSequenceOrderp(ProteinSequence, maxlag=30, weight=0.1, distancematrix={}):
"""
###############################################################################
Computing quasi-sequence-order descriptors for a given protein.
    [1]: Kuo-Chen Chou. Prediction of Protein Subcellular Locations by
Incorporating Quasi-Sequence-Order Effect. Biochemical and Biophysical
Research Communications 2000, 278, 477-483.
Usage:
result = GetQuasiSequenceOrderp(protein,maxlag,weight,distancematrix)
Input: protein is a pure protein sequence
maxlag is the maximum lag and the length of the protein should be larger
than maxlag. default is 30.
weight is a weight factor. please see reference 1 for its choice. default is 0.1.
distancematrix is a dict form containing 400 distance values
Output: result is a dict form containing all quasi-sequence-order descriptors
###############################################################################
"""
result = dict()
result.update(GetQuasiSequenceOrder1(ProteinSequence, maxlag, weight, distancematrix))
result.update(GetQuasiSequenceOrder2(ProteinSequence, maxlag, weight, distancematrix))
return result | f59c60826e2dc40db6827ac263423cf69c338d89 | 3,655,173 |
def check(lst: list, search_element: int) -> bool:
"""Check if the list contains the search_element."""
    return any(i == search_element for i in lst) | 15f35ceff44e9fde28f577663e79a2216ffce148 | 3,655,174
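# Usage sketch for check() above:
assert check([3, 1, 4, 1, 5], 4) is True
assert check([3, 1, 4, 1, 5], 9) is False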
import numpy as np
def halfcube(random_start=0, random_end=32, halfwidth0=1, pow=-1):
"""
Produce a halfcube with given dimension and decaying power
:param random_start: decay starting parameter
:param random_end: decay ending parameter
:param halfwidth0: base halfwidth
:param pow: decaying power
:return: A (random_end-random_start,) array
"""
ran=np.arange(random_start,random_end,dtype=float)
ran[0]=1.0
return ran**pow*halfwidth0 | eb3acfe76abf2ba2ddec73973a875ad7509cd265 | 3,655,175 |
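# Worked example for halfcube() above: four halfwidths decaying as 1/n
# from a base halfwidth of 2.0.
hw = halfcube(random_start=1, random_end=5, halfwidth0=2.0)
# hw == array([2.0, 1.0, 0.66666667, 0.5])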
def valid_passphrase(module, **kwargs):
"""Tests whether the given passphrase is valid for the specified device.
Return: <boolean> <error>"""
for req in ["device", "passphrase"]:
if req not in kwargs or kwargs[req] is None:
errmsg = "valid_passphrase: {0} is a required parameter".format(req)
return False, {"msg": errmsg}
is_keyfile = kwargs.get("is_keyfile", False)
slot = kwargs.get("slot", None)
args = ["cryptsetup", "open", "--test-passphrase", kwargs["device"]]
if slot is not None:
args.extend(["--key-slot", str(slot)])
_unused, err = run_cryptsetup(
module, args, passphrase=kwargs["passphrase"], is_keyfile=is_keyfile
)
if err:
errmsg = "valid_passphrase: We need a valid passphrase for {0}".format(
kwargs["device"]
)
return False, {"msg": errmsg, "err": err}
return True, None | c6355a4c75b8973372b5d01817fa59569746ed6c | 3,655,176 |
from hashlib import blake2b
def contract_address(deploy_hash_base16: str, fn_store_id: int) -> bytes:
"""
Should match what the EE does (new_function_address)
//32 bytes for deploy hash + 4 bytes ID
blake2b256( [0;32] ++ [0;4] )
deploy_hash ++ fn_store_id
"""
def hash(data: bytes) -> bytes:
h = blake2b(digest_size=32)
h.update(data)
return h.digest()
deploy_hash_bytes = bytes.fromhex(deploy_hash_base16)
counter_bytes = fn_store_id.to_bytes(4, "little")
data = deploy_hash_bytes + counter_bytes
return hash(data) | 0623209f88a59c1a2cbe8460603f920ff66575f1 | 3,655,177 |
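# Usage sketch for contract_address() above with a hypothetical all-zero
# 32-byte deploy hash; the result is a 32-byte blake2b-256 digest.
addr = contract_address("00" * 32, fn_store_id=0)
assert len(addr) == 32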
import json
def dump_js_escaped_json(obj, cls=EdxJSONEncoder):
"""
JSON dumps and escapes objects that are safe to be embedded in JavaScript.
Use this for anything but strings (e.g. dicts, tuples, lists, bools, and
numbers). For strings, use js_escaped_string.
The output of this method is also usable as plain-old JSON.
Usage:
Used as follows in a Mako template inside a <SCRIPT> tag::
var json_obj = ${obj | n, dump_js_escaped_json}
If you must use the cls argument, then use as follows::
var json_obj = ${dump_js_escaped_json(obj, cls) | n}
Use the "n" Mako filter above. It is possible that the default filter
may include html escaping in the future, and this ensures proper
escaping.
Ensure ascii in json.dumps (ensure_ascii=True) allows safe skipping of
Mako's default filter decode.utf8.
Arguments:
obj: The object soon to become a JavaScript escaped JSON string. The
object can be anything but strings (e.g. dicts, tuples, lists, bools, and
numbers).
cls (class): The JSON encoder class (defaults to EdxJSONEncoder).
Returns:
(string) Escaped encoded JSON.
"""
obj = list(obj) if isinstance(obj, type({}.values())) else obj # lint-amnesty, pylint: disable=isinstance-second-argument-not-valid-type, dict-values-not-iterating, line-too-long
json_string = json.dumps(obj, ensure_ascii=True, cls=cls)
json_string = _escape_json_for_js(json_string)
return json_string | eba36fbf101c0779fe5756fa6fbfe8f0d2c5686c | 3,655,178 |
from typing import NamedTuple
import numpy as np
def RawTuple(num_fields, name_prefix='field'):
    """
    Creates a tuple of `num_fields` untyped scalars.
"""
assert isinstance(num_fields, int)
assert num_fields >= 0
return NamedTuple(name_prefix, *([np.void] * num_fields)) | 3287a827099098e1550141e9e99321f75a9317f6 | 3,655,179 |
import numpy as np
def pose2mat(R, p):
    """ Convert a pose (rotation matrix R, position p) to a 4x4 homogeneous transformation matrix. """
p0 = p.ravel()
H = np.block([
[R, p0[:, np.newaxis]],
[np.zeros(3), 1]
])
return H | 626cbfcf5c188d4379f60b0e2d7b399aece67e8c | 3,655,180 |
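# Worked example for pose2mat() above: identity rotation and translation (1, 2, 3)
# produce the 4x4 homogeneous transform with p in the last column.
R = np.eye(3)
p = np.array([1.0, 2.0, 3.0])
H = pose2mat(R, p)
# H == [[1, 0, 0, 1],
#       [0, 1, 0, 2],
#       [0, 0, 1, 3],
#       [0, 0, 0, 1]]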
import numpy as _np
def _fill_missing_values(df=None):
"""replace missing values with NaN"""
# fills in rows where lake refroze in same season
df['WINTER'].replace(to_replace='"', method='ffill', inplace=True)
# use nan as the missing value
for headr in ['DAYS', 'OPENED', 'CLOSED']:
df[headr].replace(to_replace=['-', '--', '---'], value=_np.nan, inplace=True)
return df.sort_values(by=['WINTER']) | b86bfdac06d6e22c47d7b905d9cb7b5feba40fdb | 3,655,181 |
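# Usage sketch for _fill_missing_values() above; the rows below are hypothetical
# but follow the lake freeze-record layout the function expects.
import pandas as pd
df = pd.DataFrame({
    "WINTER": ["1856-57", '"', "1855-56"],
    "DAYS": ["121", "-", "118"],
    "OPENED": ["Dec 6", "--", "Dec 18"],
    "CLOSED": ["Apr 6", "---", "Apr 14"],
})
cleaned = _fill_missing_values(df)
# The '"' ditto mark in WINTER is forward-filled, the dash placeholders become
# NaN, and the rows come back sorted by WINTER.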
def csi_prelu(data, alpha, axis, out_dtype, q_params, layer_name=""):
"""Quantized activation relu.
Parameters
----------
data : relay.Expr
The quantized input data.
alpha : relay.Expr
The quantized alpha.
out_dtype : str
        Specifies the output data type; for mixed precision this can be uint8.
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.CSIPRelu(data, alpha, axis, out_dtype, q_params, layer_name) | 8e8e3c09ae7c3cb3b89f1229cfb83836682e5bc8 | 3,655,182 |
def json(filename):
"""Returns the parsed contents of the given JSON fixture file."""
content = contents(filename)
return json_.loads(content) | fc66afdfe1a04ac8ef65272e55283de98c145f8c | 3,655,183 |
import pandas as pd
from rpy2.robjects import pandas2ri, r
def _parse_assayData(assayData, assay):
"""Parse Rpy2 assayData (Environment object)
assayData: Rpy2 Environment object.
assay: An assay name indicating the data to be loaded.
Return a parsed expression dataframe (Pandas).
"""
pandas2ri.activate()
mat = assayData[assay] # rpy2 expression matrix object
data = pandas2ri.ri2py(mat)
features = pandas2ri.ri2py(r.rownames(mat))
samples = pandas2ri.ri2py(r.colnames(mat))
return pd.DataFrame(data, index=features, columns=samples) | aea2e5fe25eaf563fdd7ed981d7486fdf39098b4 | 3,655,184 |
def method_list():
""" list of available electronic structure methods
"""
return theory.METHOD_LST | d1ad84bc709db1973f83e5a743bf4aed21f98652 | 3,655,185 |
import HTSeq
def readReadQualities(fastqfile):
    """
    Reads a .fastq file and computes a read quality score for each read.
    input: fastq file
    output: dictionary; key = read id, value = computed read quality score
@type fastqfile: string
@param fastqfile: path to fastq file
@rtype: dictionary
@return: dictionary containing read ids and read qualities.
"""
fastq_file = HTSeq.FastqReader(fastqfile , "phred")
readdictionary = {}
for read in fastq_file:
readdictionary[read.name.split()[0]] = ComputeRQScore(read.qualstr)
print("\tReading Fastq file done!")
return readdictionary | 8af9c0fc0c2d8f3a4d2f93ef81489098cb572643 | 3,655,186 |
from typing import Optional
from typing import Any
from typing import Dict
async def default_field_resolver(
parent: Optional[Any],
args: Dict[str, Any],
ctx: Optional[Any],
info: "ResolveInfo",
) -> Any:
"""
Default callable to use as resolver for field which doesn't implement a
custom one.
:param parent: default root value or field parent value
:param args: computed arguments related to the resolved field
:param ctx: context passed to the query execution
:param info: information related to the execution and the resolved field
:type parent: Optional[Any]
:type args: Dict[str, Any]
:type ctx: Optional[Any]
:type info: ResolveInfo
:return: the computed field value
:rtype: Any
"""
# pylint: disable=unused-argument
try:
return getattr(parent, info.field_name)
except AttributeError:
pass
try:
return parent[info.field_name]
except (KeyError, TypeError):
pass
return None | d0458cc10a968c359c9165ac59be60ece0270c41 | 3,655,187 |
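# Behavior sketch for default_field_resolver() above; the stub info object is
# hypothetical and only provides the field_name attribute the resolver reads.
import asyncio
from types import SimpleNamespace

info = SimpleNamespace(field_name="title")
assert asyncio.run(default_field_resolver(SimpleNamespace(title="attr"), {}, None, info)) == "attr"
assert asyncio.run(default_field_resolver({"title": "key"}, {}, None, info)) == "key"
assert asyncio.run(default_field_resolver(None, {}, None, info)) is None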
from typing import List
from typing import Dict
def revision_list_to_str(diffs: List[Dict]) -> str:
"""Convert list of diff ids to a comma separated list, prefixed with "D"."""
return ', '.join([diff_to_str(d['id']) for d in diffs]) | fbaa4473daf1e4b52d089c801f2db46aa7485972 | 3,655,188 |
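# Expected output shape, assuming diff_to_str() (defined elsewhere in the module)
# renders an id as "D<id>":
# revision_list_to_str([{"id": 123}, {"id": 456}])  ->  "D123, D456"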
from typing import Optional
from pathlib import Path
import time
def get_path_of_latest_file() -> Optional[Path]:
"""Gets the path of the latest produced file that contains weight information"""
path = Path(storage_folder)
latest_file = None
time_stamp_latest = -1
for entry in path.iterdir():
if entry.is_file():
            if latest_file is None:
latest_file = entry
time_stamp_latest = time.mktime(get_time_tuple_from_filename(entry.name))
else:
time_stamp_latest = time.mktime(get_time_tuple_from_filename(latest_file.name))
time_stamp_current = time.mktime(get_time_tuple_from_filename(entry.name))
if time_stamp_current > time_stamp_latest:
latest_file = entry
# print_d(f"Latest file: {latest_file}")
return latest_file | a72308c4b3852429d6959552cc90779a4ee03dc5 | 3,655,189 |
def index():
"""
    A function that returns the home page when called upon
"""
#get all available news sources
news_sources = get_sources()
#get all news articles available
everything = get_everything()
print(everything)
    # title = 'Home - Find all the current news at your convenience'
return render_template('index.html', news_sources = news_sources, everything = everything) | 4cf1eacaa550ce1ffd0ed954b60a7c7adf1702a6 | 3,655,190 |
import base64
import logging
import traceback
import sys
def parse_secret_from_literal(literal):
"""Parse a literal string, into a secret dict.
    :param literal: String containing a key and a value. (e.g. 'KEY=VALUE')
:returns secret: Dictionary in the format suitable for sending
via http request.
"""
try:
key, value = literal.split("=", 1)
secret = {
key: {
"value": base64.b64encode(value.encode("utf-8")).decode("utf-8"),
"type": "env",
}
}
return secret
except ValueError as e:
logging.debug(traceback.format_exc())
logging.debug(str(e))
display_message(
'Option "{0}" is invalid: \n'
'For literal strings use "SECRET_NAME=VALUE" format'.format(literal),
msg_type="error",
)
sys.exit(1) | de1bc56d0e2d314fd56b8d58604fdd693362bda1 | 3,655,191 |
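# Worked example for parse_secret_from_literal() above: the value part is
# base64-encoded (b64("VALUE") == "VkFMVUU=").
secret = parse_secret_from_literal("PASSWORD=VALUE")
# secret == {"PASSWORD": {"value": "VkFMVUU=", "type": "env"}}
# A literal without "=" takes the error path (display_message + sys.exit(1)).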
def blur(img):
"""
:param img: SimpleImage, the input image
    :return: the processed, blurred image
    For every pixel, the function averages the RGB values of that pixel and its
    neighbors, and sets the averaged color as the corresponding pixel of the new image.
"""
sum_red = 0
sum_blue = 0
sum_green = 0
neighbors = 0
new_img = SimpleImage.blank(img.width, img.height)
for x in range(img.width):
for y in range(img.height):
new_pixel = new_img.get_pixel(x, y)
for i in range(-1, 2):
for j in range(-1, 2):
                    if 0 <= x + i <= img.width - 1 and 0 <= y + j <= img.height - 1:
sum_red += img.get_pixel(x + i, y + j).red
sum_blue += img.get_pixel(x + i, y + j).blue
sum_green += img.get_pixel(x + i, y + j).green
neighbors += 1
new_pixel.red = sum_red // neighbors
new_pixel.blue = sum_blue // neighbors
new_pixel.green = sum_green // neighbors
neighbors = 0
sum_red = 0
sum_blue = 0
sum_green = 0
return new_img | a4b9e98e97b7d4a27b76b8151fa91493b58fc4a6 | 3,655,192 |
def delete_project_api_document_annotations_url(document_id: int, annotation_id: int) -> str:
"""
Delete the annotation of a document.
:param document_id: ID of the document as integer
:param annotation_id: ID of the annotation as integer
:return: URL to delete annotation of a document
"""
return f'{KONFUZIO_HOST}/api/projects/{KONFUZIO_PROJECT_ID}/docs/{document_id}/' f'annotations/{annotation_id}/' | 470a9bb602c4a9327839f811f818e469747e6758 | 3,655,193 |
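# Shape of the generated URL; KONFUZIO_HOST and KONFUZIO_PROJECT_ID are module
# globals, shown here as placeholders:
# delete_project_api_document_annotations_url(1234, 42)
# -> "<KONFUZIO_HOST>/api/projects/<KONFUZIO_PROJECT_ID>/docs/1234/annotations/42/"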
import subprocess
def subprocess_call_wrapper(lst, stdin=None):
"""Wrapper around the subprocess.call functions."""
print_debug('About to run "%s"' % ' '.join(lst))
try:
ret = subprocess.call(lst, stdin=stdin)
except (OSError, IOError):
ret = 127 # an error code
except IndexError:
ret = 127 # an error code
except KeyboardInterrupt:
ret = 127 # an error code
print_debug('Command "%s" returned %d' % (lst[0] if lst else '', ret))
return ret == 0 | 144723c065e0e194be2d8e3f424fbfaa47259ec7 | 3,655,194 |
def GetHomeFunctorViaPose():
""" Deprecated.
Returns a function that will move the robot to the home position when called.
"""
js_home = GetPlanToHomeService()
req = ServoToPoseRequest()
pose_home = GetHomePoseKDL()
req.target = pm.toMsg(pose_home)
open_gripper = GetOpenGripperService()
move = GetPlanToPoseService()
servo_mode = GetServoModeService()
def home():
rospy.loginfo("HOME: set servo mode")
servo_mode("servo")
rospy.loginfo("HOME: open gripper to drop anything")
open_gripper()
rospy.loginfo("HOME: move to config home")
max_tries = 10
tries = 0
res1 = None
while tries < max_tries and (res1 is None or "failure" in res1.ack.lower()):
res1 = js_home(ServoToPoseRequest())
tries += 1
if res1 is None or "failure" in res1.ack.lower():
rospy.logerr(res1.ack)
raise RuntimeError("HOME(): error moving to home1: " + str(res1.ack))
rospy.loginfo("HOME: move to pose over objects")
res2 = None
tries = 0
while tries < max_tries and (res2 is None or "failure" in res2.ack.lower()):
res2 = move(req)
tries += 1
if res2 is None or "failure" in res2.ack.lower():
rospy.logerr("move failed:" + str(res2.ack))
raise RuntimeError("HOME(): error moving to pose over workspace" + str(res2.ack))
rospy.loginfo("HOME: done")
return home | 7160f326c1aa0249da16dbfbf6fd740774284a4a | 3,655,195 |
import requests
def getAveragePlatPrice(item_name):
"""
Get the current average price of the item on the Warframe marketplace.
Args:
item_name (str): The name of the item.
Returns:
float: the average platinum market price of the item.
"""
avg_price = -1
item_name = clean(item_name)
item_info = requests.get(API + item_name.replace(" ", "_") + "/statistics").json()
try:
avg_price = item_info["payload"]["statistics_closed"]["48hours"][0]['avg_price']
except KeyError:
print(item_name + " is not listed on warframe.market.")
return avg_price | 221abd20125df49f40cfe246869a321943c5afbc | 3,655,196 |
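# Usage sketch for getAveragePlatPrice() above (issues a live request against
# warframe.market; the item name is hypothetical):
# price = getAveragePlatPrice("Frost Prime Set")
# price is the 48-hour average platinum price, or -1 if the item is not listed.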
import numpy as np
from scipy import special as scyspecial
def mode_strength(n, kr, sphere_type='rigid'):
"""Mode strength b_n(kr) for an incident plane wave on sphere.
Parameters
----------
n : int
Degree.
kr : array_like
kr vector, product of wavenumber k and radius r_0.
sphere_type : 'rigid' or 'open'
Returns
-------
b_n : array_like
Mode strength b_n(kr).
References
----------
Rafaely, B. (2015). Fundamentals of Spherical Array Processing. Springer.
eq. (4.4) and (4.5).
"""
if sphere_type == 'open':
b_n = 4*np.pi*1j**n * scyspecial.spherical_jn(n, kr)
elif sphere_type == 'rigid':
b_n = 4*np.pi*1j**n * (scyspecial.spherical_jn(n, kr) -
(scyspecial.spherical_jn(n, kr, True) /
spherical_hn2(n, kr, True)) *
spherical_hn2(n, kr))
else:
        raise ValueError("sphere_type '{}' not implemented.".format(sphere_type))
return b_n | 888981e34d444934e1c4b3d25c3042deabbe5005 | 3,655,197 |
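# Worked example for mode_strength() above, open sphere, degree 0:
# b_0(kr) = 4*pi * j_0(kr), so at kr = 1 this is 4*pi*sin(1)/1 (about 10.6).
b0 = mode_strength(0, np.array([1.0]), sphere_type='open')
# b0[0] is approximately 4*np.pi*np.sin(1.0), as a complex value with zero imaginary part.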
from pathlib import Path
def data_dir(test_dir: Path) -> Path:
"""
Create a directory for storing the mock data set.
"""
_data_dir = test_dir / 'data'
_data_dir.mkdir(exist_ok=True)
return _data_dir | 3b204816252a2c87698197a416a4e2de218f639d | 3,655,198 |
import multiprocessing
def get_runtime_brief():
""" A digest version of get_runtime to be used more frequently """
return {"cpu_count": multiprocessing.cpu_count()} | 9dbb54c476d303bae401d52ce76197e094ee5d71 | 3,655,199 |