content (string, 22 to 815k chars) | id (int64, 0 to 4.91M)
---|---
def set_diff(seq0, seq1):
"""Return the set difference between 2 sequences as a list."""
return list(set(seq0) - set(seq1)) | 5,352,400 |
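A quick usage note (the result order is not guaranteed, since the computation goes through sets):
# sorted() is used here only to make the comparison order-independent
assert sorted(set_diff([1, 2, 2, 3], [2])) == [1, 3]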
def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer, output_mode):
"""Loads a data file into a list of input features."""
'''
output_mode: classification or regression
'''
if label_list is not None:
label_map = {label : i for i, label in enumerate(label_list)}
features = []
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
logger.info("Writing example %d of %d" % (ex_index, len(examples)))
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[:(max_seq_length - 2)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
segment_ids = [0] * len(tokens)
if tokens_b:
tokens += tokens_b + ["[SEP]"]
segment_ids += [1] * (len(tokens_b) + 1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
padding = [0] * (max_seq_length - len(input_ids))
input_ids += padding
input_mask += padding
segment_ids += padding
assert(len(input_ids) == max_seq_length)
assert(len(input_mask) == max_seq_length)
assert(len(segment_ids) == max_seq_length)
if label_list is not None:
if output_mode == "classification":
label_id = label_map[example.label]
elif output_mode == "regression":
label_id = float(example.label)
else:
raise KeyError(output_mode)
else:
label_id = None
features.append(
InputFeatures(tokens=tokens,
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id))
return features | 5,352,401 |
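A minimal sketch of the packing scheme described in the comments above, using a hypothetical pre-tokenized pair instead of a real WordPiece tokenizer (pack_pair is an illustrative helper, not part of the module):
def pack_pair(tokens_a, tokens_b, max_seq_length):
    # Mirrors the [CLS]/[SEP] convention: segment 0 for the first sequence, 1 for the second
    tokens = ["[CLS]"] + tokens_a + ["[SEP]"] + tokens_b + ["[SEP]"]
    segment_ids = [0] * (len(tokens_a) + 2) + [1] * (len(tokens_b) + 1)
    input_mask = [1] * len(tokens)
    padding = [0] * (max_seq_length - len(tokens))
    return tokens, segment_ids + padding, input_mask + padding

tokens, segment_ids, input_mask = pack_pair(["is", "this", "jack"], ["no"], max_seq_length=10)
# tokens      -> ['[CLS]', 'is', 'this', 'jack', '[SEP]', 'no', '[SEP]']
# segment_ids -> [0, 0, 0, 0, 0, 1, 1, 0, 0, 0]
# input_mask  -> [1, 1, 1, 1, 1, 1, 1, 0, 0, 0]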
def test_set_wrapper():
"""
Wrapper function to compute test datasets of fixed widths using multiprocessing.
Widths are defined in the parameter file.
Outputs are written to the Neural/ folder
"""
from .parameter import ModelParameters
import multiprocessing as mp
from .neural import test_dataset
a = ModelParameters()
widths = a.test_sigma_widths
p = mp.Pool() # Do all 5 in parallel
# imap_unordered is lazy, so consume the iterator and wait for the pool to finish
list(p.imap_unordered(test_dataset, widths))
p.close()
p.join()
return None | 5,352,402 |
def get_unapproved_csr_names(kubeconfig_path: str) -> List[str]:
"""
Returns a list of names of all CertificateSigningRequest resources which
are unapproved.
May raise SubprocessError
"""
return [
csr["metadata"]["name"]
for csr in oc_list(kubeconfig_path, "csr")
if not _has_condition(resource=csr, type="Approved", status="True")
] | 5,352,403 |
def iteritemsdeep(dct):
"""
Works like ``dict.iteritems`` but iterates over all descendant items
>>> dct = dict(a=1, b=2, c=dict(d=3, e=4))
>>> sorted(iteritemsdeep(dct))
[(('a',), 1), (('b',), 2), (('c', 'd'), 3), (('c', 'e'), 4)]
"""
for (key, val) in dct.items():
if isinstance(val, dict):
for (key_child, val_child) in iteritemsdeep(val):
yield ((key,) + key_child, val_child)
else:
yield ((key,), val) | 5,352,404 |
def nmea_decoder(sentence: str, data: dict, mag_var: float) -> None:
"""
Decodes a received NMEA 0183 sentence into variables and adds them to the current data store
:param sentence: received NMEA sentence
:param data: dict that the extracted variables are added to
:param mag_var: magnetic variation used to convert true headings to magnetic
"""
code = ""
try:
if len(sentence) > 9:
code = sentence[3:6]
if code in sentences:
sentence_data = get_sentence_data(sentence, sentences[code], mag_var)
if sentence_data.get('status', 'A') == 'A':
for n, v in sentence_data.items():
data[n] = v
else:
for n, v in sentence_data.items():
if n in ['time', 'date', 'status']:
data[n] = v
elif data.get(n):
del data[n]
except (AttributeError, ValueError, ) as err:
data['error'] = f"NMEA {code} sentence translation error: {err} when processing {sentence}"
print(data['error']) | 5,352,405 |
def function(n, m, f):
"""Assumes that n = m = 1. The argument f is a Python function that takesas input an n-bit string alpha and
returns as output an m-bit string f(alpha). See deutschTest for examples of f. This function returns the (n +
m)-qbit gate F that corresponds to f. """
F = np.zeros((2**(n+m),2**(n+m)), dtype=one.dtype)
for a in range(0,2**n):
for b in range(0,2**m):
alpha = string(n,a)
beta = string(m,b)
beta_new = addition(beta,f(alpha))
row_bits = alpha + beta_new
col_bits = alpha + beta
F[integer(row_bits)][integer(col_bits)] = 1 + 0j
return F | 5,352,406 |
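A self-contained sketch of the same construction, with assumed stand-ins for the module helpers string/integer/addition and the dtype of one (all names here are illustrative guesses, not the module's own code). For the one-bit identity function it reproduces the CNOT permutation matrix:
import numpy as np

def string(n, a):      # assumed: n-bit binary string of integer a
    return format(a, '0{}b'.format(n))

def integer(bits):     # assumed: integer value of a bit string
    return int(bits, 2)

def addition(b1, b2):  # assumed: bitwise XOR of two equal-length bit strings
    return ''.join('1' if x != y else '0' for x, y in zip(b1, b2))

def toy_gate(n, m, f):
    F = np.zeros((2 ** (n + m), 2 ** (n + m)), dtype=complex)
    for a in range(2 ** n):
        for b in range(2 ** m):
            alpha, beta = string(n, a), string(m, b)
            row_bits = alpha + addition(beta, f(alpha))
            col_bits = alpha + beta
            F[integer(row_bits)][integer(col_bits)] = 1 + 0j
    return F

print(toy_gate(1, 1, lambda alpha: alpha).real)
# [[1. 0. 0. 0.]
#  [0. 1. 0. 0.]
#  [0. 0. 0. 1.]
#  [0. 0. 1. 0.]]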
def get_study_list_qs(user, query_dict):
"""Gets a study list query set annotated with response counts.
TODO: Factor in all the query mutation from the (view) caller.
TODO: Upgrade to Django 2.x and use the improved query mechanisms to clean this up.
Args:
user: django.utils.functional.SimpleLazyObject masquerading as a user.
query_dict: django.http.QueryDict from the self.request.GET property.
Returns:
A heavily annotated queryset for the list of studies.
"""
annotated_responses_qs = get_annotated_responses_qs().only(
"id",
"completed",
"completed_consent_frame",
"date_created",
"date_modified",
"is_preview",
)
queryset = (
studies_for_which_user_has_perm(user, StudyPermission.READ_STUDY_DETAILS)
# .select_related("lab")
# .select_related("creator")
.only(
"id",
"state",
"uuid",
"name",
"date_modified",
"short_description",
"image",
"comments",
"lab__name",
"creator__given_name",
"creator__family_name",
)
.exclude(state="archived")
.filter(lab_id__isnull=False, creator_id__isnull=False)
.annotate(
lab_name=F("lab__name"),
creator_name=Concat(
"creator__given_name", Value(" "), "creator__family_name"
),
completed_responses_count=SubqueryCount(
Response.objects.filter(
study=OuterRef("pk"),
is_preview=False,
completed_consent_frame=True,
completed=True,
).values("id")
),
incomplete_responses_count=SubqueryCount(
Response.objects.filter(
study=OuterRef("pk"),
is_preview=False,
completed_consent_frame=True,
completed=False,
).values("id")
),
valid_consent_count=SubqueryCount(
annotated_responses_qs.filter(
study=OuterRef("pk"), is_preview=False, current_ruling="accepted"
)
),
pending_consent_count=SubqueryCount(
annotated_responses_qs.filter(
study=OuterRef("pk"), is_preview=False, current_ruling="pending"
)
),
starting_date=Subquery(
StudyLog.objects.filter(study=OuterRef("pk"))
.order_by("-created_at")
.filter(action="active")
.values("created_at")[:1]
),
ending_date=Subquery(
StudyLog.objects.filter(study=OuterRef("pk"))
.order_by("-created_at")
.filter(action="deactivated")
.values("created_at")[:1]
),
)
)
# Request filtering
state = query_dict.get("state")
if state and state != "all":
if state == "myStudies":
queryset = queryset.filter(creator=user)
else:
queryset = queryset.filter(state=state)
match = query_dict.get("match")
if match:
queryset = queryset.filter(
reduce(
operator.and_,
(
Q(name__icontains=term) | Q(short_description__icontains=term)
for term in match.split()
),
)
)
sort = query_dict.get("sort", "")
if "name" in sort:
queryset = queryset.order_by(
Lower("name").desc() if "-" in sort else Lower("name").asc()
)
elif "beginDate" in sort:
queryset = queryset.order_by("starting_date")
elif "endDate" in sort:
queryset = queryset.order_by("ending_date")
return queryset | 5,352,407 |
def get_life_stages(verbose: bool = False) -> pd.DataFrame:
"""Get table of life stages.
Parameters
----------
verbose : bool
If True, prints the SQL statement used to query the database.
Returns
-------
pandas DataFrame
"""
return __basic_query(LifeStage, verbose=verbose) | 5,352,408 |
def bytes_to(value_in_bytes: float, rnd: int | None = ...) -> str:
"""
:param value_in_bytes: the value in bytes to convert
:param rnd: number of digits to round to
:return: formatted string
"""
sizes = ["bytes", "KB", "MB", "GB", "TB"]
now = int()
while value_in_bytes > 1024:
value_in_bytes /= 1024
now += 1
if rnd is not ...:
value_in_bytes = round(value_in_bytes, rnd)
return f"{value_in_bytes} {sizes[now]}" | 5,352,409 |
def lambda_handler(event, context):
"""Main Function"""
page_iterator = PAGINATOR.paginate(**OPERATION_PARAMETERS)
for page in page_iterator:
functions = page['Functions']
for function in functions:
funct = {
"Name": function['FunctionName'],
"Version": function['Version'],
"CodeSize": function['CodeSize']
}
funct = json.dumps(funct)
ALL_FUNCTIONS.add(funct)
total = 0
for i in sorted(ALL_FUNCTIONS):
i = json.loads(i)
print("{function:48}:{version:8} {size:,.2f}".format(
function=i['Name'], version=i['Version'], size=i['CodeSize']))
total += i['CodeSize']
# Convert bytes to MB
total = total / 1024 / 1024
data = "Lambda code storage: {}".format(str(total))
print(data)
return {
'statusCode': 200,
'body': json.dumps(data)
} | 5,352,410 |
def _find_first_print(body):
""" This function finds the first print of something """
for (i, inst) in enumerate(body):
if isinstance(inst, ir.Print):
return i
return -1 | 5,352,411 |
def main():
"""
Handles parameters for the file to run
:return:
"""
input_path = sys.argv[1]
output_path = sys.argv[2]
support_threshold = int(sys.argv[3])
broadcast = 1
if len(sys.argv) > 4:
broadcast = int(sys.argv[4])
pcy = PCYFrequentItems(is_debug=True)
is_broadcast = (broadcast == 1)
pcy.frequent_items(input_path, output_path, support_threshold, is_broadcast) | 5,352,412 |
def clear_screen():
"""
Clears Python Interpreter Terminal Window Screen
"""
try:
command = 'cls' if os.name in ('nt', 'dos') else 'clear'
os.system(command)
except:
pass | 5,352,413 |
def list_mix(set_key, encoding, in_set = ""):
""" Returns: Seeded Random Shuffle of Input Set by Input Key. """
if in_set == "": char_set = list(encoding["set"])
else: char_set = in_set
seed(set_key)
return sample(char_set, len(char_set)) | 5,352,414 |
def leslie(f, s):
"""Create a Leslie matrix.
Given the length n array of fecundity coefficients ``f`` and the length n-1
array of survival coefficients ``s``, return the associated Leslie matrix.
Args:
f (cupy.ndarray): The "fecundity" coefficients.
s (cupy.ndarray): The "survival" coefficients, has to be 1-D. The
length of ``s`` must be one less than the length of ``f``, and it
must be at least 1.
Returns:
cupy.ndarray: The array is zero except for the first row, which is
``f``, and the first sub-diagonal, which is ``s``. The data-type of
the array will be the data-type of ``f[0]+s[0]``.
.. seealso:: :func:`scipy.linalg.leslie`
"""
if f.ndim != 1:
raise ValueError('Incorrect shape for f. f must be 1D')
if s.ndim != 1:
raise ValueError('Incorrect shape for s. s must be 1D')
n = f.size
if n != s.size + 1:
raise ValueError('Length of s must be one less than length of f')
if s.size == 0:
raise ValueError('The length of s must be at least 1.')
a = cupy.zeros((n, n), dtype=cupy.result_type(f, s))
a[0] = f
cupy.fill_diagonal(a[1:], s)
return a | 5,352,415 |
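Illustrative call, with values borrowed from the scipy.linalg.leslie documentation example (assumes cupy is available):
import cupy
f = cupy.array([0.1, 2.0, 1.0, 0.1])
s = cupy.array([0.2, 0.8, 0.7])
print(leslie(f, s))
# [[0.1 2.  1.  0.1]
#  [0.2 0.  0.  0. ]
#  [0.  0.8 0.  0. ]
#  [0.  0.  0.7 0. ]]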
def __convert_node(node, default_value='', default_flags=vsflags()):
"""Converts a XML node to a JSON equivalent."""
name = __get_attribute(node, 'Name')
logging.debug('Found %s named %s', node.tagName, name)
converted = {}
converted['name'] = name
switch = __get_attribute(node, 'Switch')
separator = __get_attribute(node, 'Separator')
converted['switch'] = __normalize_switch(switch, separator)
converted['comment'] = __get_attribute(node, 'DisplayName')
converted['value'] = default_value
# Check for the Flags attribute in case it was created during preprocessing
flags = __get_attribute(node, 'Flags')
if flags:
flags = flags.split(',')
else:
flags = default_flags
converted['flags'] = flags
return converted | 5,352,416 |
def generate(generations, population, nn_param_choices, dataset, dataset_TB_folder_name):
"""Generate a network with the genetic algorithm.
Args:
generations (int): Number of times to evolve the population
population (int): Number of networks in each generation
nn_param_choices (dict): Parameter choices for networks
dataset (str): Dataset to use for training/evaluating
dataset_TB_folder_name (str): Name of the parent folder that holds the multiple run tensorboard result.
"""
optimizer = Optimizer(nn_param_choices)
networks = optimizer.create_population(population)
# Evolve the generation.
for i in range(generations):
logging.info("***Doing generation %d of %d***" %
(i + 1, generations))
# Train and get accuracy for networks.
train_networks(networks, dataset, i, dataset_TB_folder_name)
# Get the average accuracy for this generation.
average_accuracy = get_average_accuracy(networks)
# Print out the average accuracy each generation.
logging.info("Generation average: %.2f%%" % (average_accuracy * 100))
logging.info('-'*80)
# Evolve, except on the last iteration.
if i != generations - 1:
# Do the evolution.
networks = optimizer.evolve(networks)
#pp = pprint.PrettyPrinter(indent=4)
#for network in networks:
# pp.pprint(network.network)
# Sort our final population.
networks = sorted(networks, key=lambda x: x.accuracy, reverse=True)
# Print out the top 5 networks.
print_networks(networks[:5]) | 5,352,417 |
def document(input_file, output_path=None, recursive=False, prefix=None):
"""
Handler for documenting CMake files or all files in a directory. Performs
preprocessing before handing off to document_single_file over all detected
files. Also generates index.rst files for all directories.
:param input_file: String locating a file or directory to document.
:param output_path: String pointing to the directory to place generated files,
will output to stdout if None
:param recursive: Whether to generate documentation for subdirectories or not.
:param prefix: Prefix to be prepended to all RST titles. In recursive mode,
root files will have their titles replaced by the prefix.
"""
input_path = os.path.abspath(input_file)
if not os.path.exists(input_path):
print(f"Error: File or directory \"{input_path}\" does not exist", file=sys.stderr)
exit(-1)
elif os.path.isdir(input_path):
last_dir_element = os.path.basename(os.path.normpath(input_file))
prefix = prefix if prefix is not None else last_dir_element
# Walk dir and add cmake files to list
for root, subdirs, filenames in os.walk(input_path):
# Sort filenames and subdirs in alphabetical order
filenames = sorted(filenames)
subdirs.sort()  # sort in place so os.walk also recurses through subdirectories in order
if output_path is not None:
path = os.path.join(output_path, os.path.relpath(root, input_path))
os.makedirs(path, exist_ok=True) # Make sure we have all the directories created
rel_path = os.path.relpath(root, input_path)
index = RSTWriter(rel_path)
if prefix is not None:
# If current file dir is same as root dir, replace "." with prefix
if index.title == ".":
index.title = prefix
else:
# Add prefix to beginning of header
index.title = prefix + "." + index.title
toctree = index.directive("toctree")
toctree.option("maxdepth", 2)
for file in [f for f in filenames if f.lower().endswith(".cmake")]:
toctree.text('.'.join(file.split('.')[:-1]))
if recursive:
for directory in subdirs:
toctree.text(os.path.join(directory, "index.rst"))
index.write_to_file(os.path.join(os.path.join(output_path, rel_path), "index.rst"))
for file in filenames:
if "cmake" == file.split(".")[-1].lower():
document_single_file(os.path.join(root, file), input_path, output_path, prefix)
if not recursive:
break
elif os.path.isfile(input_path):
if output_path is not None:
os.makedirs(output_path, exist_ok=True)
document_single_file(input_path, input_path, output_path, prefix)
else:
print("File is a special file (socket, FIFO, device file) and is unsupported", file=sys.stderr)
exit(1) | 5,352,418 |
def clean():
"""
cleans shopyo.db __pycache__ and migrations/
Parameters
----------
Returns
-------
None
...
"""
if os.path.exists("shopyo.db"):
os.remove("shopyo.db")
print("shopyo.db successfully deleted")
else:
print("shopyo.db doesn't exist")
if os.path.exists("__pycache__"):
shutil.rmtree("__pycache__")
print("__pycache__ successfully deleted")
else:
print("__pycache__ doesn't exist")
if os.path.exists("migrations"):
shutil.rmtree("migrations")
print("migrations successfully deleted")
else:
print("migrations folder doesn't exist") | 5,352,419 |
def clone(output, replace=None, *args, **kwargs):
"""
Use as theano.clone().
TODO: Something useful with non-symbolic output ?
"""
if not core.is_theano_object(output):
raise ValueError("`shim.graph.clone()` is undefined for non-symbolic outputs")
return core.gettheano().clone(output, replace, *args, **kwargs) | 5,352,420 |
def get_clustermgtd_heartbeat(clustermgtd_heartbeat_file_path):
"""Get clustermgtd's last heartbeat."""
# Use subprocess based method to read shared file to prevent hanging when NFS is down
# Do not copy to local. Different users need to access the file, but file should be writable by root only
# Only use last line of output to avoid taking unexpected output in stdout
heartbeat = (
check_command_output(
f"cat {clustermgtd_heartbeat_file_path}",
timeout=DEFAULT_COMMAND_TIMEOUT,
shell=True, # nosec
)
.splitlines()[-1]
.strip()
)
# Note: heartbeat must be written with datetime.strftime to convert localized datetime into str
# datetime.strptime will not work with str(datetime)
# Example timestamp written to heartbeat file: 2020-07-30 19:34:02.613338+00:00
return datetime.strptime(heartbeat, TIMESTAMP_FORMAT) | 5,352,421 |
def logout():
"""
Logs out user by deleting token cookie and redirecting to login page
"""
APP.logger.info('Logging out.')
resp = make_response(redirect(url_for('login_page',
_external=True,
_scheme=APP.config['SCHEME'])))
resp.delete_cookie(APP.config['TOKEN_NAME'])
return resp | 5,352,422 |
async def test_templates_with_valid_values(opp, calls):
"""Test templates with valid values."""
with assert_setup_component(1, "vacuum"):
assert await setup.async_setup_component(
opp,
"vacuum",
{
"vacuum": {
"platform": "template",
"vacuums": {
"test_vacuum": {
"value_template": "{{ 'cleaning' }}",
"battery_level_template": "{{ 100 }}",
"start": {"service": "script.vacuum_start"},
}
},
}
},
)
await opp.async_block_till_done()
await opp.async_start()
await opp.async_block_till_done()
_verify(opp, STATE_CLEANING, 100) | 5,352,423 |
def fileDescribe(*args, **kwargs):
"""
.. deprecated:: 0.42.0
Use :func:`file_describe()` instead.
"""
print("dxpy.fileDescribe is deprecated; please use file_describe instead.", file=sys.stderr)
return file_describe(*args, **kwargs) | 5,352,424 |
def ATR(df, n, high_column='High', low_column='Low', close_column='Close',
join=None, dropna=False, dtype=None):
"""
Average True Range (ATR), smoothed over an n-period window using Wilder's method
"""
high_series = df[high_column]
low_series = df[low_column]
close_prev_series = df[close_column].shift(1)
tr = np.max((
(high_series.values - low_series.values),
np.abs(high_series.values - close_prev_series.values),
np.abs(low_series.values - close_prev_series.values),
), 0)
tr = pd.Series(tr, name=type(join) is list and join[0] or join)
if len(tr) > n:
tr[n] = tr[1:n+1].mean()
nm1 = n - 1
for i in range(n+1, len(tr)):
tr[i] = (tr[i-1] * nm1 + tr[i]) / n
tr[:n] = np.nan
return out(df, tr, bool(join), dropna, dtype) | 5,352,425 |
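A small sketch of just the true-range term on a two-row frame, to make the formula concrete (the full function above additionally applies the n-period Wilder smoothing and the external out() helper):
import numpy as np
import pandas as pd

df = pd.DataFrame({'High': [10.0, 12.0], 'Low': [9.0, 10.5], 'Close': [9.5, 11.0]})
prev_close = df['Close'].shift(1)
tr = np.max((
    df['High'].values - df['Low'].values,
    np.abs(df['High'].values - prev_close.values),
    np.abs(df['Low'].values - prev_close.values),
), 0)
# tr[0] is NaN (no previous close); tr[1] == max(1.5, 2.5, 1.0) == 2.5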
def setup_logging(outdir):
"""
Setup logging system.
Log is written to 'mergeFastqs.log'.
Args:
outdir: Output directory
"""
logger = logging.getLogger("mergeFQs")
logger.setLevel(logging.DEBUG)
if not os.path.exists(outdir):
os.makedirs(outdir)
log_file = os.path.join(outdir, "mergeFastqs.log")
# create file handler which logs even debug messages
file_handler = logging.FileHandler(log_file)
file_handler.setLevel(logging.DEBUG)
# create console handler with a higher log level
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.INFO)
# create formatter and add it to the handlers
format_str = "[%(asctime)s] %(levelname)s %(name)s: %(message)s"
formatter = logging.Formatter(format_str, "%Y-%m-%d %H:%M:%S")
file_handler.setFormatter(formatter)
format_str = "[%(asctime)s] %(message)s"
formatter = logging.Formatter(format_str, "%H:%M:%S")
console_handler.setFormatter(formatter)
# add the handlers to logger
logger.addHandler(console_handler)
logger.addHandler(file_handler) | 5,352,426 |
def pytest_addoption(parser):
""" Load in config path. """
group = parser.getgroup(
"Aries Protocol Test Suite Configuration",
"Aries Protocol Test Suite Configuration",
after="general"
)
group.addoption(
"--sc",
"--suite-config",
dest='suite_config',
action="store",
metavar="SUITE_CONFIG",
help="Load suite configuration from SUITE_CONFIG",
)
group.addoption(
"-S",
"--select",
dest='select',
action='store',
metavar='SELECT_REGEX',
help='Run tests matching SELECT_REGEX. '
'Overrides tests selected in configuration.'
)
group.addoption(
"-O",
"--output",
dest="save_path",
action="store",
metavar="PATH",
help="Save interop profile to PATH."
)
group.addoption(
"-L",
"--list",
dest="list_tests",
action="store_true",
help="List available tests."
)
group.addoption(
"--show-dev-notes",
dest="dev_notes",
action="store_true",
help="Output log messages generated during testing for developers\n"
"take note of."
) | 5,352,427 |
def lookup(symbol):
"""Look up quote for symbol."""
# Contact API
try:
api_key = os.environ.get("API_KEY")
url = f"https://cloud-sse.iexapis.com/stable/stock/{urllib.parse.quote_plus(symbol)}/quote?token={api_key}"
response = requests.get(url)
response.raise_for_status()
except requests.RequestException:
return None
# Parse response
try:
quote = response.json()
return {
"name": quote["companyName"],
"price": float(quote["latestPrice"]),
"symbol": quote["symbol"]
}
except (KeyError, TypeError, ValueError):
return None | 5,352,428 |
def get_full_history(sender, dialog_id):
"""Download the full history for the selected dialog"""
page = 0
limit = 100
history = []
print('Downloading messages...')
while True:
sleep(REQUEST_DELAY)
offset = page * limit
try:
history[0:0] = sender.history(dialog_id, limit, offset)
print('.', end=' ', flush=True)
except IllegalResponseException:
print('\n{} messages found in selected dialog'.format(len(history)))
break
page += 1
print('')
return history | 5,352,429 |
def test_mul_same_number():
"""Test a case where we multiply
"""
result = num2 * num2
assert result.val == 4
assert result.jacobian(num2) == 4 | 5,352,430 |
def predict_transposition_cost(shape, perm, coefs=None):
"""
Given a shape and a permutation, predicts the cost of the
transposition.
:param shape: shape
:param perm: permutation
:param coefs: trained coefficients or None to get
the default ones
:return: predicted transposition cost as a non-negative float
"""
if coefs is None:
coefs = _ml_transpose_coefs
feat = compute_transposition_features(shape, perm)
res = 0
for k, v in feat.items():
res += v * coefs[k]
return max(0., res / 1000) | 5,352,431 |
def checkBuildAMR(parfile,cellfile,**kwargs):
"""
Purpose
-------
Check that BuildAMRfromParticles.f90 builds the cells around the particles
created by mkClouds.f90 in the right places.
Only cloud cells are plotted. If you want to include the field cells, in
BuildAMRfromParticles.f90's subroutine CountCells() remove the part
" .and. CurrentCell%phase.eq.1" from the if statement (and recompile).
Keywords
--------
Robs: Plot only within Robs kpc of center
Usage
-----
>>> checkBuildAMR('clouds_par.dat','clouds_cell.dat',Robs=.1)
"""
def _iltR(x,y,z,R):
return np.where((x<=R) & (y<=R) & (z<=R))
fig = plt.figure(figsize=(5,5))
ax = fig.add_subplot(111, projection='3d')
x,y,z = np.loadtxt(parfile,unpack=True)
print('n_par =', len(x))
if len(x) > 100000: print('WARNING: This is going to be slow!')
# ax.set_alpha(.01)
if 'Robs' in kwargs:
Robs = kwargs['Robs']
ax.set_xlim([-Robs,Robs])
ax.set_ylim([-Robs,Robs])
ax.set_zlim([-Robs,Robs])
ind = _iltR(x,y,z,Robs)
ax.scatter(x[ind],y[ind],z[ind],s=1,label='Particles')
else:
ax.scatter(x,y,z,s=1,label='Particles')
x,y,z = np.loadtxt(cellfile,unpack=True)
print('n_cell =', len(x))
if 'Robs' in kwargs:
ind = _iltR(x,y,z,Robs)
ax.scatter(x[ind],y[ind],z[ind],s=1,label='Cells (excluding ICM cells)')
else:
ax.scatter(x,y,z,s=1,label='Cells (excluding ICM cells)')
ax.legend() | 5,352,432 |
def dist(df):
"""
Calculate Euclidean distance on a dataframe.
Input columns are arranged as x0, y0, x1, y1 (matching what the expression below computes).
"""
return np.sqrt((df.iloc[:,0] - df.iloc[:,2])**2 + (df.iloc[:,1] - df.iloc[:,3])**2) | 5,352,433 |
def B_Calc(T, n=2):
"""
Calculate B (Constant in the mass transfer term).
:param T: cell operation temperature [K]
:type T : float
:param n: number of moles of electrons transferred in the balanced equation occurring in the fuel cell
:type n: int
:return: B as float
"""
try:
return (R * T) / (n * F)
except (TypeError, ZeroDivisionError):
return None | 5,352,434 |
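Worked example of the formula B = (R * T) / (n * F), using assumed values for the module-level constants (R = 8.314 J/(mol*K), F = 96485 C/mol; these numbers are not read from this file):
R, F = 8.314, 96485.0   # assumed gas constant and Faraday constant
T, n = 343.15, 2
B = (R * T) / (n * F)   # ~= 0.0148 V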
def get_page_likes(response):
"""Scan a page and create a dictionary of the image filenames
and displayed like count for each image. Return the
dictionary."""
# find all flowtow divs
flowtows = response.html.find_all('div', class_='flowtow')
result = dict()
for div in flowtows:
# get the filename from the form hidden input
input = div.find("input", attrs={'name': "filename"})
filename = input['value']
# find the likes element
likesel = div.find(class_='likes')
# grab the integer from this element
m = re.search(r'\d+', likesel.text)
if m:
likes = int(m.group())
else:
likes = 0
result[filename] = likes
return result | 5,352,435 |
def firm(K, eta, alpha, delta):
"""Calculate return, wage and aggregate production.
r = alpha * eta * K^(alpha-1) * L^(1-alpha) + (1-delta)
w = (1-alpha) * eta * K^(alpha) * L^(-alpha)
Y = eta * K^(alpha) * L^(1-alpha) + (1-delta) * K
Args:
K: aggregate capital,
eta: TFP value,
alpha: output elasticity,
delta: depreciation value.
Returns:
return: return (marginal product of capital),
wage: wage (marginal product of labor),
Y: aggregate production.
"""
L = tf.ones_like(K)
r = alpha * eta * K**(alpha - 1) * L**(1 - alpha) + (1 - delta)
w = (1 - alpha) * eta * K**alpha * L**(-alpha)
Y = eta * K**alpha * L**(1 - alpha) + (1 - delta) * K
return r, w, Y | 5,352,436 |
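A quick numpy check (a stand-in for the TensorFlow version above) that the two marginal products exhaust gross output, i.e. r*K + w*L == Y:
import numpy as np

K, eta, alpha, delta, L = 4.0, 1.0, 0.36, 0.1, 1.0
r = alpha * eta * K ** (alpha - 1) * L ** (1 - alpha) + (1 - delta)
w = (1 - alpha) * eta * K ** alpha * L ** (-alpha)
Y = eta * K ** alpha * L ** (1 - alpha) + (1 - delta) * K
assert np.isclose(r * K + w * L, Y)  # factor payments add up to production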
def test_function_of_add_user_command_with_data_well(session):
"""
GIVEN the add_user function
WHEN the function is called
THEN check for the user creation
"""
from app.commands import add_user
username = get_unique_username()
password = '123'
add_user(username, password)
from app.model import User
user = session.query(User).filter_by(username=username).first()
assert user
assert check_password_hash(user.password, password) | 5,352,437 |
def get_col(arr, col_name):
""" returns the column from a multi-dimensional array """
return map(lambda x : x[col_name], arr) | 5,352,438 |
def get_articles(id):
"""function that process the articles and a list of articles objects
"""
get_articles_url = articles_url.format(id, api_key)
with urllib.request.urlopen(get_articles_url) as url:
news_article_results = json.loads(url.read())
news_article_object = None
if news_article_results['articles']:
news_article_object = process_news_source(news_article_results['articles'])
return news_article_object | 5,352,439 |
def check_slot_exist(start_time,end_time):
"""
Description:
check_slot_exist is responsible for checking whether a slot already exists
before a volunteer can create it.
Parameters:
Takes two parameters of type datetime:
start_time:datetime
end_time:datetime
return:
returns Boolean type:
True or False:Boolean
"""
slot_data = read_from_local_data_file.read_from_file()
slots = slot_data['items']
for slot in slots:
end_time_slot = slot["end"]["dateTime"]
start_time_slot = slot["start"]["dateTime"]
if start_time >= start_time_slot.split("+",1)[0] and start_time <= end_time_slot.split("+",1)[0]:
if end_time >= start_time_slot.split("+",1)[0] and end_time <= end_time_slot.split("+",1)[0]:
return True
return False | 5,352,440 |
def format_relative_date(date):
"""Takes a datetime object and returns the date formatted as a string e.g. "3 minutes ago", like the real site.
This is based roughly on George Edison's code from StackApps:
http://stackapps.com/questions/1009/how-to-format-time-since-xxx-e-g-4-minutes-ago-similar-to-stack-exchange-site/1018#1018"""
now = datetime.datetime.now()
diff = (now - date).total_seconds()
# Anti-repetition! These simplify the code somewhat.
plural = lambda d: 's' if d != 1 else ''
frmt = lambda d: (int(diff // d), plural(int(diff // d)))
if diff < 60:
return '%d second%s ago' % frmt(1)
elif diff < 3600:
return '%d minute%s ago' % frmt(60)
elif diff < 86400:
return '%d hour%s ago' % frmt(3600)
elif diff < 172800:
return 'yesterday'
else:
return date.strftime('%b %d / %y - %H:%M') | 5,352,441 |
def task_install_book():
"""install the jupyter book and sphinx dependencies"""
if not jb:
yield dict(
name="install book deps",
actions=['pip install -r docs/requirements.txt'],
uptodate=[config_changed(jb)]
) | 5,352,442 |
def Jacobian_rkhs_gaussian(x, vf_dict, vectorize=False):
"""analytical Jacobian for RKHS vector field functions with Gaussian kernel.
Arguments
---------
x: :class:`~numpy.ndarray`
Coordinates where the Jacobian is evaluated.
vf_dict: dict
A dictionary containing RKHS vector field control points, Gaussian bandwidth,
and RKHS coefficients.
Essential keys: 'X_ctrl', 'beta', 'C'
Returns
-------
J: :class:`~numpy.ndarray`
Jacobian matrices stored as d-by-d-by-n numpy arrays evaluated at x.
d is the number of dimensions and n the number of coordinates in x.
"""
if x.ndim == 1:
K, D = con_K(x[None, :], vf_dict['X_ctrl'], vf_dict['beta'], return_d=True)
J = (vf_dict['C'].T * K) @ D[0].T
elif not vectorize:
n, d = x.shape
J = np.zeros((d, d, n))
for i, xi in enumerate(x):
K, D = con_K(xi[None, :], vf_dict['X_ctrl'], vf_dict['beta'], return_d=True)
J[:, :, i] = (vf_dict['C'].T * K) @ D[0].T
else:
K, D = con_K(x, vf_dict['X_ctrl'], vf_dict['beta'], return_d=True)
if K.ndim == 1: K = K[None, :]
J = np.einsum('nm, mi, njm -> ijn', K, vf_dict['C'], D)
return -2 * vf_dict['beta'] * J | 5,352,443 |
def predict_images(detection_graph: tf.Graph, image_path: str, output_path: str, output_csv_path: str,
threshold: float = 0.3, save_csv: bool = True) -> Tuple[np.ndarray]:
"""Predict detection on image
Args:
detection_graph (tf.Graph): Graph of model to detect
image_path (str): path to image
output_path (str): output folder to write detected images to
output_csv_path (str): output folder to write csv of detections to
threshold (float, optional): detection threshold. Defaults to 0.3.
save_csv (bool, optional): whether csv files of detection should be saved. Defaults to True.
Returns:
Tuple[np.ndarray]: tuple of np arrays (all_boxes, all_scores, all_classes, all_num_detections)
"""
data = pd.DataFrame(columns=[
'filename', 'width', 'height', 'class', 'score', 'xmin', 'ymin', 'xmax', 'ymax'])
all_boxes, all_scores, all_classes, all_num_detections = [], [], [], []
with detection_graph.as_default():
with tf.Session(graph=detection_graph) as sess:
for img in sorted(glob.glob(image_path)):
image_np, orig_w, orig_h = image_load_encode(img)
# Expand dimensions since the model expects images to have shape: [1, None, None, 3]
image_np_expanded = np.expand_dims(image_np, axis=0)
#print('Image expanded: ', image_np.shape, image_np_expanded.shape)
image_tensor = detection_graph.get_tensor_by_name(
'image_tensor:0')
# Each box represents a part of the image where a particular object was detected.
boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
# Each score represent how level of confidence for each of the objects.
# Score is shown on the result image, together with the class label.
scores = detection_graph.get_tensor_by_name(
'detection_scores:0')
classes = detection_graph.get_tensor_by_name(
'detection_classes:0')
num_detections = detection_graph.get_tensor_by_name(
'num_detections:0')
# Actual detection.
(boxes, scores, classes, num_detections) = sess.run(
[boxes, scores, classes, num_detections],
feed_dict={image_tensor: image_np_expanded})
#print('Boxes: ', boxes, 'scores', scores, 'classes', classes, 'num dets', num_detections)
if save_csv:
all_boxes.append(boxes)
all_scores.append(scores)
all_classes.append(classes)
all_num_detections.append(num_detections)
boxes = boxes[0] * np.array([[512, 512, 512, 512]])
scores = scores[0]
classes = classes[0]
num_detections = int(num_detections[0])
# convert boxes to xmin, ymin, xmax, ymax. Currently it is ymin, xmin, ymax, xmax
boxes = boxes[:, [1, 0, 3, 2]]
# find out where scores are greater than at threshold and change everything according to that
thresh_indices = np.where(scores >= threshold)[0]
boxes = boxes[thresh_indices]
scores = scores[thresh_indices]
classes = classes[thresh_indices]
boxes, scores = postprocess(boxes, scores, theta=args.theta)
# Visualization of the results of a detection, but only if output_path is provided
if output_path is not None:
image_np = draw_boxes(
image_np, boxes, scores, disable_thresh=True)
orig_name = img.split('/')[-1].split('\\')[-1]
img_output_path = os.path.join(output_path, orig_name)
cv2.imwrite(img_output_path, image_np)
# always saving data to dataframe
if save_csv:
_ = write_to_df(data, img, orig_w, orig_h, output_csv_path,
'spine', boxes, scores, disable_thresh=True)
print('[INFO] Finished detection of image '+img+'.')
return all_boxes, all_scores, all_classes, all_num_detections | 5,352,444 |
def load_pickle(file):
"""Gets the file from the cPickle file."""
f = open(file, 'r')
d = cPickle.load(f)
f.close()
logger = get_logger()
logger.info("file %s loaded" % file)
return d | 5,352,445 |
def test_register_op_with_extending_steps_works():
"""
Calling the custom pipeline operation with an argument should yield the same
arguments passed back as a result
:return:
"""
test_pipe = Pipeline(STEPS, **PIPELINE_DEF_KWARGS)
def custom_op(doc, context=None, settings=None, **kwargs):
return settings
custom_argument = {'argument': 1}
test_pipe.register_operation('CUSTOM_STEP', custom_op)
test_pipe.steps.append(('CUSTOM_STEP', custom_argument))
results = test_pipe(TEXT)
assert results['CUSTOM_STEP'] == custom_argument | 5,352,446 |
def get_dates_for_last_30_days(
end_date: date,
) -> Tuple[Tuple[date, date], Tuple[date, date]]:
"""Returns dates for running RCA on the last 30 days.
The first tuple contains t-61, t-31.
The second tuple contains t-30, t.
"""
rca_start_date = end_date - timedelta(days=30)
base_end_date = rca_start_date - timedelta(days=1)
base_start_date = base_end_date - timedelta(days=30)
return (base_start_date, base_end_date), (rca_start_date, end_date) | 5,352,447 |
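Concrete example of the two windows:
from datetime import date
base, rca = get_dates_for_last_30_days(date(2021, 3, 31))
# base == (date(2021, 1, 29), date(2021, 2, 28))   # t-61 .. t-31
# rca  == (date(2021, 3, 1), date(2021, 3, 31))    # t-30 .. t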
def get_3rd_friday():
"""获取当前月的第三个星期五"""
first_day_in_month = datetime.now().replace(day=1) # 本月第一天
# 获取当前月的所有星期5的日
fridays = [i for i in range(1, 28) if (first_day_in_month + timedelta(days=i - 1)).isoweekday() == 5]
if len(fridays) < 3:
raise Exception(f'获取当前月异常:{fridays}')
# 第三个星期五,是第几天
third_friday = fridays[2]
return datetime.now().replace(day=third_friday) | 5,352,448 |
def retrieve_article_pdf_from_ads(bibcode, eprint_or_pub="PUB"):
"""
Get the PDF file for a given bibcode
"""
endpoint = f"{eprint_or_pub.upper()}_PDF"
safe_bibcode = quote(bibcode)
pdf_filename = f"{safe_bibcode}_{eprint_or_pub.lower()}.pdf"
url = f"{LINK_GATEWAY_BASE_URL}/{safe_bibcode}/{endpoint}"
r = requests.get(
url,
allow_redirects=True,
)
with open(pdf_filename, 'wb') as f:
for chunk in r.iter_content(chunk_size=128):
f.write(chunk)
return pdf_filename | 5,352,449 |
def get_account_info():
"""account information"""
method = 'GET'
path = '/open/api/v2/account/info'
url = '{}{}'.format(ROOT_URL, path)
params = _sign(method, path)
response = requests.request(method, url, params=params)
return response.json() | 5,352,450 |
def str_product(string):
""" Calculate the product of all digits in a string """
product = 1
for i in string:
product *= int(i)
return product | 5,352,451 |
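For example:
assert str_product("234") == 24   # 2 * 3 * 4
assert str_product("") == 1       # empty product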
def convolution_filter_grad_backward(inputs, base_axis=1, pad=None, stride=None,
dilation=None, group=1, channel_last=False):
"""
Args:
inputs (list of nn.Variable): Incoming grads/inputs to/of the forward function.
kwargs (dict of arguments): Dictionary of the corresponding function arguments.
Return:
list of Variable: Return the gradients wrt inputs of the corresponding function.
"""
gdw = inputs[0]
dy = inputs[1]
x0 = inputs[2]
ctx = nn.get_current_context()
dfx = ConvolutionDataGrad(
ctx, base_axis, pad, stride, dilation, group, channel_last)
dfx.xshape = x0.shape
gdy = F.convolution(x0, gdw, None, base_axis, pad,
stride, dilation, group, channel_last)
gx0 = dfx(dy, gdw)
return gdy, gx0 | 5,352,452 |
def do_after_terminate(source, after_terminate):
"""Invokes an action after an on_complete() or on_error() event.
This can be helpful for debugging, logging, and other side effects
when completion or an error terminates an operation
after_terminate -- Action to invoke after on_completed or on_error is called
"""
def subscribe(observer, scheduler=None):
def on_completed():
observer.on_completed()
try:
after_terminate()
except Exception as err: # pylint: disable=broad-except
observer.on_error(err)
def on_error(exception):
observer.on_error(exception)
try:
after_terminate()
except Exception as err: # pylint: disable=broad-except
observer.on_error(err)
return source.subscribe(observer.on_next, on_error, on_completed, scheduler)
return Observable(subscribe) | 5,352,453 |
def _get_dload_scale(dload,
xyz_scale: float,
velocity_scale: float,
accel_scale: float,
force_scale: float) -> float:
"""
LOAD assumes force
"""
if dload.Type == 'LOAD':
scale = force_scale
elif dload.Type == 'DISP':
scale = xyz_scale
elif dload.Type == 'VELO':
scale = velocity_scale
elif dload.Type == 'ACCE':
scale = accel_scale
else:
raise RuntimeError(dload)
return scale | 5,352,454 |
def job_complete(job):
"""
Should be called whenever a job is completed.
This will update the Git server status and make
any additional jobs ready.
"""
job_complete_pr_status(job)
create_issue_on_fail(job)
start_canceled_on_fail(job)
ParseOutput.set_job_info(job)
ProcessCommands.process_commands(job)
job.update_badge()
all_done = job.event.set_complete_if_done()
if all_done:
event_complete(job.event)
unrunnable = job.event.get_unrunnable_jobs()
for norun in unrunnable:
logger.info("Job %s: %s will not run due to failed dependencies" % (norun.pk, norun))
job_wont_run(norun)
return all_done | 5,352,455 |
def direct(sp_script_str, run_dir, nsamp, njobs,
tgt_geo, bath_geo, thy_info, charge, mult,
smin=3.779, smax=11.339, spin_method=1, ranseeds=None):
""" Write input and run output.
:param sp_script_str: submission script for single-point calculation
:type sp_script_str: str
:param run_dir: directory where all OneDMin jobs are run
:type run_dir: str
:param nsamp: number of samples to run PER OneDMin job
:type nsamp: int
:param njobs: number of OneDMin instances to run in parallel
:type njobs: int
:param tgt_geo: geometry of the target molecule
:type tgt_geo: automol geometry data structure
:param bath_geo: geometry of the bath molecule
:type bath_geo: automol geometry data structure
:param thy_info: theory info object (prog, method, basis, orb_lbl)
:type thy_info: tuple(str, str, str, str)
:param charge: charge of the target-molecule complex
:type charge: int
:param mult: multiplicity of the target-molecule complex
:type mult: int
:param smin: minimum allowed intermolecular separation
:type smin: float
:param smax: maximum allowed intermolecular separation
:type smax: float
:param spin_method: parameter for the spin method
:type spin_method: int
:param ranseeds: seed integers for the orientational sampling, one per job
:type ranseeds: tuple(int)
:rtype: (tuple(str), str, tuple(str))
"""
# Write the main input files for all runs (breaks if ranseeds not given)
input_str_lst = ()
for ranseed in ranseeds:
input_str_lst += (
onedmin_io.writer.input_file(
nsamp, smin, smax,
ranseed=ranseed, spin_method=spin_method),
)
# Write the aux inputs; same for all runs
tgt_str = automol.geom.string(tgt_geo)
bath_str = automol.geom.string(bath_geo)
elstruct_inp_str, onedmin_exe_name = _set_pot_info(thy_info, charge, mult)
aux_dct = {
'target.xyz': tgt_str,
'bath.xyz': bath_str,
'qc.mol': elstruct_inp_str,
'qc.x': sp_script_str
}
# Write the script string for submission (for all runs)
script_str = onedmin_io.writer.submission_script(
njobs, run_dir, onedmin_exe_name)
# Run the code
output_str_lst = from_parallel_input_strings(
script_str, run_dir, input_str_lst,
aux_dct=aux_dct,
input_name=INPUT_NAME,
output_names=OUTPUT_NAMES)
return input_str_lst, elstruct_inp_str, output_str_lst | 5,352,456 |
def test_force_grid_wrap() -> None:
"""Ensures removing imports works as expected."""
test_input = "from bar import lib2\nfrom foo import lib6, lib7\n"
test_output = isort.code(
code=test_input, force_grid_wrap=2, multi_line_output=WrapModes.VERTICAL_HANGING_INDENT
)
assert (
test_output
== """from bar import lib2
from foo import (
lib6,
lib7
)
"""
)
test_output = isort.code(
code=test_input, force_grid_wrap=3, multi_line_output=WrapModes.VERTICAL_HANGING_INDENT
)
assert test_output == test_input | 5,352,457 |
def corpus_loader(folder: str):
"""
A corpus loader function which takes in a path to a
folder.
"""
# iterate through all file
for file in os.listdir(folder):
file_path = f"{folder}/{file}"
yield read_text_file(file_path) | 5,352,458 |
def naive_scheduler(task_qs, max_workers, old_worker_map, to_die_list, logger):
""" Return two items (as one tuple) dict kill_list :: KILL [(worker_type, num_kill), ...]
dict create_list :: CREATE [(worker_type, num_create), ...]
In this scheduler model, there is minimum 1 instance of each nonempty task queue.
"""
logger.debug("Entering scheduler...")
q_sizes = {}
q_types = []
new_worker_map = {}
# ## Added to disallow rescheduling workers we're waiting to spin down ## #
blocked_workers = 0
blocked_types = []
for w_type in to_die_list:
if to_die_list[w_type] > 0:
if old_worker_map is not None:
blocked_workers += old_worker_map[w_type] # These workers cannot be replaced.
blocked_types.append(w_type)
new_worker_map[w_type] = old_worker_map[w_type] # Keep the same.
# ## ****************************************************************# ## #
# Remove blocked workers from max workers.
max_workers -= blocked_workers
# Sum the size of each *available* (unblocked) task queue
sum_q_size = 0
for q_type in task_qs:
if q_type not in blocked_types:
q_types.append(q_type)
q_size = task_qs[q_type].qsize()
sum_q_size += q_size
q_sizes[q_type] = q_size
if sum_q_size > 0:
logger.info("[SCHEDULER] Total number of tasks is {}".format(sum_q_size))
# Set proportions of workers equal to the proportion of queue size.
for q_type in q_sizes:
ratio = q_sizes[q_type] / sum_q_size
new_worker_map[q_type] = int(math.floor(ratio * max_workers))
# CLEANUP: Assign the difference here to any random worker. Should be small.
difference = round(max_workers - sum(new_worker_map.values()))
logger.info("[SCHEDULER] Offset difference: {}".format(difference))
logger.info("[SCHEDULER] Queue Types: {}".format(q_types))
if len(q_types) > 0:
for i in range(difference):
win_q = random.choice(q_types)
new_worker_map[win_q] += 1
logger.debug(new_worker_map)
return new_worker_map
else:
return None | 5,352,459 |
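Illustrative call with two in-memory queues (a hypothetical setup, just to show the proportional split):
import logging
import queue

qa, qb = queue.Queue(), queue.Queue()
for _ in range(6):
    qa.put('task')
for _ in range(3):
    qb.put('task')
log = logging.getLogger('scheduler-demo')
new_map = naive_scheduler({'A': qa, 'B': qb}, max_workers=9, old_worker_map=None,
                          to_die_list={'A': 0, 'B': 0}, logger=log)
# Expected: {'A': 6, 'B': 3} -- workers split in proportion to queue sizes,
# with any floor remainder handed to a randomly chosen queue.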
def run(ceph_cluster, **kwargs) -> int:
"""
Method that executes the external test suite.
Args:
ceph_cluster The storage cluster participating in the test.
kwargs The supported keys are
config contains the test configuration
Returns:
0 - Success
1 - Failure
"""
LOG.info("Running RBD Sanity tests.")
config = kwargs["config"]
script_dir = config["script_path"]
script = config["script"]
branch = config.get("branch", "pacific")
nodes = config.get("nodes", [])
if nodes:
nodes = get_nodes_by_ids(ceph_cluster, nodes)
else:
# By default, tests would be executed on a single client node
nodes = [ceph_cluster.get_nodes(role="client")[0]]
for node in nodes:
one_time_setup(node, branch=branch)
cmd = f"cd ceph/{script_dir}; sudo bash {script}"
if script == "*":
cmd = f"cd ceph/{script_dir}; for test in $(ls); do sudo bash $test; done"
node.exec_command(cmd=cmd, check_ec=True, timeout=1200)
return 0 | 5,352,460 |
def search_item(search_term, next=False, page=0, board=0):
"""function to search and return comments"""
if next == False:
page = requests.get("https://www.nairaland.com/search?q=" + urllib.parse.quote_plus(str(search_term)) + "&board="+str(board))
else:
page = requests.get("https://www.nairaland.com/search/"
+ str(search_term) + "/0/"+str(board)+"/0/1" + str(page))
soup = BeautifulSoup(page.content, 'html.parser')
comments = soup.findAll("div", {"class": "narrow"})
return comments | 5,352,461 |
def is_valid_action(state, x, y, direction):
"""
Checks if moving the piece at given x, y coordinates in the given direction is valid, given the current state.
:param state: the current state
:param x: the x coordinate of the piece
:param y: the y coordinate of the piece
:param direction: the direction to travel with this action
:return: True if the action is valid, False otherwise
"""
new_x = x + X_MOVEMENT_DIFFS[direction]
new_y = y + Y_MOVEMENT_DIFFS[direction]
return is_within_bounds(new_x, new_y) and is_free_square(state, new_x, new_y) | 5,352,462 |
def range_(minimum, maximum):
"""
A validator that raises a :exc:`ValueError` if the initializer is called
with a value that does not belong in the [minimum, maximum] range. The
check is performed using ``minimum <= value and value <= maximum``
"""
return _RangeValidator(minimum, maximum) | 5,352,463 |
def sigmoid_prime(z):
"""Helper function for backpropagation"""
return sigmoid(z) * (1 - sigmoid(z)) | 5,352,464 |
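Sanity check, assuming the usual logistic sigmoid(z) = 1 / (1 + exp(-z)) defined elsewhere in the module:
import numpy as np

def sigmoid(z):  # assumed definition, included only to make the check self-contained
    return 1.0 / (1.0 + np.exp(-z))

# sigmoid(0) = 0.5, so sigmoid_prime(0) = 0.5 * (1 - 0.5) = 0.25 (the derivative's maximum)
assert np.isclose(sigmoid(0.0) * (1 - sigmoid(0.0)), 0.25)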
def register_widget_util(ui_name, some_type, gen_widgets, apply_with_params):
"""
ui_name: the name of this utility in the UI
some_type: this utility will appear in the sidebar whenever your view function
returns a value of type ``some_type``
gen_widgets(val): a function that takes the report value (of the specified type), and
returns a list of widgets. These widget values will be passed like:
``apply_with_params(val, *widget_values)``.
apply_with_params: a function that takes the report value (of the specified type) as
its first parameter, followed by a list of arguments that are given by widgets. The function must
return the result of a call to ``file_response``
"""
def gen_html(val):
widgets = gen_widgets(val)
widget_data = widgets_template_data(widgets)
return render_template('utility_button.html', name=ui_name, widgets=widget_data)
def apply_util(val, data):
widgets = gen_widgets(val)
validate_widget_form_data(widgets, data)
inputs = parse_widget_form_data(widgets, data)
return apply_with_params(val, *inputs)
register_util_for_type(some_type, gen_html, apply_util) | 5,352,465 |
def _CreateSamplePostsubmitReport(manifest=None,
builder='linux-code-coverage',
modifier_id=0):
"""Returns a sample PostsubmitReport for testing purpose.
Note: only use this method if the exact values don't matter.
"""
manifest = manifest or _CreateSampleManifest()
return PostsubmitReport.Create(
server_host='chromium.googlesource.com',
project='chromium/src',
ref='refs/heads/main',
revision='aaaaa',
bucket='coverage',
builder=builder,
commit_timestamp=datetime(2018, 1, 1),
manifest=manifest,
summary_metrics=_CreateSampleCoverageSummaryMetric(),
build_id=123456789,
modifier_id=modifier_id,
visible=True) | 5,352,466 |
def create_doc_term_matrix():
"""
Load document-term matrix from disk into memory
"""
df = None
if os.path.isfile(DOCTERM_PICKLE):
print('Saved dataframe found! Loading saved document-term matrix...')
df = pd.read_pickle(DOCTERM_PICKLE)
else:
print('Could not find saved document-term matrix, loading from scratch...')
df = pd.read_csv(DOCTERM_FPATH, index_col=0, keep_default_na=False)
# Re-map original doc-term words to stemmed words
stemmer = nltk.stem.porter.PorterStemmer()
row_names = df.index.tolist()
stem_queries = [stemmer.stem(name) for name in row_names]
df.index = pd.Index(stem_queries)
# Collapse duplicate rows
df = df.groupby(df.index).sum()
print('Saving as pickle file...')
df.to_pickle(DOCTERM_PICKLE)
return df | 5,352,467 |
def _fetch_measurement_stats_arrays(
ssc_s: typing.List[_NIScopeSSC],
scalar_measurements: typing.List[niscope.ScalarMeasurement],
):
"""
private function for fetching statics for selected functions.
Obtains a waveform measurement and returns the measurement value. This
method may return multiple statistical results depending on the number
of channels, the acquisition type, and the number of records you
specify.
You specify a particular measurement type, such as rise time, frequency,
or voltage peak-to-peak. The waveform on which the digitizer calculates
the waveform measurement is from an acquisition that you previously
initiated. The statistics for the specified measurement method are
returned, where the statistics are updated once every acquisition when
the specified measurement is fetched by any of the Fetch Measurement
methods. If a Fetch Measurement method has not been called, this
method fetches the data on which to perform the measurement. The
statistics are cleared by calling
clear_waveform_measurement_stats.
Many of the measurements use the low, mid, and high reference levels.
You configure the low, mid, and high references with
meas_chan_low_ref_level,
meas_chan_mid_ref_level, and
meas_chan_high_ref_level to set each channel
differently.
Args:
ssc_s (typing.List[_NIScopeSSC]): List of sessions for various channels in groups.
scalar_measurements (typing.List[niscope.ScalarMeasurement]): The list of scalar
measurement to be performed on each fetched waveform.
Returns:
list of measurement_stats (list of MeasurementStats): Returns a list of class instances
with the following measurement statistics about the specified measurement:
- **result** (float): the resulting measurement
- **mean** (float): the mean scalar value, which is obtained by
averaging each fetch_measurement_stats call
- **stdev** (float): the standard deviations of the most recent
**numInStats** measurements
- **min_val** (float): the smallest scalar value acquired (the minimum
of the **numInStats** measurements)
- **max_val** (float): the largest scalar value acquired (the maximum
of the **numInStats** measurements)
- **num_in_stats** (int): the number of times fetch_measurement_stats has been called
- **channel** (str): channel name this result was acquired from
- **record** (int): record number of this result
"""
stats: typing.List[niscope.MeasurementStats] = []
for ssc, scalar_meas_function in zip(ssc_s, scalar_measurements):
stats.append(
ssc.session.channels[ssc.channels].fetch_measurement_stats(scalar_meas_function)
) # function with unknown type
return stats | 5,352,468 |
def test_load_model():
"""
GIVEN: The model defined inside classifier.py (L21)
WHEN: Checking that the correct model is being passed in, by looking at the model.get_config()['handle'].
THEN: The full path to the model.
"""
model = c.load_model()
assert 'https://tfhub.dev/google/aiy/vision/classifier/birds_V1/1' == model.get_config()['handle'] | 5,352,469 |
def test_cyren_feed_relationship_with_search_response(mocker, indicator):
"""
Given: File hash indicator.
When: Running cyren_feed_relationship command.
Then: Verify expected results returns
"""
from CyrenThreatInDepthRenderRelated import cyren_feed_relationship
args = dict(indicator=indicator)
mocker.patch.object(demisto, "searchIndicators", return_value=SEARCH_INDICATORS_RESPONSE)
result = cyren_feed_relationship(args)
assert result.readable_output == ("|Indicator Type|Value|Reputation|Relationship Type|Entity Category|Timestamp UTC|\n"
"|---|---|---|---|---|---|\n"
"| SHA-256 "
"| "
"[0f6dbfb291ba1b84601b0372f70db3430df636c631d074c1c2463f9e5a033f21]"
"(#/indicator/4467)<br> | "
"None (0) | downloaded from | malware | 2020-10-28, 14:45:24 |\n") | 5,352,470 |
def on_close_commit_buffer(commit_msg_filepath):
"""Actually trigger the commit.
on_close_commit_buffer(str) -> None
"""
r = _get_repo_for_tempfile(commit_msg_filepath)
try:
with open(commit_msg_filepath, 'r') as f:
success, msg = r.commit(f)
print(msg)
except FileNotFoundError:
print('Aborting commit due to empty commit message.') | 5,352,471 |
def BlockdevGrow(disk, amount, dryrun, backingstore, excl_stor):
"""Grow a stack of block devices.
This function is called recursively, with the childrens being the
first ones to resize.
@type disk: L{objects.Disk}
@param disk: the disk to be grown
@type amount: integer
@param amount: the amount (in mebibytes) to grow with
@type dryrun: boolean
@param dryrun: whether to execute the operation in simulation mode
only, without actually increasing the size
@param backingstore: whether to execute the operation on backing storage
only, or on "logical" storage only; e.g. DRBD is logical storage,
whereas LVM, file, RBD are backing storage
@rtype: (status, result)
@type excl_stor: boolean
@param excl_stor: Whether exclusive_storage is active
@return: a tuple with the status of the operation (True/False), and
the errors message if status is False
"""
r_dev = _RecursiveFindBD(disk)
if r_dev is None:
_Fail("Cannot find block device %s", disk)
try:
r_dev.Grow(amount, dryrun, backingstore, excl_stor)
except errors.BlockDeviceError, err:
_Fail("Failed to grow block device: %s", err, exc=True) | 5,352,472 |
def nativeMouseY(self):
"""
TOWRITE
:rtype: qreal
"""
scene = self.activeScene() # QGraphicsScene*
if scene:
qDebug("mouseY: %.50f" % -scene.property("SCENE_MOUSE_POINT").y()) # .toPointF().y())
if scene:
return -scene.property("SCENE_MOUSE_POINT").y() # .toPointF().y()
return 0.0 | 5,352,473 |
def generate_repository_dependencies_folder_label_from_key( repository_name, repository_owner, changeset_revision, key ):
"""Return a repository dependency label based on the repository dependency key."""
if key_is_current_repositorys_key( repository_name, repository_owner, changeset_revision, key ):
label = 'Repository dependencies'
else:
label = "Repository <b>%s</b> revision <b>%s</b> owned by <b>%s</b>" % ( repository_name, changeset_revision, repository_owner )
return label | 5,352,474 |
def weighted_characteristic_path_length(matrix):
"""Calculate the characteristic path length for weighted graphs."""
n_nodes = len(matrix)
min_distances = weighted_shortest_path(matrix)
sum_vector = np.empty(n_nodes)
for i in range(n_nodes):
# calculate the inner sum
sum_vector[i] = (1/(n_nodes-1)) * np.sum([min_distances[i, j] for j in range(n_nodes) if j != i])
return (1/n_nodes) * np.sum(sum_vector) | 5,352,475 |
def doctor(cli):
"""Basic QMK environment checks.
This is currently very simple, it just checks that all the expected binaries are on your system.
TODO(unclaimed):
* [ ] Compile a trivial program with each compiler
"""
cli.log.info('QMK Doctor is checking your environment.')
ok = True
# Determine our OS and run platform specific tests
platform_id = platform.platform().lower()
if 'darwin' in platform_id or 'macos' in platform_id:
if not os_test_macos():
ok = False
elif 'linux' in platform_id:
if not os_test_linux():
ok = False
elif 'windows' in platform_id:
if not os_test_windows():
ok = False
else:
cli.log.error('Unsupported OS detected: %s', platform_id)
ok = False
# Make sure the basic CLI tools we need are available and can be executed.
bin_ok = check_binaries()
if not bin_ok:
if yesno('Would you like to install dependencies?', default=True):
run(['util/qmk_install.sh'])
bin_ok = check_binaries()
if bin_ok:
cli.log.info('All dependencies are installed.')
else:
ok = False
# Make sure the tools are at the correct version
for check in (check_arm_gcc_version, check_avr_gcc_version, check_avrdude_version, check_dfu_util_version, check_dfu_programmer_version):
if not check():
ok = False
# Check out the QMK submodules
sub_ok = check_submodules()
if sub_ok:
cli.log.info('Submodules are up to date.')
else:
if yesno('Would you like to clone the submodules?', default=True):
submodules.update()
sub_ok = check_submodules()
if not sub_ok:
ok = False
# Report a summary of our findings to the user
if ok:
cli.log.info('{fg_green}QMK is ready to go')
else:
cli.log.info('{fg_yellow}Problems detected, please fix these problems before proceeding.')
# FIXME(skullydazed/unclaimed): Link to a document about troubleshooting, or discord or something | 5,352,476 |
async def process_name(message: types.Message, state: FSMContext):
"""
Process user name
"""
async with state.proxy() as data:
data['name'] = message.text
await RegisterForm.next()
await message.reply("How old are you?") | 5,352,477 |
def preproc(config):
"""Preprocess the CNN on Illumina reads using the supplied configuration."""
# Set the number of cores to use
max_cores = config['Devices'].getint('N_CPUs')
# Set input and output paths
neg_path = config['InputPaths']['Fasta_Class_0']
pos_path = config['InputPaths']['Fasta_Class_1']
out_data_path = config['OutputPaths']['OutData']
out_labels_path = config['OutputPaths']['OutLabels']
# Set additional options: shuffle, gzip compression, RC augmentation, data type
do_shuffle = config['Options'].getboolean('Do_shuffle')
if do_shuffle:
seed = config['Options'].getint('ShuffleSeed')
np.random.seed(seed)
do_gzip = config['Options'].getboolean('Do_gzip')
do_revc = config['Options'].getboolean('Do_revc')
datatype = config['Options']['DataType']
read_length = config['Options'].getint('ReadLength')
use_tfdata = config['Options'].getboolean('Use_TFData')
n_files = config['Options'].getint('N_Files')
# Set alphabet and prepare the tokenizer
alphabet = "ACGT"
tokenizer = tf.keras.preprocessing.text.Tokenizer(char_level=True)
tokenizer.fit_on_texts(alphabet)
# Preprocess
if neg_path != "none":
print("Preprocessing negative data...")
with open(neg_path) as input_handle:
# Parse fasta and tokenize in parallel. Partial function takes tokenizer as a fixed argument.
# Tokenize function is applied to the fasta sequence generator.
if max_cores > 1:
with Pool(processes=max_cores) as p:
x_train_neg = np.asarray(p.map(partial(tokenize, tokenizer=tokenizer, datatype=datatype,
read_length=read_length), read_fasta(input_handle)),
dtype=datatype)
else:
x_train_neg = np.asarray(list(map(partial(tokenize, tokenizer=tokenizer, datatype=datatype,
read_length=read_length), read_fasta(input_handle))),
dtype=datatype)
# Count negative samples
n_negative = x_train_neg.shape[0]
else:
        x_train_neg = np.zeros((0, read_length, 4), dtype=datatype)
n_negative = 0
if pos_path != "none":
print("Preprocessing positive data...")
with open(pos_path) as input_handle:
# Parse fasta, tokenize in parallel & concatenate to negative data
if max_cores > 1:
with Pool(processes=max_cores) as p:
x_train_pos = np.asarray(p.map(partial(tokenize, tokenizer=tokenizer, datatype=datatype,
read_length=read_length), read_fasta(input_handle)),
dtype=datatype)
else:
x_train_pos = np.asarray(list(map(partial(tokenize, tokenizer=tokenizer, datatype=datatype,
read_length=read_length), read_fasta(input_handle))),
dtype=datatype)
# Count positive samples
n_positive = x_train_pos.shape[0]
else:
        x_train_pos = np.zeros((0, read_length, 4), dtype=datatype)
n_positive = 0
# Concatenate
x_train = np.concatenate((x_train_neg, x_train_pos))
# Add labels
y_train = np.concatenate((np.repeat(0, n_negative).astype(datatype), np.repeat(1, n_positive).astype(datatype)))
# All sequences must have the same length. Then x_train is an array and the view below can be created
# Note: creating a view instead of reversing element-wise saves a lot of memory
# RC augmentation: Add reverse-complements by reversing both dimensions of the matrix
# assumes the following order of columns: "ACGT"
if do_revc:
print("Augmenting data...")
x_train = np.concatenate((x_train, x_train[::, ::-1, ::-1]))
y_train = np.concatenate((y_train, y_train))
if do_shuffle:
indices = np.arange(len(y_train))
np.random.shuffle(indices)
x_train = x_train[indices, ::, ::]
y_train = y_train[indices]
# Save matrices #
print("Saving data...")
# Save output
if not use_tfdata:
# Compress output files
if do_gzip:
f_data = gzip.GzipFile(out_data_path + ".gz", "w")
f_labels = gzip.GzipFile(out_labels_path + ".gz", "w")
else:
f_data = out_data_path
f_labels = out_labels_path
np.save(file=f_data, arr=x_train)
np.save(file=f_labels, arr=y_train)
else:
out_dir = os.path.splitext(out_data_path)[0]
if not os.path.exists(out_dir):
os.makedirs(out_dir)
n_all = n_negative + n_positive
slice_size = math.ceil(n_all/n_files)
for i in range(n_files):
start = i * slice_size
end = min((i+1) * slice_size, n_all)
features_dataset = tf.data.Dataset.from_tensor_slices((x_train[start:end], y_train[start:end]))
serialized_features_dataset = features_dataset.map(tf_serialize_example)
filename = os.path.join(out_dir, os.path.splitext(os.path.basename(out_dir))[0]
+ '_{}-{}.tfrec'.format(start, end - 1))
writer = tf.data.experimental.TFRecordWriter(filename)
if tf.executing_eagerly():
writer.write(serialized_features_dataset)
else:
with tf.compat.v1.Session() as sess:
sess.run(writer.write(serialized_features_dataset))
print("Done!") | 5,352,478 |
def execute_timeout(cnx, command, **kwargs):
"""Perform Sqlite3 command to be interrupted if running too long.
If the given command is a string, it is executed as SQL.
If the command is a callable, call it with the cnx and any given
keyword arguments.
Raises SystemError if interrupted by timeout.
"""
config = flask.current_app.config
event = threading.Event()
timeout = config["EXECUTE_TIMEOUT"]
args = (
cnx,
event,
timeout,
config["EXECUTE_TIMEOUT_INCREMENT"],
config["EXECUTE_TIMEOUT_BACKOFF"],
)
thread = threading.Thread(target=_timeout_interrupt, args=args)
thread.start()
event.set()
try:
        if isinstance(command, str):  # SQL
            result = cnx.execute(command)
        elif callable(command):
            result = command(cnx, **kwargs)
        else:
            raise ValueError("command must be a SQL string or a callable")
except sqlite3.ProgrammingError:
raise
except sqlite3.OperationalError as error:
# This looks like a bug in the sqlite3 module:
# SQL syntax error should raise sqlite3.ProgrammingError,
# not sqlite3.OperationalError, which is what it does.
# That's why the error message has to be checked.
if str(error) == "interrupted":
raise SystemError(f"execution exceeded {timeout} seconds; interrupted")
else:
raise
event.clear()
thread.join()
return result | 5,352,479 |
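A minimal sketch of how the helper above might be called. It assumes a Flask app whose config defines EXECUTE_TIMEOUT, EXECUTE_TIMEOUT_INCREMENT and EXECUTE_TIMEOUT_BACKOFF, plus the transaction() and _timeout_interrupt() helpers referenced in the body; the app name and table name are illustrative only.

import sqlite3

cnx = sqlite3.connect(":memory:")
with app.app_context():  # `app` is the hypothetical Flask application
    execute_timeout(cnx, "CREATE TABLE entries (x INTEGER)")
    cursor = execute_timeout(cnx, "SELECT count(*) FROM entries")
    print(cursor.fetchone())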
def destroy():
"""Destroy this Heroku application. Wipe it from existance.
.. note::
This really will completely destroy your application. Think twice.
"""
local('heroku apps:destroy') | 5,352,480 |
def mean_IoU(threshold=0.5, center_crop=0, get_batch_mean=True):
"""
- y_true is a 3D array. Each channel represents the ground truth BINARY channel
- y_pred is a 3D array. Each channel represents the predicted BINARY channel
"""
def _f(y_true, y_pred):
y_true = fix_input(y_true)
y_pred = fix_input(y_pred)
y_true = get_binary_img(
y_true,
threshold=threshold,
center_crop=center_crop
)
y_pred = get_binary_img(
y_pred,
threshold=threshold,
center_crop=center_crop
)
inter = get_intersection(y_true, y_pred)
union = get_alls(y_true, y_pred) - inter
batch_metric = eps_divide(inter, union)
if get_batch_mean:
return K.mean(batch_metric, axis=-1)
return batch_metric
_f.__name__ = 'attila_metrics_{}'.format('mean_IoU')
return _f | 5,352,481 |
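A minimal sketch of wiring the factory above into a Keras model, assuming mean_IoU and the helpers it calls (fix_input, get_binary_img, get_intersection, get_alls, eps_divide) are in scope; the model architecture is illustrative only.

from tensorflow import keras

model = keras.Sequential([
    keras.layers.Conv2D(1, 3, padding="same", activation="sigmoid", input_shape=(64, 64, 1)),
])
model.compile(
    optimizer="adam",
    loss="binary_crossentropy",
    metrics=[mean_IoU(threshold=0.5)],  # the returned callable acts as a standard Keras metric
)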
def unsatZone_withAgri_Ep(self, k):
"""
- Potential evaporation is calculated with formula in 'JarvisCoefficients', but without
using the Jarvis stress functions
- Potential evaporation is decreased by energy used for interception evaporation
- Formula for evaporation linear until LP, from than with potential rate
    - Outgoing fluxes are determined based on (value in previous timestep + inflow),
      and if this leads to negative storage, the outgoing fluxes are corrected proportionally
- Qu is determined with a beta function (same as in HBV?)
- Code for ini-file: 14
"""
JarvisCoefficients.calcEp(self, k)
self.PotEvaporation = pcr.cover(pcr.ifthenelse(self.EpHour >= 0, self.EpHour, 0), 0)
self.Sa[k] = pcr.ifthenelse(
self.Sa_t[k] + self.Pe > self.samax[k], self.samax[k], self.Sa_t[k] + self.Pe
)
self.Qaadd = pcr.ifthenelse(
self.Sa_t[k] + self.Pe > self.samax[k],
self.Sa_t[k] + self.Pe - self.samax[k],
0,
)
self.SaN = self.Sa[k] / self.samax[k]
self.Ea1 = pcr.max((self.PotEvaporation - self.Ei), 0) * pcr.min(
self.Sa[k] / (self.samax[k] * self.LP[k]), 1
)
self.Qa1 = (self.Pe - self.Qaadd) * (1 - (1 - self.SaN) ** self.beta[k])
self.Fa1 = self.famax[k] * (self.sumax[k] - self.Su[k]) / self.sumax[k]
self.Sa[k] = self.Sa_t[k] + (self.Pe - self.Qaadd) - self.Qa1 - self.Ea1 - self.Fa1
self.Sa_diff = pcr.ifthenelse(self.Sa[k] < 0, self.Sa[k], 0)
self.Ea = (
self.Ea1
+ (
self.Ea1
/ pcr.ifthenelse(
self.Qa1 + self.Ea1 + self.Fa1 > 0, self.Qa1 + self.Ea1 + self.Fa1, 1
)
)
* self.Sa_diff
)
self.Qa = (
self.Qa1
+ (
self.Qa1
/ pcr.ifthenelse(
self.Qa1 + self.Ea1 + self.Fa1 > 0, self.Qa1 + self.Ea1 + self.Fa1, 1
)
)
* self.Sa_diff
)
self.Fa = pcr.ifthenelse(
self.Fa1 > 0,
self.Fa1
+ (
self.Fa1
/ pcr.ifthenelse(
self.Qa1 + self.Ea1 + self.Fa1 > 0, self.Qa1 + self.Ea1 + self.Fa1, 1
)
)
* self.Sa_diff,
self.Fa1,
)
self.Sa[k] = self.Sa_t[k] + (self.Pe - self.Qaadd) - self.Ea - self.Qa - self.Fa
self.Sa[k] = pcr.ifthenelse(self.Sa[k] < 0, 0, self.Sa[k])
self.Sa_diff2 = pcr.ifthen(self.Sa[k] < 0, self.Sa[k])
self.Capa = pcr.min(self.cap[k] * (1 - self.Sa[k] / self.samax[k]), self.Su[k])
self.Sa[k] = self.Sa[k] + self.Capa
self.Su[k] = self.Su_t[k] + self.Fa - self.Capa
self.Perc = self.perc[k] * (self.Su[k] / self.sumax[k])
self.Su[k] = self.Su[k] - self.Perc
self.wbSa_[k] = (
self.Pe
- self.Ea
- self.Qa
- self.Qaadd
- self.Fa
+ self.Capa
- self.Sa[k]
+ self.Sa_t[k]
)
self.wbSu_[k] = self.Fa - self.Perc - self.Capa - self.Su[k] + self.Su_t[k]
self.Eu_[k] = self.Ea
self.Qu_[k] = self.Qa + self.Qaadd
self.Fa_[k] = self.Fa
    self.Cap_[k] = self.Capa
self.Perc_[k] = self.Perc | 5,352,482 |
def ConvertCSVStringToList(csv_string):
"""Helper to convert a csv string to a list."""
reader = csv.reader([csv_string])
return list(reader)[0] | 5,352,483 |
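For reference, a short illustration of what csv.reader buys over a plain str.split: quoted fields containing commas stay intact.

print(ConvertCSVStringToList('a,b,"c,d"'))  # ['a', 'b', 'c,d']
print(ConvertCSVStringToList('1,2,3'))      # ['1', '2', '3']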
def add_data_to_taskw(data: dict, module: str, quest: str):
"""
Insert data to TaskWarrior DB.
"""
twarrior = TaskWarrior()
if quest is not None:
project_data = module + ":" + quest
timestamp = convert_to_unix_tstamp(data['end'], False)
else:
project_data = module
timestamp = convert_to_unix_tstamp(data['date_end'], True)
if not task_exists(twarrior, data['name']) and not is_stage_validated(data):
twarrior.task_add(data['name'], due=timestamp, project=project_data) | 5,352,484 |
def get_section_range_pairs(orig_section, new_pdf):
"""Return MatchingSection for a section."""
other_section = new_pdf.find_corresponding_section(orig_section)
if not other_section:
print("Skipping section {} - no match in the other doc!".format(
orig_section.title))
return None
return MatchingSection(
title=orig_section.title,
orig_range=orig_section.pdf_diff_options,
new_range=other_section.pdf_diff_options) | 5,352,485 |
def atomic_transaction(conn: sqlite3.Connection,
sql: str, *args: Any) -> sqlite3.Cursor:
"""Perform an **atomic** transaction.
The transaction is committed if there are no exceptions else the
transaction is rolled back.
Args:
conn: database connection
sql: formatted string
*args: arguments to use for parameter substitution
Returns:
sqlite cursor
"""
try:
c = transaction(conn, sql, *args)
except Exception as e:
logging.exception("Could not execute transaction, rolling back")
conn.rollback()
raise e
conn.commit()
return c | 5,352,486 |
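A minimal usage sketch, assuming the transaction(conn, sql, *args) helper referenced above is defined in the same module; the table name is purely illustrative.

import sqlite3

conn = sqlite3.connect(":memory:")
atomic_transaction(conn, "CREATE TABLE t (x INTEGER)")
cur = atomic_transaction(conn, "INSERT INTO t (x) VALUES (?)", 42)
print(cur.lastrowid)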
def convert_to_tensor(narray, device):
"""Convert numpy to tensor."""
return tf.convert_to_tensor(narray, tf.float32) | 5,352,487 |
def get_config_origin(c):
"""Return appropriate configuration origin
Parameters
----------
c: Configuration
configuration to be examined
Returns
-------
origin: str
origin of configuration (e.g. "Local", "Random", etc.)
"""
if not c.origin:
origin = "Unknown"
elif c.origin.startswith("Local") or c.origin == 'Model based pick' or "sorted" in c.origin:
origin = "Acquisition Function"
elif c.origin.startswith("Random"):
origin = "Random"
else:
logging.getLogger("cave.utils.helpers").debug("Cannot interpret origin: %s", c.origin)
origin = "Unknown"
return origin | 5,352,488 |
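A short illustrative sketch of the mapping above; the class below is only a stand-in with an `origin` attribute, not the real Configuration object the function expects.

class FakeConfig:
    def __init__(self, origin):
        self.origin = origin

print(get_config_origin(FakeConfig("Random initial design")))  # "Random"
print(get_config_origin(FakeConfig("Local search")))           # "Acquisition Function"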
def get_instance_ip() -> str:
"""
For a given identifier for a deployment (env var of IDENTIFIER), find the cluster
that was deployed, find the tasks within the cluster (there should only be one),
find the network interfaces on that task, and return the public IP of the instance
    :returns: str The public IP of the remote instance, or None if no running task is found
"""
ecs_c = boto3.client("ecs")
task_arns = ecs_c.list_tasks(
cluster=f"remote-cluster-{IDENTIFIER}", desiredStatus="RUNNING"
)["taskArns"]
if task_arns:
tasks = ecs_c.describe_tasks(
cluster=f"remote-cluster-{IDENTIFIER}", tasks=task_arns
)["tasks"]
# Should only ever be one task and network interface on deployment
task_details = {
d["name"]: d["value"] for d in tasks[0]["attachments"][0]["details"]
}
interface_id = task_details["networkInterfaceId"]
ec2_c = boto3.client("ec2")
network_interfaces = ec2_c.describe_network_interfaces(
NetworkInterfaceIds=[interface_id]
)["NetworkInterfaces"]
return network_interfaces[0]["Association"]["PublicIp"]
else:
return None | 5,352,489 |
def is_hign_level_admin():
"""超级管理员"""
return is_admin() and request.user.level == 1 | 5,352,490 |
def object_metadata(save_path):
"""Retrieves information about the objects in a checkpoint.
Example usage:
```python
object_graph = tf.contrib.checkpoint.object_metadata(
tf.train.latest_checkpoint(checkpoint_directory))
ckpt_variable_names = set()
for node in object_graph.nodes:
for attribute in node.attributes:
ckpt_variable_names.add(attribute.full_name)
```
Args:
save_path: The path to the checkpoint, as returned by `save` or
`tf.train.latest_checkpoint`.
Returns:
A parsed `tf.contrib.checkpoint.TrackableObjectGraph` protocol buffer.
Raises:
ValueError: If an object graph was not found in the checkpoint.
"""
reader = pywrap_tensorflow.NewCheckpointReader(save_path)
try:
object_graph_string = reader.get_tensor(base.OBJECT_GRAPH_PROTO_KEY)
except errors_impl.NotFoundError:
raise ValueError(
('The specified checkpoint "%s" does not appear to be object-based (it '
'is missing the key "%s"). Likely it was created with a name-based '
"saver and does not contain an object dependency graph.") %
(save_path, base.OBJECT_GRAPH_PROTO_KEY))
object_graph_proto = (trackable_object_graph_pb2.TrackableObjectGraph())
object_graph_proto.ParseFromString(object_graph_string)
return object_graph_proto | 5,352,491 |
async def login(_request: Request, _user: User) -> response.HTTPResponse:
"""
Login redirect
"""
return redirect(app.url_for("pages.portfolios")) | 5,352,492 |
def test_estimate_gas_fails_if_startgas_is_higher_than_blockgaslimit(deploy_client):
""" Gas estimation fails if the transaction execution requires more gas
    than the block's gas limit.
"""
contract_proxy, _ = deploy_rpc_test_contract(deploy_client, "RpcWithStorageTest")
latest_block_hash = deploy_client.blockhash_from_blocknumber("latest")
current_gas_limit = deploy_client.get_block(latest_block_hash)["gasLimit"]
    # This number of iterations is an overestimate to accommodate races;
    # it cannot be significantly large because on parity it is a blocking
# call.
number_iterations = current_gas_limit // SSTORE_COST
# This race condition cannot be fixed because geth does not support
# block_identifier for eth_estimateGas. The test should not be flaky
    # because number_iterations is orders of magnitude larger than it needs
    # to be.
block_identifier = None
startgas = contract_proxy.estimate_gas(block_identifier, "waste_storage", number_iterations)
assert startgas is None, "estimate_gas must return empty if sending the transaction would fail" | 5,352,493 |
def delete_source(source_uuid: SourceId, database: Database):
"""Delete a source."""
data_model = latest_datamodel(database)
reports = latest_reports(database)
data = SourceData(data_model, reports, source_uuid)
delta_description = (
f"{{user}} deleted the source '{data.source_name}' from metric "
f"'{data.metric_name}' of subject '{data.subject_name}' in report '{data.report_name}'."
)
uuids = [data.report_uuid, data.subject_uuid, data.metric_uuid, source_uuid]
del data.metric["sources"][source_uuid]
return insert_new_report(database, delta_description, (data.report, uuids)) | 5,352,494 |
def find_object_with_matching_attr(iterable, attr_name, value):
"""
Finds the first item in an iterable that has an attribute with the given name and value. Returns
None otherwise.
Returns:
Matching item or None
"""
for item in iterable:
try:
if getattr(item, attr_name) == value:
return item
except AttributeError:
pass
return None | 5,352,495 |
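A short illustrative example of the helper above; the Item type is made up for the demonstration.

from collections import namedtuple

Item = namedtuple("Item", "name value")
items = [Item("a", 1), Item("b", 2)]
print(find_object_with_matching_attr(items, "name", "b"))     # Item(name='b', value=2)
print(find_object_with_matching_attr(items, "missing", "b"))  # None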
def send_message(token, message: str) -> str:
"""
    Send the given message string to LINE Notify.

    :param token:
        LINE Notify access token
    :param message:
        The message string to send
    :return response:
        The server response (e.g. an HTTP 200 status)
"""
notify = Notifer(token)
return notify.send_message(message) | 5,352,496 |
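A hypothetical usage sketch; the token value is a placeholder and the Notifer class is assumed to come from the same module as the function above.

token = "YOUR_LINE_NOTIFY_TOKEN"  # placeholder, not a real token
status = send_message(token, "deployment finished")
print(status)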
def hdf5pack(hdf5_file,
active_areas=None,
address=None,
attenuation=None,
beam_center_x=None,
beam_center_y=None,
ccd_image_saturation=None,
data=None,
distance=None,
pixel_size=None,
pulse_length=None,
saturated_value=None,
timestamp=None,
wavelength=None,
xtal_target=None):
"""Similar but far from identical to the HDF5 output from CASS. XXX
Poor diagnostics--we don't know if it failed or not.
@note Does not include the deprecated SEQUENCE_NUMBER attribute.
While some redundant items are written in order to keep the
HDF5 synchronised to the pickle format, neither SIZE1 nor
SIZE2 are included.
"""
# Need this because we cannot write None values to the HDF5 file.
if address is None:
address = repr(None)
if attenuation is None:
attenuation = 0
if xtal_target is None:
xtal_target = repr(None)
if pixel_size is None:
pixel_size = globals()['pixel_size'] # XXX CSpad-specific!
if pulse_length is None:
pulse_length = 0
d = dpack(address=address,
active_areas=active_areas,
beam_center_x=beam_center_x,
beam_center_y=beam_center_y,
ccd_image_saturation=ccd_image_saturation,
data=data,
distance=distance,
pixel_size=pixel_size,
saturated_value=saturated_value,
timestamp=timestamp,
wavelength=wavelength,
xtal_target=xtal_target)
if d is None:
return
grp_event = hdf5_file.create_group(d['TIMESTAMP'])
grp_detector = grp_event.create_group(address)
for (key, value) in six.iteritems(d):
if key == 'ACTIVE_AREAS':
grp_detector.create_dataset(key, data=value.as_numpy_array())
elif key == 'DATA':
# Compress the image data with gzip at the default level (4).
# CASS seems to use maximum compression level (9), which gives a
# moderate decrease in file size at the price of much longer
# running time.
grp_detector.create_dataset(
key, compression='gzip', data=value.as_numpy_array())
else:
grp_event.create_dataset(key, data=[value])
grp_event.create_dataset('ATTENUATION', data=[attenuation])
grp_event.create_dataset('PULSE_LENGTH', data=[pulse_length]) | 5,352,497 |
def _fix(node):
"""Fix the naive construction of the adjont.
See `fixes.py` for details.
This function also returns the result of reaching definitions analysis so
that `split` mode can use this to carry over the state from primal to
adjoint.
Args:
node: A module with the primal and adjoint function definitions as returned
by `reverse_ad`.
Returns:
node: A module with the primal and adjoint function with additional
variable definitions and such added so that pushes onto the stack and
gradient accumulations are all valid.
defined: The variables defined at the end of the primal.
reaching: The variable definitions that reach the end of the primal.
"""
# Do reaching definitions analysis on primal and adjoint
pri_cfg = cfg.CFG.build_cfg(node.body[0])
defined = cfg.Defined()
defined.visit(pri_cfg.entry)
reaching = cfg.ReachingDefinitions()
reaching.visit(pri_cfg.entry)
cfg.forward(node.body[1], cfg.Defined())
cfg.forward(node.body[1], cfg.ReachingDefinitions())
# Remove pushes of variables that were never defined
fixes.CleanStack().visit(node)
fixes.FixStack().visit(node.body[0])
# Change accumulation into definition if possible
fixes.CleanGrad().visit(node.body[1])
# Define gradients that might or might not be defined
fixes.FixGrad().visit(node.body[1])
return node, defined.exit, reaching.exit | 5,352,498 |
def change_lane(vid, lane):
"""
Let a vehicle change lane without respecting any safety distance
:param vid: vehicle id
:param lane: lane index
"""
traci.vehicle.setLaneChangeMode(vid, DEFAULT_LC)
traci.vehicle.changeLane(vid, lane, 10000.0) | 5,352,499 |
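A minimal sketch, assuming a SUMO simulation is already running under TraCI, a vehicle with id "veh0" has been inserted, and DEFAULT_LC is defined in the original module.

change_lane("veh0", lane=1)  # move vehicle "veh0" to lane index 1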