| content (string, 35-762k chars) | sha1 (string, 40 chars) | id (int64, 0-3.66M) |
|---|---|---|
def show_user_following(user_id):
"""Show list of people this user is following."""
user = User.query.get_or_404(user_id)
return render_template('users/following.html', user=user) | ef1d7d13e9c00c352f27cdde17d215d40ff47b76 | 3,658,500 |
def logout():
"""
This API revokes all the tokens including access and refresh tokens that belong to the user.
"""
current_user = get_jwt_identity()
logout_user(current_user.get('id'))
return jsonify(message="Token revoked."), 200 | d574135099dfaedcdb8d6bdef993d8f773898f63 | 3,658,501 |
def multiset_counter(mset):
"""
    Return the sum of occurrences of elements present in a token ids multiset,
aka. the multiset cardinality.
"""
return sum(mset.values()) | 36885abd5bf666aa6c77a262a647c227e46d2e88 | 3,658,502 |
def get_v6_subnet(address):
"""derive subnet number for provided ipv6 address
Args:
address (str): ipv6 address in string with mask
Returns:
str: subnet zero == network address
"""
return IPv6(address).subnet_zero() | ed9158b2d2ff8a83dce1b079066ef372ffc623e5 | 3,658,503 |
import os
def get_ros_package_path(env=None):
"""
Get the current ROS_PACKAGE_PATH.
:param env: (optional) environment override, ``dict``
"""
if env is None:
env = os.environ
return env.get(ROS_PACKAGE_PATH, None) | 85a7db954919892440156af4f9218b52014575e2 | 3,658,504 |
import yaml
def load_scenario(file_name: str) -> Waypoint:
"""
Create an object Waypoint from a Scenario file
:param file_name:
:return:
"""
# read file
with open(f"{waypoint_directory_path}/{file_name}", "r") as scenario_file:
scenario_data = yaml.load(scenario_file, Loader=yaml.FullLoader)
waypoint = Waypoint()
waypoint.build_from_json(scenario_data)
return waypoint | db5e246141e014af4545468481739e9449d90a00 | 3,658,505 |
import argparse
from textwrap import dedent
def parseArguments(argv=None): # pragma: no cover
"""
I parse arguments in sys.argv and return the args object. The parser
itself is available as args.parser.
Adds the following members to args:
parser = the parser object
store_opt = the StoreOpt object
"""
store_opt = StoreOpt()
parser = argparse.ArgumentParser(
prog="green",
usage="%(prog)s [options] [target [target2 ...]]",
add_help=False,
description=dedent(
"""
Green is a clean, colorful, fast test runner for Python unit tests.
""".rstrip()
),
epilog=dedent(
"""
ENABLING SHELL COMPLETION
To enable bash- or zsh-completion, add the line below to the end of your
.bashrc or .zshrc file (or equivalent config file):
which green >& /dev/null && source "$( green --completion-file )"
Warning! Generating a completion list actually discovers and loads tests
-- this can be very slow if you run it in huge directories!
SETUP.PY RUNNER
To run green as a setup.py command, simply add green to the 'setup_requires'
section in the setup.py file, and specify a target as the 'test_suite'
parameter if you do not want green to load all the tests:
setup(
setup_requires = ['green'],
        test_suite = 'myproject.tests'
)
Then simply run green as any other setup.py command (it accepts the same
parameters as the 'green' executable):
python setup.py green
python setup.py green -r # to run with coverage, etc.
CONFIG FILES
For documentation on config files, please see
https://github.com/CleanCut/green#config-files
""".rstrip()
),
formatter_class=argparse.RawDescriptionHelpFormatter,
)
target_args = parser.add_argument_group("Target Specification")
target_args.add_argument(
"targets",
action="store",
nargs="*",
metavar="target",
help=(
"""Targets to test. Any number of targets may be specified. If
blank, then discover all testcases in the current directory tree. Can
be a directory (or package), file (or module), or fully-qualified
'dotted name' like proj.tests.test_things.TestStuff. If a directory
(or package) is specified, then we will attempt to discover all tests
under the directory (even if the directory is a package and the tests
would not be accessible through the package's scope). In all other
cases, only tests accessible from introspection of the object will
be loaded."""
),
default=argparse.SUPPRESS,
)
concurrency_args = parser.add_argument_group("Concurrency Options")
store_opt(
concurrency_args.add_argument(
"-s",
"--processes",
action="store",
type=int,
metavar="NUM",
help="Number of processes to use to run tests. Note that your "
"tests need to be written to avoid using the same resources (temp "
"files, sockets, ports, etc.) for the multi-process mode to work "
"well (--initializer and --finalizer can help provision "
"per-process resources). Default is to run the same number of "
"processes as your machine has logical CPUs. Note that for a "
"small number of trivial tests, running everything in a single "
"process may be faster than the overhead of initializing all the "
"processes.",
default=argparse.SUPPRESS,
)
)
store_opt(
concurrency_args.add_argument(
"-i",
"--initializer",
action="store",
metavar="DOTTED_FUNCTION",
help="Python function to run inside of a single worker process "
"before it starts running tests. This is the way to provision "
"external resources that each concurrent worker process needs to "
"have exclusive access to. Specify the function in dotted "
"notation in a way that will be importable from the location you "
"are running green from.",
default=argparse.SUPPRESS,
)
)
store_opt(
concurrency_args.add_argument(
"-z",
"--finalizer",
action="store",
metavar="DOTTED_FUNCTION",
help="Same as --initializer, only run at the end of a worker "
"process's lifetime. Used to unprovision resources provisioned by "
"the initializer.",
default=argparse.SUPPRESS,
)
)
format_args = parser.add_argument_group("Format Options")
store_opt(
format_args.add_argument(
"-t",
"--termcolor",
action="store_true",
help="Force terminal colors on. Default is to autodetect.",
default=argparse.SUPPRESS,
)
)
store_opt(
format_args.add_argument(
"-T",
"--notermcolor",
action="store_true",
help="Force terminal colors off. Default is to autodetect.",
default=argparse.SUPPRESS,
)
)
store_opt(
format_args.add_argument(
"-W",
"--disable-windows",
action="store_true",
help="Disable Windows support by turning off Colorama",
default=argparse.SUPPRESS,
)
)
out_args = parser.add_argument_group("Output Options")
store_opt(
out_args.add_argument(
"-a",
"--allow-stdout",
action="store_true",
help=(
"Instead of capturing the stdout and stderr and presenting it "
"in the summary of results, let it come through. Note that "
"output from sources other than tests (like module/class setup "
"or teardown) is never captured."
),
default=argparse.SUPPRESS,
)
)
store_opt(
out_args.add_argument(
"-q",
"--quiet-stdout",
action="store_true",
help=(
"Instead of capturing the stdout and stderr and presenting it "
"in the summary of results, discard it completly for successful "
"tests. --allow-stdout option overrides it."
),
default=argparse.SUPPRESS,
)
)
store_opt(
out_args.add_argument(
"-k",
"--no-skip-report",
action="store_true",
help=(
"Don't print the report of skipped tests "
"after testing is done. Skips will still show up in the progress "
"report and summary count."
),
default=argparse.SUPPRESS,
)
)
store_opt(
out_args.add_argument(
"-e",
"--no-tracebacks",
action="store_true",
help=("Don't print tracebacks for failures and " "errors."),
default=argparse.SUPPRESS,
)
)
store_opt(
out_args.add_argument(
"-h",
"--help",
action="store_true",
help="Show this help message and exit.",
default=argparse.SUPPRESS,
)
)
store_opt(
out_args.add_argument(
"-V",
"--version",
action="store_true",
help="Print the version of Green and Python and exit.",
default=argparse.SUPPRESS,
)
)
store_opt(
out_args.add_argument(
"-l",
"--logging",
action="store_true",
help="Don't configure the root logger to redirect to /dev/null, "
"enabling internal debugging output, as well as any output test (or "
"tested) code may be sending via the root logger.",
default=argparse.SUPPRESS,
)
)
store_opt(
out_args.add_argument(
"-d",
"--debug",
action="count",
help=(
"Enable internal debugging statements. Implies --logging. Can "
"be specified up to three times for more debug output."
),
default=argparse.SUPPRESS,
)
)
store_opt(
out_args.add_argument(
"-v",
"--verbose",
action="count",
help=(
"Verbose. Can be specified up to three times for more "
"verbosity. Recommended levels are -v and -vv."
),
default=argparse.SUPPRESS,
)
)
store_opt(
out_args.add_argument(
"-U",
"--disable-unidecode",
action="store_true",
help=(
"Disable unidecode which converts test output from unicode to"
"ascii by default on Windows to avoid hard-to-debug crashes."
),
default=argparse.SUPPRESS,
)
)
other_args = parser.add_argument_group("Other Options")
store_opt(
other_args.add_argument(
"-f",
"--failfast",
action="store_true",
help=("Stop execution at the first test that fails or errors."),
default=argparse.SUPPRESS,
)
)
store_opt(
other_args.add_argument(
"-c",
"--config",
action="store",
metavar="FILE",
help="Use this config file to override any values from "
"the config file specified by environment variable GREEN_CONFIG, "
"~/.green, and .green in the current working directory.",
default=argparse.SUPPRESS,
)
)
store_opt(
other_args.add_argument(
"-p",
"--file-pattern",
action="store",
metavar="PATTERN",
help="Pattern to match test files. Default is test*.py",
default=argparse.SUPPRESS,
)
)
store_opt(
other_args.add_argument(
"-n",
"--test-pattern",
action="store",
metavar="PATTERN",
help="Pattern to match test method names after "
"'test'. Default is '*', meaning match methods named 'test*'.",
default=argparse.SUPPRESS,
)
)
store_opt(
other_args.add_argument(
"-j",
"--junit-report",
action="store",
metavar="FILENAME",
help=("Generate a JUnit XML report."),
default=argparse.SUPPRESS,
)
)
cov_args = parser.add_argument_group(
"Coverage Options ({})".format(coverage_version)
)
store_opt(
cov_args.add_argument(
"-r",
"--run-coverage",
action="store_true",
help=("Produce coverage output."),
default=argparse.SUPPRESS,
)
)
store_opt(
cov_args.add_argument(
"-g",
"--cov-config-file",
action="store",
metavar="FILE",
help=(
"Specify a coverage config file. "
"Implies --run-coverage See the coverage documentation "
"at https://coverage.readthedocs.io/en/v4.5.x/config.html "
"for coverage config file syntax. The [run] and [report] sections "
"are most relevant."
),
default=argparse.SUPPRESS,
)
    )
store_opt(
cov_args.add_argument(
"-R",
"--quiet-coverage",
action="store_true",
help=(
"Do not print coverage report to stdout (coverage files will "
"still be created). Implies --run-coverage"
),
default=argparse.SUPPRESS,
)
)
store_opt(
cov_args.add_argument(
"-O",
"--clear-omit",
action="store_true",
help=(
"Green tries really hard to set up a good list of patterns of "
"files to omit from coverage reports. If the default list "
"catches files that you DO want to cover you can specify this "
"flag to leave the default list empty to start with. You can "
"then add patterns back in with --omit-patterns. The default "
"list is something like '*/test*,*/mock*,*(temp dir)*,*(python "
"system packages)*' -- only longer."
),
default=argparse.SUPPRESS,
)
)
store_opt(
cov_args.add_argument(
"-u",
"--include-patterns",
action="store",
metavar="PATTERN",
help=(
"Comma-separated file-patterns to include in coverage. This "
"implies that anything that does not match the include pattern is "
"omitted from coverage reporting."
),
default=argparse.SUPPRESS,
)
)
store_opt(
cov_args.add_argument(
"-o",
"--omit-patterns",
action="store",
metavar="PATTERN",
help=(
"Comma-separated file-patterns to omit from coverage. For "
"example, if coverage reported a file mypackage/foo/bar you could "
"omit it from coverage with 'mypackage*', '*/foo/*', or '*bar'"
),
default=argparse.SUPPRESS,
)
)
store_opt(
cov_args.add_argument(
"-m",
"--minimum-coverage",
action="store",
metavar="X",
type=int,
help=(
"Integer. A minimum coverage value. If "
"not met, then we will print a message and exit with a nonzero "
"status. Implies --run-coverage"
),
default=argparse.SUPPRESS,
)
)
integration_args = parser.add_argument_group("Integration Options")
store_opt(
integration_args.add_argument(
"--completion-file",
action="store_true",
help=(
"Location of the bash- and zsh-completion "
"file. To enable bash- or zsh-completion, see ENABLING SHELL "
"COMPLETION below."
),
default=argparse.SUPPRESS,
)
)
store_opt(
integration_args.add_argument(
"--completions",
action="store_true",
help=(
"Output possible completions of the given target. Used by "
"bash- and zsh-completion."
),
default=argparse.SUPPRESS,
)
)
store_opt(
integration_args.add_argument(
"--options",
action="store_true",
help="Output all options. Used by bash- and zsh-completion.",
default=argparse.SUPPRESS,
)
)
args = parser.parse_args(argv)
# Add additional members
args.parser = parser
args.store_opt = store_opt
return args | 0f2067daacb6270ac780927e1de48684d66fb469 | 3,658,506 |
import tensorflow as tf
def parse_example(serialized_example):
"""Parse a serialized example proto."""
features = tf.io.parse_single_example(
serialized_example,
dict(
beam_id=tf.io.FixedLenFeature(shape=[], dtype=tf.int64),
image_id=tf.io.FixedLenFeature(shape=[], dtype=tf.int64),
question_id=tf.io.FixedLenFeature(shape=[], dtype=tf.int64),
context=tf.io.FixedLenFeature(shape=[], dtype=tf.string),
question=tf.io.FixedLenFeature(shape=[], dtype=tf.string)))
return features | 5c3a76bc121f02ce4484a3af87104f7739db1669 | 3,658,507 |
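A hypothetical usage sketch for the parser above: the TFRecord path and batch size are made up, but this is the usual way such a parse function is plugged into a tf.data pipeline.

```python
import tensorflow as tf

dataset = (
    tf.data.TFRecordDataset("examples.tfrecord")  # hypothetical input file
    .map(parse_example, num_parallel_calls=tf.data.AUTOTUNE)
    .batch(8)
)
for batch in dataset.take(1):
    print(batch["question_id"].shape, batch["question"].dtype)
```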
from numbers import Number
from typing import Optional
from typing import Tuple
from typing import Union
import numpy as np
def _compute_bootstrap_quantiles_point_estimate_custom_bias_corrected_method(
metric_values: np.ndarray,
false_positive_rate: np.float64,
n_resamples: int,
random_seed: Optional[int] = None,
) -> Tuple[Number, Number]:
"""
An internal implementation of the "bootstrap" estimator method, returning a point estimate for a population
parameter of interest (lower and upper quantiles in this case). See
https://en.wikipedia.org/wiki/Bootstrapping_(statistics) for an introduction to "bootstrapping" in statistics.
The methods implemented here can be found in:
Efron, B., & Tibshirani, R. J. (1993). Estimates of bias. An Introduction to the Bootstrap (pp. 124-130).
Springer Science and Business Media Dordrecht. DOI 10.1007/978-1-4899-4541-9
This implementation is sub-par compared to the one available from the "SciPy" standard library
("https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.bootstrap.html"), in that it does not handle
multi-dimensional statistics. "scipy.stats.bootstrap" is vectorized, thus having the ability to accept a
multi-dimensional statistic function and process all dimensions.
Unfortunately, as of March 4th, 2022, the SciPy implementation has two issues: 1) it only returns a confidence
interval and not a point estimate for the population parameter of interest, which is what we require for our use
cases. 2) It can not handle multi-dimensional statistics and correct for bias simultaneously. You must either use
one feature or the other.
This implementation could only be replaced by "scipy.stats.bootstrap" if Great Expectations drops support for
Python 3.6, thereby enabling us to use a more up-to-date version of the "scipy" Python package (the currently used
version does not have "bootstrap"). Also, as discussed above, two contributions would need to be made to the SciPy
package to enable 1) bias correction for multi-dimensional statistics and 2) a return value of a point estimate for
the population parameter of interest (lower and upper quantiles in this case).
Additional future direction could include developing enhancements to bootstrapped estimator based on theory
presented in "http://dido.econ.yale.edu/~dwka/pub/p1001.pdf":
@article{Andrews2000a,
added-at = {2008-04-25T10:38:44.000+0200},
author = {Andrews, Donald W. K. and Buchinsky, Moshe},
biburl = {https://www.bibsonomy.org/bibtex/28e2f0a58cdb95e39659921f989a17bdd/smicha},
day = 01,
interhash = {778746398daa9ba63bdd95391f1efd37},
intrahash = {8e2f0a58cdb95e39659921f989a17bdd},
journal = {Econometrica},
keywords = {imported},
month = Jan,
note = {doi: 10.1111/1468-0262.00092},
number = 1,
pages = {23--51},
timestamp = {2008-04-25T10:38:52.000+0200},
title = {A Three-step Method for Choosing the Number of Bootstrap Repetitions},
url = {http://www.blackwell-synergy.com/doi/abs/10.1111/1468-0262.00092},
volume = 68,
year = 2000
}
The article outlines a three-step minimax procedure that relies on the Central Limit Theorem (C.L.T.) along with the
bootstrap sampling technique (see https://en.wikipedia.org/wiki/Bootstrapping_(statistics) for background) for
computing the stopping criterion, expressed as the optimal number of bootstrap samples, needed to achieve a maximum
probability that the value of the statistic of interest will be minimally deviating from its actual (ideal) value.
"""
lower_quantile_pct: float = false_positive_rate / 2
upper_quantile_pct: float = 1.0 - false_positive_rate / 2
sample_lower_quantile: np.ndarray = np.quantile(metric_values, q=lower_quantile_pct)
sample_upper_quantile: np.ndarray = np.quantile(metric_values, q=upper_quantile_pct)
if random_seed:
random_state: np.random.Generator = np.random.Generator(
np.random.PCG64(random_seed)
)
bootstraps: np.ndarray = random_state.choice(
metric_values, size=(n_resamples, metric_values.size)
)
else:
bootstraps: np.ndarray = np.random.choice(
metric_values, size=(n_resamples, metric_values.size)
)
bootstrap_lower_quantiles: Union[np.ndarray, Number] = np.quantile(
bootstraps,
q=lower_quantile_pct,
axis=1,
)
bootstrap_lower_quantile_point_estimate: float = np.mean(bootstrap_lower_quantiles)
bootstrap_lower_quantile_standard_error: float = np.std(bootstrap_lower_quantiles)
bootstrap_lower_quantile_bias: float = (
bootstrap_lower_quantile_point_estimate - sample_lower_quantile
)
# Bias / Standard Error > 0.25 is a rule of thumb for when to apply bias correction.
# See:
# Efron, B., & Tibshirani, R. J. (1993). Estimates of bias. An Introduction to the Bootstrap (pp. 128).
# Springer Science and Business Media Dordrecht. DOI 10.1007/978-1-4899-4541-9
lower_quantile_bias_corrected_point_estimate: Number
if bootstrap_lower_quantile_bias / bootstrap_lower_quantile_standard_error <= 0.25:
lower_quantile_bias_corrected_point_estimate = (
bootstrap_lower_quantile_point_estimate
)
else:
lower_quantile_bias_corrected_point_estimate = (
bootstrap_lower_quantile_point_estimate - bootstrap_lower_quantile_bias
)
bootstrap_upper_quantiles: Union[np.ndarray, Number] = np.quantile(
bootstraps,
q=upper_quantile_pct,
axis=1,
)
bootstrap_upper_quantile_point_estimate: np.ndarray = np.mean(
bootstrap_upper_quantiles
)
bootstrap_upper_quantile_standard_error: np.ndarray = np.std(
bootstrap_upper_quantiles
)
bootstrap_upper_quantile_bias: float = (
bootstrap_upper_quantile_point_estimate - sample_upper_quantile
)
# Bias / Standard Error > 0.25 is a rule of thumb for when to apply bias correction.
# See:
# Efron, B., & Tibshirani, R. J. (1993). Estimates of bias. An Introduction to the Bootstrap (pp. 128).
# Springer Science and Business Media Dordrecht. DOI 10.1007/978-1-4899-4541-9
upper_quantile_bias_corrected_point_estimate: Number
if bootstrap_upper_quantile_bias / bootstrap_upper_quantile_standard_error <= 0.25:
upper_quantile_bias_corrected_point_estimate = (
bootstrap_upper_quantile_point_estimate
)
else:
upper_quantile_bias_corrected_point_estimate = (
bootstrap_upper_quantile_point_estimate - bootstrap_upper_quantile_bias
)
return (
lower_quantile_bias_corrected_point_estimate,
upper_quantile_bias_corrected_point_estimate,
) | 50494c15ded4b9cd7c54f4262f7d9b2137d2bd4f | 3,658,508 |
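A minimal sketch of calling the estimator above on synthetic data (numpy assumed available as np, matching the function body); the distribution and sample sizes are arbitrary.

```python
import numpy as np

metric_values = np.random.default_rng(0).normal(loc=100.0, scale=15.0, size=1_000)
lower, upper = _compute_bootstrap_quantiles_point_estimate_custom_bias_corrected_method(
    metric_values=metric_values,
    false_positive_rate=np.float64(0.05),
    n_resamples=1_000,
    random_seed=1,
)
print(lower, upper)  # roughly the 2.5th and 97.5th percentiles of the sample
```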
from base64 import urlsafe_b64encode
def bytes_to_b64(data: bytes, remove_padding=True) -> str:
"""
byte string to URL safe Base64 string, with option to remove B64 LSB padding
:param data: byte string
:param remove_padding: remove b64 padding (``=`` char). True by default
:return: base64 unicode string
"""
text = urlsafe_b64encode(data).decode()
if remove_padding:
return text.replace('=', '')
else:
return text | 8ca495948eb72ab6bb8bf95ae62b4d370a04cbe3 | 3,658,509 |
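An illustrative round trip for bytes_to_b64. The decoding helper below is not part of the original snippet; it simply restores the '=' padding that the encoder strips before handing the text back to the standard library.

```python
from base64 import urlsafe_b64decode

def b64_to_bytes(text: str) -> bytes:
    # re-add the padding stripped by bytes_to_b64, then decode
    return urlsafe_b64decode(text + "=" * (-len(text) % 4))

token = bytes_to_b64(b"\x00\xff")   # 'AP8' (padding removed)
assert b64_to_bytes(token) == b"\x00\xff"
```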
import re
def _case_sensitive_replace(string, old, new):
"""
Replace text, retaining exact case.
Args:
string (str): String in which to perform replacement.
old (str): Word or substring to replace.
new (str): What to replace `old` with.
Returns:
repl_string (str): Version of string where instances of
            `old` have been replaced with `new`, retaining case.
"""
def repl(match):
current = match.group()
# treat multi-word sentences word-by-word
old_words = current.split(" ")
new_words = new.split(" ")
out = []
for old_word, new_word in zip(old_words, new_words):
result = []
all_upper = True
for ind, chr in enumerate(old_word):
                if ind >= len(new_word):
break
if chr.isupper():
result.append(new_word[ind].upper())
else:
result.append(new_word[ind].lower())
all_upper = False
            # special cases - keep remaining case
if new_word.lower() in CASE_WORD_EXCEPTIONS:
result.append(new_word[ind + 1 :])
# append any remaining characters from new
elif all_upper:
result.append(new_word[ind + 1 :].upper())
else:
result.append(new_word[ind + 1 :].lower())
out.append("".join(result))
# if we have more new words than old ones, just add them verbatim
out.extend([new_word for ind, new_word in enumerate(new_words) if ind >= len(old_words)])
return " ".join(out)
if string is None:
return None
regex = re.compile(re.escape(old), re.I)
return regex.sub(repl, string) | bf20636146b42f67ec3ad0b4a00a80a9d6cb9ce6 | 3,658,510 |
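A hypothetical call to _case_sensitive_replace. CASE_WORD_EXCEPTIONS is referenced by the function but not shown here, so the sketch assumes it is defined at module level (an empty tuple is enough for this input).

```python
CASE_WORD_EXCEPTIONS = ()  # assumed module-level constant used by the function

print(_case_sensitive_replace("He said Hello. HELLO! hello?", "hello", "goodbye"))
# -> He said Goodbye. GOODBYE! goodbye?
```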
from typing import Dict
from typing import Any
def deserialize_transaction_from_etherscan(
data: Dict[str, Any],
internal: bool,
) -> EthereumTransaction:
"""Reads dict data of a transaction from etherscan and deserializes it
Can throw DeserializationError if something is wrong
"""
try:
# internal tx list contains no gasprice
gas_price = FVal(-1) if internal else FVal(data['gasPrice'])
tx_hash = read_hash(data, 'hash')
input_data = read_hash(data, 'input')
timestamp = deserialize_timestamp(data['timeStamp'])
block_number = read_integer(data, 'blockNumber')
nonce = -1 if internal else read_integer(data, 'nonce')
return EthereumTransaction(
timestamp=timestamp,
block_number=block_number,
tx_hash=tx_hash,
from_address=data['from'],
to_address=data['to'],
value=deserialize_fval(data['value']),
gas=deserialize_fval(data['gas']),
gas_price=gas_price,
gas_used=deserialize_fval(data['gasUsed']),
input_data=input_data,
nonce=nonce,
)
except KeyError as e:
raise DeserializationError(f'Etherscan ethereum transaction missing expected key {str(e)}') | c4184cea626b229a7c0de8848f95fb29ebdec6d3 | 3,658,511 |
import numpy as np
def ar(p):
"""
Given a quaternion p, return the 4x4 matrix A_R(p)
which when multiplied with a column vector q gives
the quaternion product qp.
Parameters
----------
p : numpy.ndarray
4 elements, represents quaternion
Returns
-------
numpy.ndarray
4x4 matrix describing action of quaternion multiplication
"""
return np.array([[p[0], -p[1], -p[2], -p[3]],
[p[1], p[0], p[3], -p[2]],
[p[2], -p[3], p[0], p[1]],
[p[3], p[2], -p[1], p[0]]]) | 0ee437eec9b62c902466de4e77b541fc3cb7a64a | 3,658,512 |
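A quick numerical check of the property stated in the docstring, assuming the (w, x, y, z) component order implied by the matrix layout; quat_mul is a plain Hamilton-product helper written only for this test, not part of the original snippet.

```python
import numpy as np

def quat_mul(a, b):
    """Hamilton product a*b with components ordered (w, x, y, z)."""
    w1, x1, y1, z1 = a
    w2, x2, y2, z2 = b
    return np.array([
        w1 * w2 - x1 * x2 - y1 * y2 - z1 * z2,
        w1 * x2 + x1 * w2 + y1 * z2 - z1 * y2,
        w1 * y2 - x1 * z2 + y1 * w2 + z1 * x2,
        w1 * z2 + x1 * y2 - y1 * x2 + z1 * w2,
    ])

rng = np.random.default_rng(0)
p, q = rng.normal(size=4), rng.normal(size=4)
assert np.allclose(ar(p) @ q, quat_mul(q, p))  # A_R(p) q == q * p
```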
import numpy as np
from tensorflow.keras.preprocessing.sequence import pad_sequences  # assumed source of pad_sequences
def preprocess_list(lst,tokenizer,max_len=None):
"""
function preprocesses a list of values returning tokenized sequences
Args:
lst: list of strings to be processed
tokenizer: a tokenizer object
max_len: if we need to ensure the same length of strings, we can provide an integer here
Returns:
a numpy array with tokenized sequences. Each sequence in a separate row
"""
return_seq = tokenizer.texts_to_sequences(lst)
seq = np.array(
pad_sequences(return_seq, maxlen=max_len,padding="post"),
dtype="float32"
)
return seq | c1ba91ae54b9869ac6dd80664b479a47c34388e2 | 3,658,513 |
from datetime import datetime
import sys
import xarray as xr
def get_GEOS5_as_ds_via_OPeNDAP(collection='inst3_3d_aer_Nv',
fcast_start_hour=12,
mode='seamless', dt=None):
"""
Get the GEOS-5 model product (GEOS-5) as a xr.Dataset (using OPeNDAP)
Parameters
----------
mode (str): retrieve the forecast (fcast) or assimilated fields (assim) or both
        (seamless)
dt (datetime.datetime): date to retrieve forecast from or assimilation for
collection (str): data collection to access (e.g. chm_inst_1hr_g1440x721_p23)
    fcast_start_hour (int): hour the forecast started on a given day
Returns
-------
(xr.dataset)
NOTES
---
- default is to get the latest forecast for chemistry (via seamless route)
- See documentation for details: https://geos5.org/wiki/index.php?title=GEOS-5_Earth_System_Modeling_and_Data_Assimilation
- Collections include:
- The forecast set going at different times are for different length.
00 - ~10 days
06 - ~1.5 days
12 - ~5 days
18 - ~1.5 days
- the 5 day forecast for a given day is selected as default (fcast_start_hour)
"""
# Root OPeNDAP directory
root_url = 'https://opendap.nccs.nasa.gov/dods/GEOS-5/fp/0.25_deg/{}/'
root_url = root_url.format(mode)
# Make up the complete URL for a forecast or assimilation field
if (mode == 'fcast') or (mode == 'seamless'):
# Which date to use?
if isinstance(dt, type(None)):
            # Use the latest file (default)
URL = '{}/{}.latest'.format(root_url, collection)
else:
# Use a file specified in arguments
            correct_type = type(dt) == datetime
            assert correct_type, "'date' variable must be a datetime.datetime object"
            # Use the 'latest' file (default)
            # NOTE: latest 12 file is used to match up with GEOS-CF
# TODO: update this. this will not give enough data
dstr = dt.strftime(format='%Y%m%d')
URL = '{}/{}/{}.{}_{:0>2}'
URL = URL.format(root_url, collection, collection, dstr, fcast_start_hour)
elif mode == 'assim':
# Just retrieve an OPeNDAP pointer to the entire dataset for now
URL = '{}/{}'.format(root_url, collection)
else:
print("WARNING: GEOS-5 mode provided ('{}') not known".format(mode))
sys.exit()
# Open the dataset via OPeNDAP and return
ds = xr.open_dataset(URL)
return ds | 2e31229d8fdbdd1cfb38d52fb2160bdc86d56453 | 3,658,514 |
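A hypothetical call to the function above (it requires live network access to the NASA OPeNDAP server); the collection and date are placeholders chosen for illustration.

```python
from datetime import datetime

ds = get_GEOS5_as_ds_via_OPeNDAP(collection='inst3_3d_aer_Nv',
                                 fcast_start_hour=12,
                                 mode='fcast',
                                 dt=datetime(2024, 1, 15))  # placeholder date
print(ds.data_vars)
```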
import pandas as pd
def to_dataframe(ticks: list) -> pd.DataFrame:
"""Convert list to Series compatible with the library."""
df = pd.DataFrame(ticks)
df['time'] = pd.to_datetime(df['time'], unit='s')
df.set_index("time", inplace=True)
return df | 6f312e9e8f401d21cebc1404a24ba37738a2819d | 3,658,515 |
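A minimal illustration of to_dataframe with hand-made ticks (epoch seconds in the 'time' key); the field names other than 'time' are arbitrary.

```python
ticks = [
    {"time": 1_600_000_000, "bid": 1.1812, "ask": 1.1814},
    {"time": 1_600_000_060, "bid": 1.1813, "ask": 1.1815},
]
df = to_dataframe(ticks)
print(df.index.dtype)  # datetime64[ns], indexed by the converted 'time' column
```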
def keysCode(code):
"""
Download user's keys from an email link
GET: If the code is valid, download user keys
Else abort with a 404
"""
#Check if code exists and for the correct purpose. Else abort
if (hl.checkCode(code,"Keys")):
user = hl.getUserFromCode(code)
else:
abort(404)
#Mark code as used
hl.flagCode(code)
#return
return getKeys(user["Name"]) | 533f17cd4a2fb999f6ffd135a1e647f48266a04c | 3,658,516 |
def lengthenFEN(fen):
"""Lengthen FEN to 71-character form (ex. '3p2Q' becomes '111p11Q')"""
return fen.replace('8','11111111').replace('7','1111111') \
.replace('6','111111').replace('5','11111') \
.replace('4','1111').replace('3','111').replace('2','11') | f49cdf8ad6919fbaaad1abc83e24b1a33a3ed3f8 | 3,658,517 |
def keyboard_mapping(display):
"""Generates a mapping from *keysyms* to *key codes* and required
modifier shift states.
:param Xlib.display.Display display: The display for which to retrieve the
keyboard mapping.
:return: the keyboard mapping
"""
mapping = {}
shift_mask = 1 << 0
group_mask = alt_gr_mask(display)
# Iterate over all keysym lists in the keyboard mapping
min_keycode = display.display.info.min_keycode
keycode_count = display.display.info.max_keycode - min_keycode + 1
for index, keysyms in enumerate(display.get_keyboard_mapping(
min_keycode, keycode_count)):
key_code = index + min_keycode
# Normalise the keysym list to yield a tuple containing the two groups
normalized = keysym_normalize(keysyms)
if not normalized:
continue
# Iterate over the groups to extract the shift and modifier state
for groups, group in zip(normalized, (False, True)):
for keysym, shift in zip(groups, (False, True)):
if not keysym:
continue
shift_state = 0 \
| (shift_mask if shift else 0) \
| (group_mask if group else 0)
# Prefer already known lesser shift states
if keysym in mapping and mapping[keysym][1] < shift_state:
continue
mapping[keysym] = (key_code, shift_state)
return mapping | c9d2e0caea532ab66b00744d17ff6274f42844e9 | 3,658,518 |
def convertPeaks(peaksfile, bedfile):
"""Convert a MACS output file `peaksfile' to a BED file. Also works if the input is already in BED format."""
regnum = 1
with open(bedfile, "w") as out:
with open(peaksfile, "r") as f:
tot = 0
chrom = ""
start = 0
end = 0
c = CSVreader(f)
for line in c:
if len(line) == 0 or line[0][0] == '#' or line[0] == 'chr':
continue
bchrom = line[0]
if "_" in bchrom: # get rid of weird chromosomes
continue
# New chromosome?
if bchrom != chrom:
if end > 0:
out.write("{}\t{}\t{}\treg{}\t{}\t+\n".format(chrom, start, end, regnum, regnum))
regnum += 1
chrom = bchrom
start = 0
end = 0
# Unwanted chromosome?
if bchrom == 'chrM' or "random" in bchrom:
start = 0
end = 0
continue
# Good line
bstart = int(line[1])
bend = int(line[2])
if start <= bstart <= end:
# Extend current region
end = bend
else:
# Start new region
tot += (end - start)
if end > 0:
out.write("{}\t{}\t{}\treg{}\t{}\t+\n".format(chrom, start, end, regnum, regnum))
regnum += 1
start = bstart
end = bend
out.write("{}\t{}\t{}\treg{}\t{}\t+\n".format(chrom, start, end, regnum, regnum))
tot += (end - start)
return (tot, regnum) | 6c9af82254efb98d35c9182ebe53c4f3802cdb7f | 3,658,519 |
import numpy as np
from freud.box import Box
def create_freud_box(box: np.ndarray, is_2D=True) -> Box:
"""Convert an array of box values to a box for use with freud functions
The freud package has a special type for the description of the simulation cell, the
Box class. This is a function to take an array of lengths and tilts to simplify the
creation of the Box class for use with freud.
"""
# pylint: disable=invalid-name
Lx, Ly, Lz = box[:3]
xy = xz = yz = 0
if len(box) == 6:
xy, xz, yz = box[3:6]
if is_2D:
return Box(Lx=Lx, Ly=Ly, xy=xy, is2D=is_2D)
return Box(Lx=Lx, Ly=Ly, Lz=Lz, xy=xy, xz=xz, yz=yz)
# pylint: enable=invalid-name | 94ea3769d8138907bf29a30fc8afcf6b990264f1 | 3,658,520 |
import xarray as xr
def hrrr_snotel_pixel(file, x_pixel_index, y_pixel_index):
"""
Read GRIB file surface values, remove unsed dimensions, and
set the time dimension.
Required to be able to concatenate all GRIB file to a time series
"""
hrrr_file = xr.open_dataset(
file.as_posix(),
engine='cfgrib',
backend_kwargs={
'errors': 'ignore',
'indexpath': '',
'filter_by_keys': {
'level': 0,
'typeOfLevel': 'surface',
}
},
).isel(x=[x_pixel_index], y=[y_pixel_index])
del hrrr_file.coords['valid_time']
del hrrr_file.coords['surface']
del hrrr_file.coords['step']
return hrrr_file.expand_dims(time=[hrrr_file.time.values]) | 22a66317d672874b9ababfd0a7daa364d06ea87e | 3,658,521 |
def convert_to_diact_uttseg_interactive_tag(previous, tag):
"""Returns the dialogue act but with the fact it is keeping or
taking the turn.
"""
if not previous:
previous = ""
trp_tag = uttseg_pattern(tag)
return trp_tag.format(convert_to_diact_interactive_tag(previous, tag)) | 06950132147d374002495d92e456fe52a6d9546f | 3,658,522 |
import numpy as np
from scipy.spatial.distance import cdist
from mne.chpi import _get_hpi_initial_fit
def compute_good_coils(raw, t_step=0.01, t_window=0.2, dist_limit=0.005,
                       prefix='', gof_limit=0.98, verbose=None):
    """Compute time-varying coil distances."""
    try:
        # newer MNE versions provide these helpers; fall back otherwise
        from mne.chpi import compute_chpi_amplitudes, compute_chpi_locs
    except ImportError:
        chpi_locs = _old_chpi_locs(raw, t_step, t_window, prefix)
    else:
        chpi_amps = compute_chpi_amplitudes(
            raw, t_step_min=t_step, t_window=t_window)
        chpi_locs = compute_chpi_locs(raw.info, chpi_amps)
    hpi_dig_head_rrs = _get_hpi_initial_fit(raw.info, verbose=False)
hpi_coil_dists = cdist(hpi_dig_head_rrs, hpi_dig_head_rrs)
counts = np.empty(len(chpi_locs['times']), int)
for ii, (t, coil_dev_rrs, gof) in enumerate(zip(
chpi_locs['times'], chpi_locs['rrs'], chpi_locs['gofs'])):
these_dists = cdist(coil_dev_rrs, coil_dev_rrs)
these_dists = np.abs(hpi_coil_dists - these_dists)
# there is probably a better algorithm for finding the bad ones...
use_mask = gof >= gof_limit
good = False
while not good:
d = these_dists[use_mask][:, use_mask]
d_bad = d > dist_limit
good = not d_bad.any()
if not good:
if use_mask.sum() == 2:
use_mask[:] = False
break # failure
# exclude next worst point
badness = (d * d_bad).sum(axis=0)
exclude_coils = np.where(use_mask)[0][np.argmax(badness)]
use_mask[exclude_coils] = False
counts[ii] = use_mask.sum()
t = chpi_locs['times'] - raw.first_samp / raw.info['sfreq']
return t, counts, len(hpi_dig_head_rrs), chpi_locs | 060658dfae82768a5dff31a365f1c200d6f5d223 | 3,658,523 |
import xml.etree.ElementTree as ET
def prep_request(items, local_id="id"):
"""
Process the incoming items into an AMR request.
<map name="cite_1">
<val name="{id_type}">{value}</val>
</map>
"""
map_items = ET.Element("map")
for idx, pub in enumerate(items):
if pub is None:
continue
local_id_value = pub.get(local_id) or pub.get(local_id.upper())
if local_id_value is None:
local_id_value = str(idx)
this_item = ET.Element("map", name=local_id_value)
for k, v in pub.items():
if v is None:
continue
de = ET.Element("val", name=k.lower())
de.text = v.strip()
this_item.append(de)
map_items.append(this_item)
request_items = ET.tostring(map_items)
xml = id_request_template.format(user=client.USER, password=client.PASSWORD, items=request_items)
return xml | 46f1f7a94ffccc4eec2192fe100664c3d9e2d829 | 3,658,524 |
from averages_module import VariableType
from lrc_module import potential_lrc, pressure_lrc
def calc_variables ( ):
"""Calculates all variables of interest.
They are collected and returned as a list, for use in the main program.
"""
# In this example we simulate using the cut (but not shifted) potential
# but we only report results which have had the long-range corrections applied
# The value of the cut-and-shifted potential is not used, in this example
# Preliminary calculations (n,r,total are taken from the calling program)
vol = box**3 # Volume
rho = n / vol # Density
kin = 1.5 * n * p * temperature # Average kinetic energy for NP-atom system
kin_q = kin - total_spr # Quantum estimator for kinetic energy
rad_g = rad_gyr ( r )
# Variables of interest, of class VariableType, containing three attributes:
# .val: the instantaneous value
# .nam: used for headings
# .method: indicating averaging method
# If not set below, .method adopts its default value of avg
# The .nam and some other attributes need only be defined once, at the start of the program,
# but for clarity and readability we assign all the values together below
# Acceptance ratio of atomic moves
r_r = VariableType ( nam = 'Atomic move ratio', val = r_ratio, instant = False )
# Acceptance ratio of centre-of-mass moves
c_r = VariableType ( nam = 'COM move ratio', val = c_ratio, instant = False )
# Internal energy per atom for full potential with LRC
# LRC plus cut (but not shifted) PE already divided by factor P
# plus KE estimator: total classical KE for NP-atom system MINUS total spring potential
# all divided by N
e_f = VariableType ( nam = 'E/N full', val = potential_lrc(rho,r_cut) + (kin_q+total.pot)/n )
# Kinetic energy per atom, just for interest's sake
k_q = VariableType ( nam = 'KE/N', val = kin_q/n )
# Pressure for full potential with LRC
# LRC plus ideal gas contribution plus total virial divided by V
kin_q = kin_q / 1.5 # Convert KE estimator to kinetic energy part of pressure
p_f = VariableType ( nam = 'P full', val = pressure_lrc(rho,r_cut) + (kin_q+total.vir)/vol )
# Quantum spring energy per atom, just for interest's sake
e_q = VariableType ( nam = 'Espring/N', val = total_spr/n )
# Quantum polymer radius of gyration, just for interest's sake
r_g = VariableType ( nam = 'Radius of gyration', val = rad_g )
# Collect together into a list for averaging
return [ r_r, c_r, e_f, p_f, e_q, k_q, r_g ] | 4d0c066ccf4da82955a60d22c0ec27efc975df6d | 3,658,525 |
import matplotlib.pyplot as plt
import pandas as pd
import logging
import xarray as xr
def analyse_results_ds_one_station(dss, field='WetZ', verbose=None,
plot=False):
"""analyse and find an overlapping signal to fields 'WetZ' or 'WetZ_error'
in dss"""
# algorithm for zwd stitching of 30hrs gipsyx runs:
# just take the mean of the two overlapping signals
# and then smooth is with savgol_filter using 3 hours more data in each
# direction...
def select_two_ds_from_gipsyx_results(ds, names=['WetZ_0', 'WetZ_1'],
hours_offset=None):
"""selects two dataarrays from the raw gipsyx results dataset"""
time0 = list(set(ds[names[0]].dims))[0]
time1 = list(set(ds[names[1]].dims))[0]
time = list(set(ds[names[0]][time0].values).intersection(set(ds[names[1]][time1].values)))
# time = dim_intersection([ds[names[0]], ds[names[1]]], dim='time')
if not time:
return None
time = sorted(pd.to_datetime(time))
if hours_offset is not None:
# freq = pd.infer_freq(time)
start = time[0] - pd.DateOffset(hours=hours_offset)
end = time[-1] + pd.DateOffset(hours=hours_offset)
# time = pd.date_range(start, end, freq=freq)
first = ds[names[0]].sel({time0: slice(start, end)})
second = ds[names[1]].sel({time1: slice(start, end)})
else:
first = ds[names[0]].sel({time0: time})
second = ds[names[1]].sel({time1: time})
first = first.rename({time0: 'time'})
second = second.rename({time1: 'time'})
two = xr.Dataset()
two[first.name] = first
two[second.name] = second
df = two.to_dataframe()
return df
logger = logging.getLogger('gipsyx_post_proccesser')
if verbose == 0:
logger.info('analysing {} field.'.format(field))
# first, group different vars for different stitching schemes:
to_smooth = ['GradEast', 'GradNorth', 'WetZ']
to_simple_mean = ['X', 'Y', 'Z']
to_error_mean = [x + '_error' for x in to_smooth] + [x + '_error' for x in
to_simple_mean]
# second, select the field to work on:
nums = sorted(list(set([int(x.split('-')[1])
for x in dss if x.split('-')[0] == field])))
ds = dss[['{}-{}'.format(field, i) for i in nums]]
df_list = []
for i, _ in enumerate(ds):
if i == len(ds) - 1:
break
first = ds['{}-{}'.format(field, i)]
second = ds['{}-{}'.format(field, i + 1)]
if verbose == 1:
print('proccesing {} and {}'.format(first.name, second.name))
# 3 hours addition to each side:
df = select_two_ds_from_gipsyx_results(ds, [first.name, second.name],
3)
if df is not None:
if field in to_smooth:
wn = 25
order = 3
stitched = stitch_two_cols(df, wn, order, method='smooth_mean')
action = 'stitched and replaced daily discontinuities '\
'with smooth(savgol filter, window:{}, order:{}) mean'.format(wn, order)
elif field in to_simple_mean:
stitched = stitch_two_cols(df, method='simple_mean')
action = 'stitched and replaced daily discontinuities '\
'with simple mean'
elif field in to_error_mean:
stitched = stitch_two_cols(df, method='error_mean')
action = 'stitched and replaced daily discontinuities '\
'with error mean (sqrt(errorA^2 + errorB^2))'
df_list.append(stitched)
# df_list.append(find_cross_points(df, None))
elif df is None:
if verbose:
logger.warning('skipping {} and {}...'.format(first.name, second.name))
da = pd.concat([x['stitched_signal'] for x in df_list]).to_xarray()
attrs_list = [(x, y)
for x, y in dss.attrs.items() if field == x.split('>')[0]]
attrs_list.append(('{}>action'.format(field), action))
for items in attrs_list:
da.attrs[items[0]] = items[1]
da.attrs['station'] = dss.attrs['station']
if plot:
fig, ax = plt.subplots(figsize=(16, 5))
da.plot.line(marker='.', linewidth=0., ax=ax, color='k')
for i, ppp in enumerate(ds):
ds['{}-{}'.format(field, i)].plot(ax=ax)
units = dss.attrs['{}>units'.format(field)]
sta = da.attrs['station']
desc = da.attrs['{}>desc'.format(field)]
ax.set_ylabel('{} [{}]'.format(field, units))
ax.set_xlabel('')
fig.suptitle('30 hours stitched {} for GNSS station {}'.format(desc, sta), fontweight='bold')
fig.tight_layout()
fig.subplots_adjust(top=0.95)
ax.grid()
# dfs = []
# for df in df_list:
# # check if there is an offset:
# A = df.columns.values[0]
# B = df.columns.values[1]
# if all([x is None for x in df.Cross]):
# offset = df.Diff.median()
# df['{}_new'.format(B)] = df[B] + offset
# dfs.append(df)
return da | 15f0248af152c31231af9e5a6a586d61e3e3ed9a | 3,658,526 |
from mailmerge import MailMerge
def findDocument_MergeFields(document):
    """this function collects the Merge fields of a docx template (used to
    build a new docx document from the template and JSON content) and
    returns them as a dict with an empty value for each field"""
the_document = MailMerge(document)
all_fields = the_document.get_merge_fields()
res = {element:'' for element in all_fields}
return res | 9822f40e5f57bbc72f9292da9bd2a1c134776c2f | 3,658,527 |
def load_mushroom(data_home=None, return_dataset=False):
"""
Loads the mushroom multivariate dataset that is well suited to binary
classification tasks. The dataset contains 8123 instances with 3
categorical attributes and a discrete target.
The Yellowbrick datasets are hosted online and when requested, the dataset
is downloaded to your local computer for use. Note that if the dataset
hasn't been downloaded before, an Internet connection is required. However,
if the data is cached locally, no data will be downloaded. Yellowbrick
checks the known signature of the dataset with the data downloaded to
ensure the download completes successfully.
Datasets are stored alongside the code, but the location can be specified
with the ``data_home`` parameter or the $YELLOWBRICK_DATA envvar.
Parameters
----------
data_home : str, optional
The path on disk where data is stored. If not passed in, it is looked
up from YELLOWBRICK_DATA or the default returned by ``get_data_home``.
return_dataset : bool, default=False
Return the raw dataset object instead of X and y numpy arrays to
get access to alternative targets, extra features, content and meta.
Returns
-------
X : array-like with shape (n_instances, n_features) if return_dataset=False
A pandas DataFrame or numpy array describing the instance features.
y : array-like with shape (n_instances,) if return_dataset=False
A pandas Series or numpy array describing the target vector.
dataset : Dataset instance if return_dataset=True
The Yellowbrick Dataset object provides an interface to accessing the
data in a variety of formats as well as associated metadata and content.
"""
return _load_dataset('mushroom', data_home, return_dataset) | e300a1cade8532d18ebea1f5175d9c3001112855 | 3,658,528 |
def get_current_project(user_id):
"""Return from database user current project"""
try:
current = CurrentProject.objects.get(user_id=user_id)
except CurrentProject.DoesNotExist:
return None
keystone = KeystoneNoRequest()
return keystone.project_get(current.project) | dc8b1cf44ccd4c51bf58615657520007f2eca5db | 3,658,529 |
def get_random_successful_answer(intent: str) -> str:
"""
Get a random successful answer for this intent
* `intent`: name-parameter of the yml-section with which the successful answers were imported
**Returns:** None if no successful answers are known for this intent,
otherwise a random element of the successful answers for this intent
"""
return random_or_none(get_successful_answer_list(intent)) | e8106adff5f5a45c5b5e0ff12130d828fa2f4a55 | 3,658,530 |
from typing import Any
def formatter(
source: str,
language: str,
css_class: str,
options: dict[str, Any],
md: Markdown,
classes: list[str] | None = None,
id_value: str = "",
attrs: dict[str, Any] | None = None,
**kwargs: Any,
) -> str:
"""Execute code and return HTML.
Parameters:
source: The code to execute.
language: The code language, like python or bash.
css_class: The CSS class to add to the HTML element.
options: The container for options.
        md: The Markdown instance.
        classes: Additional CSS classes.
        id_value: An optional HTML id.
        attrs: Additional attributes.
**kwargs: Additional arguments passed to SuperFences default formatters.
Returns:
HTML contents.
"""
fmt = _formatters.get(language, lambda source, *args, **kwargs: source)
return fmt(source, md, **options) | f141732ff6bd5d3bd7cc1a83895b0e2c020bf8cf | 3,658,531 |
def find_visible(vertex_candidates, edges_to_check):
"""
# IMPORTANT: self.translate(new_origin=query_vertex) always has to be called before!
(for computing the angle representations wrt. the query vertex)
query_vertex: a vertex for which the visibility to the vertices should be checked.
also non extremity vertices, polygon vertices and vertices with the same coordinates are allowed.
query point also might lie directly on an edge! (angle = 180deg)
:param vertex_candidates: the set of all vertices which should be checked for visibility.
IMPORTANT: is being manipulated, so has to be a copy!
IMPORTANT: must not contain the query vertex!
:param edges_to_check: the set of edges which determine visibility
:return: a set of tuples of all vertices visible from the query vertex and the corresponding distance
"""
visible_vertices = set()
if len(vertex_candidates) == 0:
return visible_vertices
priority_edges = set()
# goal: eliminating all vertices lying 'behind' any edge
# TODO improvement in combination with priority: process edges roughly in sequence, but still allow jumps
# would follow closer edges more often which have a bigger chance to eliminate candidates -> speed up
while len(vertex_candidates) > 0 and len(edges_to_check) > 0:
# check prioritized items first
try:
edge = priority_edges.pop()
edges_to_check.remove(edge)
except KeyError:
edge = edges_to_check.pop()
lies_on_edge = False
v1, v2 = edge.vertex1, edge.vertex2
if v1.get_distance_to_origin() == 0.0:
# vertex1 has the same coordinates as the query vertex -> on the edge
lies_on_edge = True
# (but does not belong to the same polygon, not identical!)
# mark this vertex as not visible (would otherwise add 0 distance edge in the graph)
vertex_candidates.discard(v1)
# its angle representation is not defined (no line segment from vertex1 to query vertex!)
range_less_180 = v1.is_extremity
# do not check the other neighbouring edge of vertex1 in the future
e1 = v1.edge1
edges_to_check.discard(e1)
priority_edges.discard(e1)
# everything between its two neighbouring edges is not visible for sure
v1, v2 = v1.get_neighbours()
elif v2.get_distance_to_origin() == 0.0:
lies_on_edge = True
vertex_candidates.discard(v2)
range_less_180 = v2.is_extremity
e1 = v2.edge2
edges_to_check.discard(e1)
priority_edges.discard(e1)
v1, v2 = v2.get_neighbours()
repr1 = v1.get_angle_representation()
repr2 = v2.get_angle_representation()
repr_diff = abs(repr1 - repr2)
if repr_diff == 2.0:
# angle == 180deg -> on the edge
lies_on_edge = True
range_less_180 = False # does actually not matter here
if lies_on_edge:
# when the query vertex lies on an edge (or vertex) no behind/in front checks must be performed!
# the neighbouring edges are visible for sure
try:
vertex_candidates.remove(v1)
visible_vertices.add(v1)
except KeyError:
pass
try:
vertex_candidates.remove(v2)
visible_vertices.add(v2)
except KeyError:
pass
# all the candidates between the two vertices v1 v2 are not visible for sure
# candidates with the same representation should not be deleted, because they can be visible!
vertex_candidates.difference_update(
find_within_range(repr1, repr2, repr_diff, vertex_candidates, angle_range_less_180=range_less_180,
equal_repr_allowed=False))
continue
# case: a 'regular' edge
# eliminate all candidates which are blocked by the edge
# that means inside the angle range spanned by the edge and actually behind it
vertices_to_check = vertex_candidates.copy()
# the vertices belonging to the edge itself (its vertices) must not be checked.
# use discard() instead of remove() to not raise an error (they might not be candidates)
vertices_to_check.discard(v1)
vertices_to_check.discard(v2)
if len(vertices_to_check) == 0:
continue
# assert repr1 is not None
# assert repr2 is not None
# for all candidate edges check if there are any candidate vertices (besides the ones belonging to the edge)
# within this angle range
# the "view range" of an edge from a query point (spanned by the two vertices of the edge)
# is always < 180deg when the edge is not running through the query point (=180 deg)
# candidates with the same representation as v1 or v2 should be considered.
# they can be visible, but should be ruled out if they lie behind any edge!
vertices_to_check = find_within_range(repr1, repr2, repr_diff, vertices_to_check, angle_range_less_180=True,
equal_repr_allowed=True)
if len(vertices_to_check) == 0:
continue
# if a candidate is farther away from the query point than both vertices of the edge,
# it surely lies behind the edge
max_distance = max(v1.get_distance_to_origin(), v2.get_distance_to_origin())
vertices_behind = set(filter(lambda extr: extr.get_distance_to_origin() > max_distance, vertices_to_check))
# they do not have to be checked, no intersection computation necessary
# TODO improvement: increase the neighbouring edges' priorities when there were extremities behind
vertices_to_check.difference_update(vertices_behind)
if len(vertices_to_check) == 0:
# also done later, only needed if skipping this edge
vertex_candidates.difference_update(vertices_behind)
continue
        # if the candidate is closer than both edge vertices it surely lies in front
min_distance = min(v1.get_distance_to_origin(), v2.get_distance_to_origin())
vertices_in_front = set(
filter(lambda extr: extr.get_distance_to_origin() < min_distance, vertices_to_check))
# they do not have to be checked (safes computation)
vertices_to_check.difference_update(vertices_in_front)
# for all remaining vertices v it has to be tested if the line segment from query point (=origin) to v
# has an intersection with the current edge p1---p2
# vertices directly on the edge are allowed (not eliminated)!
p1 = v1.get_coordinates_translated()
p2 = v2.get_coordinates_translated()
for vertex in vertices_to_check:
if lies_behind(p1, p2, vertex.get_coordinates_translated()):
vertices_behind.add(vertex)
else:
vertices_in_front.add(vertex)
# vertices behind any edge are not visible
vertex_candidates.difference_update(vertices_behind)
# if there are no more candidates left. immediately quit checking edges
if len(vertex_candidates) == 0:
break
# check the neighbouring edges of all vertices which lie in front of the edge next first
# (prioritize them)
# they lie in front and hence will eliminate other vertices faster
# the fewer vertex candidates remain, the faster the procedure
# TODO improvement: increase priority every time and draw highest priority items
# but this involves sorting (expensive for large polygons!)
# idea: work with a list of sets, add new set for higher priority, no real sorting, but still managing!
# TODO test speed impact
for e in vertices_in_front:
# only add the neighbour edges to the priority set if they still have to be checked!
if type(e) == PolygonVertex:
# only vertices belonging to polygons have neighbours
priority_edges.update(edges_to_check.intersection({e.edge1, e.edge2}))
# all edges have been checked
# all remaining vertices were not concealed behind any edge and hence are visible
visible_vertices.update(vertex_candidates)
# return a set of tuples: (vertex, distance)
return {(e, e.get_distance_to_origin()) for e in visible_vertices} | 07c1087fd603a1bccf18fe145706d7ea4491081c | 3,658,532 |
from collections import defaultdict
from typing import Iterable
from typing import Dict
import logging
import numpy as np
def gather_data(
network_stats: Iterable, start_time: int, end_time: int, step: int
) -> Dict:
"""This function takes Prometheus data and reshapes it into a multi-level
dictionary of network name to link name to link dir to list of values."""
label_val_map: defaultdict = defaultdict(
lambda: defaultdict(lambda: defaultdict(list))
)
for network, prom_results in network_stats:
if prom_results is None:
continue
for query, values in prom_results.items():
logging.info(f"Processing data for network {network} and metric {query}")
if not values:
logging.debug(f"Found no {query} results for {network}")
continue
for result in values:
link_name = result["metric"][consts.link_name]
link_dir = result["metric"][consts.link_direction]
val_array = label_val_map[network][link_name][link_dir]
if len(val_array) == 0:
# Create empty array of length equal to duration_s sampled at step_s
val_array = [np.nan] * int((end_time - start_time) / step)
label_val_map[network][link_name][link_dir] = val_array
for timestamp, metric_value in result["values"]:
                    # Put values at the appropriate index of array based on timestamp
val_array[int((int(timestamp) - start_time) / step - 1)] = int(
metric_value
)
return label_val_map | 9871c2f15f6c82b9af538393da40006977ced356 | 3,658,533 |
import pandas as pd
import requests
def get_balance_sheet(ticker, limit, key, period):
"""Get the Balance sheet."""
URL = 'https://financialmodelingprep.com/api/v3/balance-sheet-statement/'
try:
r = requests.get(
            '{}{}?period={}&limit={}&apikey={}'.format(URL,
ticker,
period,
limit,
key))
balanceSheet = pd.DataFrame.from_dict(r.json()).transpose()
balanceSheet.columns = balanceSheet.iloc[0]
return balanceSheet[1:]
except requests.exceptions.HTTPError as e:
# We want a 200 value
print('Requesting Balance sheet statement ERROR: ', str(e)) | ae31a9d97715e1bc8818f64df48c18c3a7c806a3 | 3,658,534 |
import numpy as np
def softmax_loss(scores, y):
"""
Computes the loss and gradient for softmax classification.
Inputs:
- scores: Input data, of shape (N, C) where x[i, j] is the score for the jth
class for the ith input.
- y: Vector of labels, of shape (N,) where y[i] is the label for x[i] and
0 <= y[i] < C
Returns a tuple of:
- loss: Scalar giving the loss
- dscores: Gradient of the loss with respect to x
"""
N, C = scores.shape
scores = scores - np.max(scores, 1, keepdims=True)
loss = np.sum(-1 * scores[np.arange(N), y]) + np.sum(np.log(np.sum(np.exp(scores), 1)))
loss /= N
scores_e = np.exp(scores)
dscores = scores_e / np.sum(scores_e, 1).reshape(N, 1)
dscores[np.arange(N), y] = dscores[np.arange(N), y] - 1
dscores /= N
return loss, dscores | 7cc0e4fc070ab0a8cdc32c75aec342dac34179ab | 3,658,535 |
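A small sanity check for softmax_loss, assuming numpy as np: with all-zero scores every class is equally likely, so the loss should equal ln(C) and each gradient row should sum to zero.

```python
import numpy as np

N, C = 5, 4
scores = np.zeros((N, C))
y = np.array([0, 1, 2, 3, 0])
loss, dscores = softmax_loss(scores, y)
assert np.isclose(loss, np.log(C))
assert np.allclose(dscores.sum(axis=1), 0.0)
```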
import os
import pickle
def load_flags(save_dir, save_file="flags.obj"):
"""
This function inflate the pickled object to flags object for reuse, typically during evaluation (after training)
:param save_dir: The place where the obj is located
:param save_file: The file name of the file, usually flags.obj
:return: flags
"""
with open(os.path.join(save_dir, save_file), 'rb') as f: # Open the file
flags = pickle.load(f) # Use pickle to inflate the obj back to RAM
return flags | 44cc70f185645799fdfd81c8806f3d3f8585fef4 | 3,658,536 |
def text_to_lines(path):
"""
Parse a text file into lines.
Parameters
----------
path : str
Fully specified path to text file
Returns
-------
list
Non-empty lines in the text file
"""
delimiter = None
with open(path, encoding='utf-8-sig', mode='r') as f:
text = f.read()
if delimiter is not None and delimiter not in text:
e = DelimiterError(
'The delimiter specified does not create multiple words. Please specify another delimiter.')
raise (e)
lines = [x.strip().split(delimiter) for x in text.splitlines() if x.strip() != '']
return lines | df723ee40a490c084301584bd9374445ef73a5ae | 3,658,537 |
def measure_hemijunctions_timelapse(ims_labels, ims_labels_hjs):
"""
Measure the hemijunction traits from a timelapse of a live-imaged epithelium.
Parameters
----------
ims_labels : 3D ndarray (t,y,x)
Each timepoint is a 2D array with labeled regions.
ims_labels_hjs : 3D ndarray (t,y,x)
Each timepoint is a 2D array with hemijunctions labeled such that each one
has the same label as its "sending cell". Each "interface" spans a cell-cell
junction and is composed of two hemijunctions.
Returns
-------
df_hjs : pandas DataFrame
Each row is a single hemijunction from a single time step.
"""
# Total number of frames
total_t = np.shape(ims_labels)[0]
dfs = []
for t in range(total_t):
print(f"Measuring hemijunctions for timepoint {t} out of {total_t - 1}")
df_tmp = measure_hemijunctions(ims_labels[t], ims_labels_hjs[t])
# Add a column for t_step
df_tmp["t_step"] = [t] * len(df_tmp.index)
dfs.append(df_tmp)
df_hjs = pd.concat(dfs, ignore_index=True)
return df_hjs | c26779cd310a849843b20c8fc02539f972965c1a | 3,658,538 |
def get_compare_tables_checks_tasks():
"""Get list of tasks that will compare tables checks between databases.
Args:
Returns:
        list: list of tasks to be executed in a process pool. Each item is a dict instance with the following structure:
{
'function' (function): the function to be executed.
'kwds': keyworded args to be passed to the function.
}
"""
return [{
'function': compare_tables_checks,
'kwds': {}
}] | 9c210b1ebf43bffa6e2e9db0c53ebab5ba76c6bf | 3,658,539 |
from typing import Union
from typing import Set
def label_pr_failures(pull: Union[PullRequest, ShortPullRequest]) -> Set[str]:
"""
Labels the given pull request to indicate which checks are failing.
:param pull:
:return: The new labels set for the pull request.
"""
pr_checks = get_checks_for_pr(pull)
failure_labels: Set[str] = set()
success_labels: Set[str] = set()
def determine_labels(from_, to):
for check in from_:
if _python_dev_re.match(check):
continue
if check in {"Flake8", "docs"}:
to.add(f"failure: {check.lower()}")
elif check.startswith("mypy"):
to.add("failure: mypy")
elif check.startswith("ubuntu"):
to.add("failure: Linux")
elif check.startswith("windows"):
to.add("failure: Windows")
determine_labels(pr_checks.failing, failure_labels)
determine_labels(pr_checks.successful, success_labels)
issue: Issue = pull.issue()
current_labels = {label.name for label in issue.labels()}
for label in success_labels:
if label in current_labels and label not in failure_labels:
issue.remove_label(label)
new_labels = current_labels - success_labels
new_labels.update(failure_labels)
if new_labels != current_labels:
issue.add_labels(*new_labels)
return new_labels | ad36f23aa9e3d695e0ddab5a165e5665fdccf91c | 3,658,540 |
from typing import Optional
import logging
import os
def _replace_folder_path(path: str, from_folder: str, to_folder: str) -> Optional[str]:
"""Changes the path from the source ('from') folder to the destination ('to') folder
Arguments:
path: the path to adjust
from_folder: the folder to change from
to_folder: the folder to change the path to
Return:
A copy of the path with the folder changed when 'path' starts with 'from_folder', othwerwise
None is returned
Notes:
Only fully qualified partial paths are considered valid. Thus, '/a/b/c' is NOT considered the start of path '/a/b/concord', but
is the considered the start of '/a/b/c' and '/a/b/c/dogs.csv'
"""
# Make sure we have a legitimate 'from' path
if not path.startswith(from_folder):
logging.debug('Replace folder path: original path "%s" doesn\'t start with expected folder "%s"', path, from_folder)
return None
check_idx = len(from_folder)
if from_folder[-1:] == '/' or from_folder[-1:] == '\\':
check_idx -= 1
if not path[check_idx] =='/' and not path[check_idx] =='\\':
return None
# Return the new path
rem = path[len(from_folder):]
if rem[0] == '/' or rem[0] == '\\':
rem = rem[1:]
return os.path.join(to_folder, rem) | dcbec19de1b1783a44ea7ec21c5cb46afeb44cba | 3,658,541 |
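# Usage sketch (added for illustration) of the fully-qualified-prefix rule described in
# the docstring; the paths are hypothetical (POSIX-style output shown).
print(_replace_folder_path('/a/b/c/dogs.csv', '/a/b/c', '/out'))  # '/out/dogs.csv'
print(_replace_folder_path('/a/b/concord', '/a/b/c', '/out'))     # None (partial folder match)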
def arrange_images(total_width, total_height, *images_positions):
"""Return a composited image based on the (image, pos) arguments."""
result = mel.lib.common.new_image(total_height, total_width)
for image, pos in images_positions:
mel.lib.common.copy_image_into_image(image, result, pos[1], pos[0])
return result | 49e167b9b6eb1a8e76c8e2d65bc3fa419d91a8a1 | 3,658,542 |
from typing import Tuple
import importlib
from types import ModuleType
def import_core_utilities() -> Tuple[ModuleType, ModuleType, ModuleType]:
"""Dynamically imports and return Tracing, Logging, and Metrics modules"""
return (
importlib.import_module(TRACING_PACKAGE),
importlib.import_module(LOGGING_PACKAGE),
importlib.import_module(METRICS_PACKAGE),
) | d627c1405b08975aeb02839f2da9d363f385d8b5 | 3,658,543 |
def pancakeSort(self, A):
    # ! This method finds the largest remaining number on each pass and flips it into its correct position
"""
:type A: List[int]
:rtype: List[int]
"""
bucket = sorted(A)
ans = []
for k in range(len(A),0,-1):
i = A.index(bucket.pop())+1
ans += [i, k]
A = A[i:k][::-1] + A[:i] + A[k:]
print(A)
return ans | 35d358c6631f5cc708232f67a3e55d685116dff8 | 3,658,544 |
def getOrc(orcName):
"""Get an orchestra stored in the user namespace.
One can store an orchestra in the user name space with the %%orc magic.
"""
ip = get_ipython()
return ip.user_ns["__orc"][orcName] | 7fed637d4ab653579b4ad78e1b047e236ca46377 | 3,658,545 |
import pandas as pd
def get_prompt_data_from_batse(grb: str, **kwargs: None) -> pd.DataFrame:
"""Get prompt emission data from BATSE. Creates a directory structure and saves the data.
Returns the data, though no further action needs to be taken by the user.
:param grb: Telephone number of GRB, e.g., 'GRB140903A' or '140903A' are valid inputs.
:type grb: str
:param kwargs: Placeholder to prevent TypeErrors.
:type kwargs: None
:return: The processed data.
:rtype: pandas.DataFrame
"""
getter = BATSEDataGetter(grb=grb)
return getter.get_data() | 1bd7848f455401be89466c88efd9e4d44b3b72e9 | 3,658,546 |
import numpy as np
def angular_error(a, b):
"""Calculate angular error (via cosine similarity)."""
a = pitchyaw_to_vector(a) if a.shape[1] == 2 else a
b = pitchyaw_to_vector(b) if b.shape[1] == 2 else b
ab = np.sum(np.multiply(a, b), axis=1)
a_norm = np.linalg.norm(a, axis=1)
b_norm = np.linalg.norm(b, axis=1)
# Avoid zero-values (to avoid NaNs)
a_norm = np.clip(a_norm, a_min=1e-8, a_max=None)
b_norm = np.clip(b_norm, a_min=1e-8, a_max=None)
similarity = np.divide(ab, np.multiply(a_norm, b_norm))
similarity = np.clip(similarity, a_min=-1.+1e-8, a_max=1.-1e-8)
return np.degrees(np.arccos(similarity)) | 89f7a51fc95a55349fc79e58b8f644a1ee6bd8a0 | 3,658,547 |
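# Usage sketch (added for illustration): identical 3D gaze vectors give roughly 0 degrees,
# opposite vectors approach 180 degrees (clipping keeps arccos well-defined).
import numpy as np
a = np.array([[0.0, 0.0, -1.0]])
print(angular_error(a, a))   # ~[0.]
print(angular_error(a, -a))  # ~[180.]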
def includeme(config):
"""
Get build Git repository directory and make it accessible
to all requests generated via Cornice
"""
# Make DB connection accessible as a request property
def _get_repos(request):
_settings = request.registry.settings
repo_dir = _settings['repo_basedir']
return repo_dir
config.add_request_method(_get_repos, 'repo_dir', reify=True) | f2d73eb01b616f79059f4001c7b3faad67f48cd2 | 3,658,548 |
from typing import Union
from pathlib import Path
def add_dot_csv(filename: Union[Path, str]) -> str:
"""Adds a .csv extension to filename."""
return add_extension(filename, '.csv') | b0e89ca231675048ddb65b11856179db140a15fb | 3,658,549 |
from typing import Dict
from typing import Any
def load_settings_from_file(filename: str) -> Dict[str, Any]:
"""Load amset configuration settings from a yaml file.
If the settings file does not contain a required parameter, the default
value will be added to the configuration.
An example file is given in *amset/examples/example_settings.yaml*.
Args:
filename: Path to settings file.
Returns:
The settings, with any missing values set according to the amset
defaults.
"""
logger.info("Loading settings from: {}".format(filename))
settings = loadfn(filename)
return validate_settings(settings) | 8f857ede65c455b51f030edc58577a87cc6159a6 | 3,658,550 |
def execute_query(query, *arguments):
"""Execute a query on the DB with given arguments."""
_db = labpals.model.get_db()
cursor = _db.execute(query, arguments)
rows = cursor.fetchall()
return rows | d1b7aff948ee37b223386af29bbe4a6d0939cde1 | 3,658,551 |
from typing import Dict
from typing import Any
import copy
def format_search_events_results(response: Dict[str, Any], limit: int) -> tuple:
"""
Format the output of the search events results command.
Args:
response (Dict[str,Any]): API response from FortiSIEM.
limit (int):Maximum number of results to retrieve.
    Returns:
        tuple: The formatted events and the total number of result pages.
    """
outputs = []
events = dict_safe_get(response, ['queryResult', 'events', 'event'])
if isinstance(events, dict):
events = [events]
total_count = arg_to_number(dict_safe_get(response, ['queryResult', '@totalCount']))
total_pages = total_count // limit + (total_count % limit != 0) if total_count else 0
if events:
for event in events:
formatted_event = copy.deepcopy(event)
formatted_attributes = {}
attributes = dict_safe_get(event, ['attributes', 'attribute'])
formatted_event['receiveTime'] = FormatIso8601(arg_to_datetime(event['receiveTime']))
for attribute in attributes:
formatted_attributes[attribute['@name']] = attribute['#text']
formatted_event['attributes'] = formatted_attributes
outputs.append(formatted_event)
return outputs, total_pages | de6b12f2009c3a7dab8093bd5842455e2bd2c84a | 3,658,552 |
import numpy as np
from datetime import datetime, timedelta
from astropy.time import Time
from astropy.coordinates import SkyCoord
from astropy import units as uts
def radec_obs_vec_mpc(inds, mpc_object_data):
"""Compute vector of observed ra,dec values for MPC tracking data.
Args:
inds (int array): line numbers of data in file
mpc_object_data (ndarray): MPC observation data for object
Returns:
rov (1xlen(inds) array): vector of ra/dec observed values
"""
rov = np.zeros((2*len(inds)))
for i in range(0,len(inds)):
indm1 = inds[i]-1
# extract observations data
timeobs = Time( datetime(mpc_object_data['yr'][indm1],
mpc_object_data['month'][indm1],
mpc_object_data['day'][indm1]) + timedelta(days=mpc_object_data['utc'][indm1]) )
obs_t_ra_dec = SkyCoord(mpc_object_data['radec'][indm1], unit=(uts.hourangle, uts.deg), obstime=timeobs)
rov[2*i-2], rov[2*i-1] = obs_t_ra_dec.ra.rad, obs_t_ra_dec.dec.rad
return rov | daa0a7bfc5a1532c4a63f4543f4ea5e3db099973 | 3,658,553 |
def mod(x, y) -> ProcessBuilder:
"""
Modulo
:param x: A number to be used as the dividend.
:param y: A number to be used as the divisor.
:return: The remainder after division.
"""
return _process('mod', x=x, y=y) | fb94d3a3e1dcd918d8405232ad11f00943895785 | 3,658,554 |
from os import path
def find_test_file_loc(test_data_dir):
"""
Return a new, unique and non-existing base name location suitable to create
a new copyright test.
"""
template = "copyright_{}.txt"
idx = 1
while True:
test_file_loc = path.join(test_data_dir, template.format(idx))
if not path.exists(test_file_loc):
return test_file_loc
idx += 1 | b12f5b454ddf335af800b5dd7601ba47a17cb7ed | 3,658,555 |
def get_list_of_encodings() -> list:
"""
Get a list of all implemented encodings.
! Adapt if new encoding is added !
:return: List of all possible encodings
"""
return ['raw', '012', 'onehot', '101'] | 6e0749eb45f85afe4e5c7414e4d23e67335ba2b5 | 3,658,556 |
def region_to_bin(chr_start_bin, bin_size, chr, start):
"""Translate genomic region to Cooler bin idx.
Parameters:
----------
chr_start_bin : dict
Dictionary translating chromosome id to bin start index
bin_size : int
Size of the bin
chr : str
Chromosome
start : int
Start of the genomic region
"""
return chr_start_bin[chr] + start // bin_size | f17b132048b0ceb4bbf2a87b77327d0d63b3fd64 | 3,658,557 |
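# Usage sketch (added for illustration): map a genomic coordinate to a global Cooler bin
# index; the chromosome offsets below are made up.
chr_start_bin = {"chr1": 0, "chr2": 2500}  # bin index at which each chromosome starts
print(region_to_bin(chr_start_bin, 100_000, "chr2", 350_000))  # 2500 + 3 = 2503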
import os
def get_img_name(img_path: str):
"""
Get the name from the image path.
Args:
img_path (str): a/b.jpg or a/b.png ...
Returns:
name (str): a/b.jpg -> b
"""
image_name = os.path.split(img_path)[-1].split('.')[0]
return image_name | 290bcaa133fd414874838f42c2781980954b98ef | 3,658,558 |
def cvCalcProbDensity(*args):
"""
cvCalcProbDensity(CvHistogram hist1, CvHistogram hist2, CvHistogram dst_hist,
double scale=255)
"""
return _cv.cvCalcProbDensity(*args) | dc0ce1eb33a07466d29defe0b4112e46cabe1308 | 3,658,559 |
from docutils import nodes
def get_filter_para(node_element):
"""Return paragraph containing the used filter description"""
para = nodes.paragraph()
filter_text = "Used filter:"
filter_text += " status(%s)" % " OR ".join(node_element["status"]) if len(
node_element["status"]) > 0 else ""
if len(node_element["status"]) > 0 and len(node_element["tags"]) > 0:
filter_text += " AND "
filter_text += " tags(%s)" % " OR ".join(node_element["tags"]) if len(
node_element["tags"]) > 0 else ""
if (len(node_element["status"]) > 0 or len(node_element["tags"]) > 0) and len(
node_element["types"]) > 0:
filter_text += " AND "
filter_text += " types(%s)" % " OR ".join(node_element["types"]) if len(
node_element["types"]) > 0 else ""
filter_node = nodes.emphasis(filter_text, filter_text)
para += filter_node
return para | 7b3ad6b0a9752a53bd16d9cee2a250f54f43def3 | 3,658,560 |
import matplotlib.pyplot as ppl
def mk_multi_line_figax(nrows, ncols, xlabel='time (s)', ylabel='signal (a.u.)'):
"""
Create the figure and axes for a
multipanel 2d-line plot
"""
# ncols and nrows get
# restricted via the plotting frontend
x_size = ncols * pltConfig['mXSize']
y_size = nrows * pltConfig['mYSize']
fig, axs = ppl.subplots(nrows, ncols, figsize=(x_size, y_size),
sharex=True, sharey=True, squeeze=False)
# Hide the right and top spines
# and remove all tick labels
for ax in axs.flatten():
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.tick_params(labelsize=0)
# determine axis layout
y_left = axs[:, 0]
x_bottom = axs[-1, :]
# write tick and axis labels only on outer axes to save space
for ax in y_left:
ax.tick_params(labelsize=pltConfig['mTickSize'])
ax.set_ylabel(ylabel, fontsize=pltConfig['mLabelSize'])
for ax in x_bottom:
ax.tick_params(labelsize=pltConfig['mTickSize'])
ax.set_xlabel(xlabel, fontsize=pltConfig['mLabelSize'])
return fig, axs | c759b4111a8cb3015aa9896f5afd2f8831ad8665 | 3,658,561 |
import pandas as pd
def load_sizes(infile_path: str, header: bool=None):
"""
Load and parse a gtf file. More information on the gtf format is here:
https://asia.ensembl.org/info/website/upload/gff.html
Arguments:
(REQUIRED) infile_path: path to gtf file
(OPTIONAL) header: headers in size file (DEFAULT: None)
chr1 247249719
chr2 242951149
...
"""
return pd.read_csv(infile_path, sep="\t", header=None, index_col=0) | 0b1737bb905b57f719c8f2369d771794dd49666b | 3,658,562 |
import random
def latent_tree_mutate(g):
"""Produce an offspring genome by mutation through dict
manipulation. Choose a random key in the dict, and overwrite its
value with a random int. Later, repair must make sure the
offspring is valid, including using the mod rule to map from a
(possibly) large int to the corresponding small one (ie the one
giving the same production choice) in the range of possible
choices."""
# FIXME We don't rely on g being a copy, in case the search
# algorithm sometimes mutates individuals which are original
# members of the population.
# See https://github.com/PonyGE/PonyGE2/issues/89.
g = g.copy()
k = random.choice(list(g.keys()))
g[k] = random.randrange(1000000) # there is no true maxint on py 3
return g | 6bc78dd620962377e892f69a217a2ae2771e6f35 | 3,658,563 |
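# Usage sketch (added for illustration): mutate one randomly chosen key of a toy genome;
# the parent dict is untouched thanks to the copy().
random.seed(0)
parent = {(0, "expr"): 3, (1, "term"): 7}
child = latent_tree_mutate(parent)
print(parent)  # unchanged
print(child)   # one value overwritten with a random int in [0, 1000000)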
import pickle
def load_model(file_path: str):
"""
Used to serialize an save a trained model, so it can be reused later on again.
-----------------------------------------------------------------------------------
Parameters:
-----------------------------------------------------------------------------------
    file_path: str
Path to a stored model from prior running save_model().
Returns:
-----------------------------------------------------------------------------------
fcm_model: List (ndarray, float)
The de-serialized model.
"""
fcm_model = pickle.load(open(file_path, 'rb'))
return fcm_model | 26278c46092dff6199a82b1425203af1883ba49d | 3,658,564 |
import numpy as np
from datetime import timedelta
def gfs_mos_forecast(stid, forecast_date):
"""
Do the data retrieval.
"""
# Generate a Forecast object
forecast = Forecast(stid, default_model_name, forecast_date)
forecast.daily.high = np.round(np.random.rand() * 100.)
forecast.daily.low = np.round(np.random.rand() * 100.)
forecast.daily.wind = np.round(np.random.rand() * 40.)
forecast.daily.rain = np.round(np.random.rand() * 3., 2)
# Create a dummy pd dataframe to test
forecast.timeseries.data['DateTime'] = [forecast_date, forecast_date +
timedelta(hours=3)]
forecast.timeseries.data['temperature'] = [56., 55.]
forecast.timeseries.data['dewpoint'] = [51., 51.]
return forecast | 8ba16fe350e5eef77f9eb960de4b447bcb420b5f | 3,658,565 |
import numpy as np
from sklearn.metrics import accuracy_score
def evaluate_accuracy_score(preprocessing, prediction_binary):
"""
Evaluates the accuracy score
:param preprocessing: prepared DataPreprocess instance
:param prediction_binary: boolean expression for the predicted classes
"""
accuracy = []
for j in range(len(DETECTION_CLASSES)):
acc = accuracy_score(preprocessing.target_classes[:, j], prediction_binary[:, j])
accuracy.append(acc)
return np.mean(accuracy) | 9ee9110f924a930d442d00d4c06a929ba7589e42 | 3,658,566 |
def test_domain_visualize(case, visu_case):
"""
test the domain visualization
"""
dom = pylbm.Domain(case)
views = dom.visualize(**visu_case)
return views.fig | a395aad44955eb0599e257ccfeb326cb08638fcd | 3,658,567 |
import numpy as np
import torch
from torch import nn
from torch.autograd import Variable
import torch.nn.functional as F
from ignite.engine import Engine
def create_supervised_evaluator(model, metrics,
device=None):
"""
Factory function for creating an evaluator for supervised models
Args:
model (`torch.nn.Module`): the model to train
metrics (dict of str - :class:`ignite.metrics.Metric`): a map of metric names to Metrics
device (str, optional): device type specification (default: None).
Applies to both model and batches.
Returns:
Engine: an evaluator engine with supervised inference function
"""
if device:
if torch.cuda.device_count() > 1:
model = nn.DataParallel(model)
model.to(device)
def _inference(engine, batch):
model.eval()
with torch.no_grad():
data, language, pids, camids = batch
batchsize = language.size(0)
wordclass_feed = np.zeros((batchsize, max_tokens), dtype='int64')
wordclass_feed[:,0] = wordlist_final.index('<S>')
outcaps = np.empty((batchsize, 0)).tolist()
data = data.to(device) if torch.cuda.device_count() >= 1 else data
# language = language.to(device) if torch.cuda.device_count() >= 1 else language
for j in range(max_tokens-1):
wordclass = Variable(torch.from_numpy(wordclass_feed)).cuda()
features, wordact, _= model(data, wordclass)
wordact = wordact[:,:,:-1]
wordact_t = wordact.permute(0, 2, 1).contiguous().view(batchsize*(max_tokens-1), -1)
wordprobs = F.softmax(wordact_t).cpu().data.numpy()
wordids = np.argmax(wordprobs, axis=1)
for k in range(batchsize):
word = wordlist_final[wordids[j+k*(max_tokens-1)]]
outcaps[k].append(word)
if(j < max_tokens-1):
wordclass_feed[k, j+1] = wordids[j+k*(max_tokens-1)]
for j in range(batchsize):
num_words = len(outcaps[j])
if 'EOS' in outcaps[j]:
num_words = outcaps[j].index('EOS')
outcap = ' '.join(outcaps[j][:num_words])
feat, _, _ = model(data, wordclass)
print (outcap)
return feat, pids, camids
engine = Engine(_inference)
for name, metric in metrics.items():
metric.attach(engine, name)
return engine | da5c39b8a8d841181fc63ae48db0c68f9bbfe278 | 3,658,568 |
def get_available_operations():
""" Return a dict of available operations """
return True, runtime.get_available_operations() | 9d0b744061c97cf10fb69ccfdbc403b8f337db3d | 3,658,569 |
def word_distance(word1, word2):
"""Computes the number of differences between two words.
word1, word2: strings
Returns: integer
"""
assert len(word1) == len(word2)
count = 0
for c1, c2 in zip(word1, word2):
if c1 != c2:
count += 1
return count | b3279744c628f3adc05a28d9ab7cc520744b540c | 3,658,570 |
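# Usage sketch (added for illustration): Hamming-style distance between equal-length words.
print(word_distance("kitten", "sitter"))  # 2 ('k' vs 's', 'n' vs 'r')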
from typing import Union
from typing import Tuple
from typing import Any
def get_parent_child(root: dict,
path: str) -> Union[Tuple[Tuple[None, None],
Tuple[None, None]],
Tuple[Tuple[dict, None],
Tuple[Any, str]],
Tuple[Tuple[Any, str],
Tuple[Any, str]]]:
""" Get first and second level node
:param root: The root node.
:param path: The path to identify the leaf node.
:return: (
(
parent node: The first level node in the hierarchy of the path
parent path: The path based on the root node
)
(
child node: The second level node in the hierarchy of the path
child path: The path based on the parent node
)
)
"""
res = Ddict.search(root, path)
if res is None:
if '.' not in path:
return (None, None), (None, None)
else:
child = Ddict.get(root, path)
return (root, None), (child, path)
parent_name, parent_value, child_name = res
if child_name:
child_value = Ddict.get(parent_value, child_name)
return (parent_value, parent_name), (child_value, child_name)
else:
return (root, None), (parent_value, parent_name) | 3e33e32af6b3f67cf41397b6da399ec9ede5491e | 3,658,571 |
from collections import defaultdict
import numpy as np
def get_data_loaders(personachat, tokenizer, args_num_candidates=1, args_personality_permutations=1, args_max_history=2):
""" Prepare the dataset for training and evaluation """
print("Build inputs and labels")
datasets = {"train": defaultdict(list), "valid": defaultdict(list)}
for dataset_name, dataset in personachat.items():
num_candidates = len(dataset[0]["utterances"][0]["candidates"])
if args_num_candidates > 0 and dataset_name == 'train':
num_candidates = min(args_num_candidates, num_candidates)
for dialog in dataset:
persona = dialog["personality"].copy()
for _ in range(args_personality_permutations):
for utterance in dialog["utterances"]:
history = utterance["history"][-(2*args_max_history+1):]
for j, candidate in enumerate(utterance["candidates"][-num_candidates:]):
lm_labels = bool(j == num_candidates-1)
instance, _ = build_input_from_segments(persona, history, candidate, tokenizer, lm_labels)
for input_name, input_array in instance.items():
datasets[dataset_name][input_name].append(input_array)
datasets[dataset_name]["mc_labels"].append(num_candidates - 1)
datasets[dataset_name]["n_candidates"] = num_candidates
persona = [persona[-1]] + persona[:-1] # permuted personalities
print("Pad inputs and convert to Tensor")
for dataset_name, dataset in datasets.items():
dataset = pad_dataset(dataset, padding=tokenizer.convert_tokens_to_ids('<pad>'))
for input_name in MODEL_INPUTS:
tensor = dataset[input_name]
dataset[input_name] = np.array(tensor)
return datasets | 212e7bdcdd880b47c56b76fe2e33ce12c665c650 | 3,658,572 |
def unescape_strict(s):
"""
Re-implements html.unescape to use our own definition of `_charref`
"""
if '&' not in s:
return s
return _charref.sub(_replace_charref, s) | d2b9aace645af58dce1e5a5f5e5cf3be919b759b | 3,658,573 |
def CheckVPythonSpec(input_api, output_api, file_filter=None):
"""Validates any changed .vpython files with vpython verification tool.
Args:
input_api: Bag of input related interfaces.
output_api: Bag of output related interfaces.
file_filter: Custom function that takes a path (relative to client root) and
returns boolean, which is used to filter files for which to apply the
verification to. Defaults to any path ending with .vpython, which captures
both global .vpython and <script>.vpython files.
Returns:
A list of input_api.Command objects containing verification commands.
"""
file_filter = file_filter or (lambda f: f.LocalPath().endswith('.vpython'))
affected_files = input_api.AffectedTestableFiles(file_filter=file_filter)
affected_files = map(lambda f: f.AbsoluteLocalPath(), affected_files)
commands = []
for f in affected_files:
commands.append(input_api.Command(
'Verify %s' % f,
['vpython', '-vpython-spec', f, '-vpython-tool', 'verify'],
{'stderr': input_api.subprocess.STDOUT},
output_api.PresubmitError))
return commands | d6e888b5ce6fec4bbdb35452b3c0572702430c06 | 3,658,574 |
import os
import yaml
def _read_from_file(paramfile):
"""
Code to load parameter data from a YAML file, moved out of
check_metadata_format to allow different inputs to that function.
:param paramfile: The parameter file created by 'precheck_data_format' and
'select_data_templates'.
:type paramfile: str
"""
# Read in the parameter file.
if os.path.isfile(paramfile):
with open(paramfile, 'r') as istream:
            param_data = yaml.load(istream, Loader=yaml.SafeLoader)
else:
raise OSError('Input parameter file not found. Looking for "' +
paramfile + '".')
return param_data | 7302f6e7a86a7d8960c3f84caa47a7b5bf73602d | 3,658,575 |
import types
from typing import Tuple
import pytest
def test_infer_errs() -> None:
"""Test inference applied to functions."""
with f.Fun(MockServer()):
a = f.put(b"bla bla")
b = f.put(3)
with pytest.raises(TypeError):
f.py(lambda x, y, z: (x, y), a, a, b)
# should NOT raise
f.py(
lambda x, y, z: (x, y),
a,
a,
b,
out=[types.Encoding.blob, types.Encoding.blob],
)
def i1o2(x: bytes) -> Tuple[bytes, bytes]:
return x, x
def i2o1(x: bytes, y: bytes) -> bytes:
return x
with pytest.raises(TypeError):
out = f.morph(i1o2, a) # type:ignore # noqa:F841
with pytest.raises(TypeError):
out = f.reduce(i1o2, a) # type:ignore # noqa:F841
with pytest.raises(TypeError):
out = f.reduce(lambda x, y: x, a, b) # type:ignore # noqa:F841
# If we pass out= then the inference is skipped
out = f.morph(i1o2, a, out=types.Encoding.blob) # type:ignore # noqa:F841
out = f.reduce(i1o2, a, out=types.Encoding.blob) | 434e5b19f6ad15d6644224475ddd656184593c19 | 3,658,576 |
def decode_captions(captions, idx_to_word):
""" Decode text captions from index in vocabulary to words.
"""
if captions.ndim == 1:
T = captions.shape[0]
N = 1
else:
N, T = captions.shape
decoded = []
for i in range(N):
words = []
for t in range(T):
if captions.ndim == 1:
word = idx_to_word[captions[t]]
else:
word = idx_to_word[captions[i, t]]
if word == '<END>':
words.append('.')
break
if word != '<NULL>':
words.append(word)
decoded.append(' '.join(words))
return decoded | a56abe824b522418480c80611505dabd0a8af6cc | 3,658,577 |
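# Usage sketch (added for illustration) with a tiny made-up vocabulary; the special-token
# indices follow the usual image-captioning convention.
import numpy as np
idx_to_word = {0: '<NULL>', 1: '<START>', 2: '<END>', 3: 'a', 4: 'dog', 5: 'runs'}
captions = np.array([[1, 3, 4, 5, 2, 0, 0]])
print(decode_captions(captions, idx_to_word))  # ['<START> a dog runs .']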
from typing import Tuple
from pathlib import Path
import logging
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
from pandas import DataFrame
def save_kdeplot(df: DataFrame,
output_plot: str,
x_name: str,
title: str,
color: str,
x_label: str = None,
y_label: str = None,
normalize_x: bool = True,
fig_size: Tuple[int] = (24, 12),
):
"""This function helps for computing automated kdeplots using seaborn.
It sets up somewhat standardized figure output for a harmonized rendering.
:param df: the DataFrame with data to plot
:param output_plot: the output plot full file name
:param x_name: DF column name to use for x-axis
:param x_label: the name to display on the plot for x-axis
:param y_label: the name to display on the plot for y-axis
:param color: color to use for bars, theoritically could also be a list of colors
:param fig_size: tuple of integers defining the plot dimensions (x, y)
:return: the figure in searborn format
"""
# detect format from file extension
format = Path(output_plot).suffix[1:].lower()
if format != 'svg' and format != 'png':
raise ValueError(f"ERROR! UNKNOWN PLOT FORMAT! ('{format}')")
logging.debug(f"FORMAT FOR PLOT: '{format}'")
# delete existing file for preventing stacking of plots
p = Path(output_plot)
if p.exists():
p.unlink()
# general style for plotting
sns.set(rc={'figure.figsize': fig_size})
sns.set_style('whitegrid', {'axes.edgecolor': '0.2'})
sns.set_context("paper", font_scale=2)
ax = sns.kdeplot(df[x_name], shade=True, label='', color=color)
ax.set_title(title, fontsize=24, y=1.02)
ax.tick_params(labelsize=20)
ax.tick_params(axis='x', rotation=0)
ax.set_xlim(0, 1)
# ax.set_xticklabels(df[x_name])
label_format = '{:,.0%}'
ticks_loc = ax.get_xticks().tolist()
ax.xaxis.set_major_locator(mticker.FixedLocator(ticks_loc))
ax.set_xticklabels([label_format.format(x) for x in ticks_loc])
#ax.set_xticklabels(['{:,.0%}'.format(x) for x in ax.get_xticks()])
ax.set_xlabel(x_label, fontsize=25, labelpad=20)
ax.set_ylabel(y_label, fontsize=25, labelpad=20)
# save
figure = ax.get_figure()
figure.savefig(output_plot, dpi=600)
plt.clf()
plt.close()
return figure | ea1b95180fb56a365b753288953950a209d87da2 | 3,658,578 |
def make_loc(caller):
"""
turn caller location into a string
"""
# return caller["file"] + ":" + caller["func"] + ":" + caller["line"]
return caller["file"] + ":" + str(caller["line"]) | e0db31ffd5c76636938bfe66184f9a2a6fbca496 | 3,658,579 |
import difflib
import sys
def diff(file1, file2):
"""
Compare two files, ignoring line end differences
If there are differences, print them to stderr in unified diff format.
@param file1 The full pathname of the first file to compare
@param file2 The full pathname of the second file to compare
@return True if the files are the same, o
"""
with open(file1, 'r') as input1:
with open(file2, 'r') as input2:
diffs = difflib.unified_diff(
input1.read().splitlines(),
input2.read().splitlines()
)
no_diffs = True
for diff in diffs:
no_diffs = False
print(diff, file=sys.stderr)
return no_diffs | 980090001ce265afd736e97396315a6a3b72441e | 3,658,580 |
def run_part2(file_content):
"""Implmentation for Part 2."""
numbers = (int(number) for number in file_content.split())
root = _build_tree(numbers)
return _node_value(root) | 47171de36eacabd438f1243bddd866af6187c763 | 3,658,581 |
def get_cap_selected_frame(cap, show_frame):
"""
Gets a frame from an opencv video capture object to a specific frame
"""
cap_set_frame(cap, show_frame)
ret, frame = cap.read()
if not ret:
return None
else:
return frame | 4a5a939368e09faea3094335f60e782a249616ce | 3,658,582 |
import numpy as np
def rotate_coords_x(pos, angle):
""" Rotate a set of coordinates about the x-axis
:param pos: (n, 3) xyz coordinates to be rotated
:param angle: angle to rotate them by w.r.t origin
:type pos: numpy.ndarray
:type angle: float
:return: array of rotated coordinates
:rtype: numpy.ndarray
"""
xyz = np.copy(pos)
angle *= (np.pi / 180) # convert to radians
R = rotate_x(angle)
for i in range(np.shape(xyz)[0]):
xyz[i, :] = np.dot(R, xyz[i, :])
return xyz | af0a95302c44be54e78b88b8f9851bab29556900 | 3,658,583 |
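# Minimal sketch (an assumption, not the original code) of the rotate_x() helper that
# rotate_coords_x() relies on: the standard 3x3 rotation matrix about the x-axis.
def rotate_x(angle):
    c, s = np.cos(angle), np.sin(angle)
    return np.array([[1, 0, 0],
                     [0, c, -s],
                     [0, s, c]])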
import itertools
from collections import defaultdict
import numpy as np
def q_learning(env, num_episodes, discount_factor=1.0, alpha=0.5, epsilon=0.1):
"""
Q-Learning algorithm: Off-policy TD control. Finds the optimal greedy policy
while following an epsilon-greedy policy
Args:
env: OpenAI environment.
num_episodes: Number of episodes to run for.
discount_factor: Lambda time discount factor.
alpha: TD learning rate.
        epsilon: Chance to sample a random action. Float between 0 and 1.
Returns:
A tuple (Q, episode_lengths).
Q is the optimal action-value function, a dictionary mapping state -> action values.
stats is an EpisodeStats object with two numpy arrays for episode_lengths and episode_rewards.
"""
# The final action-value function.
# A nested dictionary that maps state -> (action -> action-value).
Q = defaultdict(lambda: np.zeros(env.action_space.n))
# keeps track of useful statistics
stats = plotting.EpisodeStats(
episode_lengths=np.zeros(num_episodes),
episode_rewards=np.zeros(num_episodes))
policy = make_epsilon_greedy_policy(Q, epsilon, env.action_space.n)
for i_episode in range(num_episodes):
current_state = env.reset()
# keep track number of time-step per episode only for plotting
for t in itertools.count():
# choose the action based on epsilon greedy policy
action_probs = policy(current_state)
action = np.random.choice(np.arange(len(action_probs)), p=action_probs)
next_state, reward, done, _ = env.step(action)
# sse the greedy action to evaluate Q, not the one we actually follow
greedy_next_action = Q[next_state].argmax()
# evaluate Q using estimated action value of (next_state, greedy_next_action)
td_target = reward + discount_factor * Q[next_state][greedy_next_action]
td_error = td_target - Q[current_state][action]
Q[current_state][action] += alpha * td_error
# improve epsilon greedy policy using new evaluate Q
policy = make_epsilon_greedy_policy(Q, epsilon, env.action_space.n)
# update statistics
stats.episode_rewards[i_episode] += reward
stats.episode_lengths[i_episode] = t
if done:
break
else:
current_state = next_state
return Q, stats | 380c46f9a1c35424028cbf54d905b7b3df1181ec | 3,658,584 |
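# Minimal sketch (an assumption, not the original code) of the epsilon-greedy policy
# factory that q_learning() calls: returns action probabilities for a given state.
def make_epsilon_greedy_policy(Q, epsilon, nA):
    def policy_fn(state):
        probs = np.ones(nA) * epsilon / nA   # explore uniformly with probability epsilon
        best_action = np.argmax(Q[state])
        probs[best_action] += 1.0 - epsilon  # otherwise exploit the current greedy action
        return probs
    return policy_fn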
import random
def find_rand_source_reg():
"""Find random source register based on readAfterWrite probability"""
prob=random.uniform(0,1)
while len(previousIntegerSources)>numberOfPreviousRegistersToConsider:
previousIntegerSources.popleft()
if prob<readAfterWrite and previousIntegerDestinations:
num=random.choice(previousIntegerDestinations)
else:
num=random.randint(1,31)
previousIntegerSources.append(num)
return num | 678223dc137a624b670834bc2fc84d6f5481d130 | 3,658,585 |
def _get_qnode_class(device, interface, diff_method):
"""Returns the class for the specified QNode.
Args:
device (~.Device): a PennyLane-compatible device
interface (str): the interface that will be used for classical backpropagation
diff_method (str, None): the method of differentiation to use in the created QNode
Raises:
ValueError: if an unrecognized ``diff_method`` is provided
Returns:
~.BaseQNode: the QNode class object that is compatible with the provided device and
differentiation method
"""
# pylint: disable=too-many-return-statements,too-many-branches
model = device.capabilities().get("model", "qubit")
passthru_interface = device.capabilities().get("passthru_interface", None)
device_provides_jacobian = device.capabilities().get("provides_jacobian", False)
allows_passthru = passthru_interface is not None
if diff_method is None:
# QNode is not differentiable
return BaseQNode
if diff_method == "best":
if allows_passthru and interface == passthru_interface:
# hand off differentiation to the device without type conversion
return PassthruQNode
if device_provides_jacobian:
# hand off differentiation to the device
return DeviceJacobianQNode
if model in PARAMETER_SHIFT_QNODES:
# parameter-shift analytic differentiation
return PARAMETER_SHIFT_QNODES[model]
if diff_method == "backprop":
if allows_passthru:
if interface != passthru_interface:
raise ValueError(
"Device {} only supports diff_method='backprop' when using the "
"{} interface.".format(device.short_name, passthru_interface)
)
return PassthruQNode
raise ValueError(
"The {} device does not support native computations with "
"autodifferentiation frameworks.".format(device.short_name)
)
if diff_method == "device":
if device_provides_jacobian:
return DeviceJacobianQNode
raise ValueError(
"The {} device does not provide a native method "
"for computing the jacobian.".format(device.short_name)
)
if diff_method == "parameter-shift":
if model in PARAMETER_SHIFT_QNODES:
# parameter-shift analytic differentiation
return PARAMETER_SHIFT_QNODES[model]
raise ValueError(
"The parameter shift rule is not available for devices with model {}.".format(model)
)
if diff_method == "reversible":
# pylint: disable=protected-access
if not device.capabilities().get("reversible_diff", False):
raise ValueError(
"Reversible differentiation method not supported on {}".format(device.short_name)
)
return ReversibleQNode
if diff_method in ALLOWED_DIFF_METHODS:
# finite differences
return JacobianQNode
raise ValueError(
"Differentiation method {} not recognized. Allowed "
"options are {}".format(diff_method, ALLOWED_DIFF_METHODS)
) | cb87fd664e37074fbad065e7c707554c1632a0d9 | 3,658,586 |
import tensorflow as tf
def evaluate_and_log_bleu(model, bleu_source, bleu_ref, vocab_file):
"""Calculate and record the BLEU score."""
subtokenizer = tokenizer.Subtokenizer(vocab_file)
uncased_score, cased_score = translate_and_compute_bleu(
model, subtokenizer, bleu_source, bleu_ref)
tf.compat.v1.logging.info("Bleu score (uncased): %s", uncased_score)
tf.compat.v1.logging.info("Bleu score (cased): %s", cased_score)
return uncased_score, cased_score | 5b7665851c69e0edfe526763a76582f10eb88bf0 | 3,658,587 |
def transform_call(red_node):
"""
Converts Python style function calls to VHDL style:
self.d(a) -> d(self, a)
If function owner is not exactly 'self' then 'type' is prepended.
self.next.moving_average.main(x) -> type.main(self.next.moving_average, x)
self.d(a) -> d(self, a)
self.next.d(a) -> d(self.next, a)
local.d() -> type.d(local)
self.local.d() -> type.d(self.local)
If return then:
b = self.a(arg) ->
variable pyha_ret_0: type;
a(self, arg, pyha_ret_0);
b := pyha_ret_0;
Handling call inside call is limited to depth 1.
"""
def find_line_node(red_obj):
line_node = red_obj
while True:
if type(line_node.next) == EndlNode:
break
if hasattr(line_node.parent, 'value') and type(line_node.parent.value) == LineProxyList:
if not (hasattr(line_node.parent, 'test') and (
line_node.parent.test == atom # if WE are the if condition, skip
or line_node.parent.test == atom.parent)): # if WE are the if condition (part of condition)
break
line_node = line_node.parent
return line_node
is_hack = False
# make sure each created variable is unique by appending this number and incrementing
tmp_var_count = 0
# loop over all atomtrailers, call is always a member of this
atomtrailers = red_node.find_all('atomtrailers')
for i, atom in enumerate(atomtrailers):
if is_hack: # when parsed out of order call
atom = atomtrailers[i - 1]
call = atom.call
is_hack = False
else:
call = atom.call # this actually points to the stuff between ()
if call is None: # this atomtrailer has no function call
continue
wat = call.call
if wat is not None: # one of the arguments is a call -> process it first (i expect it is next in the list)
call_index = wat.previous.index_on_parent
if call_index == 0: # input is something like x() -> len(), Sfix() ....
pass
else:
try:
atom = atomtrailers[i + 1]
call = atom.call
is_hack = True
except:
continue # no idea what is going on here...
if call is None: # this atomtrailer has no function call
continue
call_index = call.previous.index_on_parent
if call_index == 0: # input is something like x() -> len(), Sfix() ....
continue
# get the TARGET function object from datamodel
target_func_name = atom.copy()
del target_func_name[call_index + 1:]
try:
target_func_obj = super_getattr(convert_obj, str(target_func_name))
        except: # happened for: (self.conjugate(complex_in) * complex_in).real
continue
if not target_func_obj.calls:
# function is not simulated...
line_node = find_line_node(atom)
line_node.replace(f'# comment out because not called in simulation: {line_node.dumps()}')
continue
prefix = atom.copy()
del prefix[call_index:]
del atom[:call_index]
tmp = prefix.copy()
if isinstance(tmp[0], AtomtrailersNode):
# this branch happens because of 'for transform'
tmp[0][0] = 'self_const'
call.insert(0, tmp)
else:
tmp[0] = 'self_const'
call.insert(0, tmp)
tmp = prefix.copy()
if isinstance(tmp[0], AtomtrailersNode):
tmp[0][0] = 'self_next'
call.insert(0, tmp)
else:
tmp[0] = 'self_next'
call.insert(0, tmp)
tmp = prefix.copy()
if isinstance(tmp[0], AtomtrailersNode):
tmp[0][0] = 'self'
call.insert(0, tmp)
else:
tmp[0] = 'self'
call.insert(0, tmp)
# get the SOURCE (where call is going on) function object from datamodel
def_parent = atom
while not isinstance(def_parent, DefNode):
def_parent = def_parent.parent
# def_parent = atom.parent_find('def')
source_func_name = f'self.{def_parent.name}'
source_func_obj = super_getattr(convert_obj, str(source_func_name))
# if call is not to local class function
# self.moving_average.main(x) -> MODULE_NAME.main(self.moving_average, x)
if str(prefix) != 'self':
var = super_getattr(convert_obj, str(prefix))
var = init_vhdl_type('-', var, var)
atom.insert(0, var._pyha_module_name())
if target_func_obj.get_output_types() is None:
continue # function is not returning stuff -> this is simple
else:
# add return variables to function locals, so that they will be converted to VHDL variables
ret_vars = []
for x in get_iterable(target_func_obj.get_output_types()):
name = f'pyha_ret_{tmp_var_count}'
ret_vars.append(name)
source_func_obj.add_local_type(name, x)
tmp_var_count += 1
# add return variable to arguments
call.append(name)
# call.value[-1].target = f'ret_{j}'
# need to add new source line before the CURRENT line..search for the node with linenodes
line_node = find_line_node(atom)
# add function call BEFORE the CURRENT line
if line_node != atom: # equality means that value is not assigned to anything
line_node.parent.insert(line_node.index_on_parent, atom.copy())
atom.replace(','.join(ret_vars)) | 21091d369d75f5f51065e2a2df95956816d8b968 | 3,658,588 |
import random
from math import log1p
def delta_next_time_to_send(G, u, v):
"""How long to wait before U should send a message to V under diffusion
spreading. Per the Bitcoin protocol, this depends on if we have an outgoing
connection or an incoming connection."""
is_outgoing = G[u][v][ORIGINATOR] == u
average_interval_seconds = 2 if is_outgoing else 5
delta = int(log1p(-random.random()) * average_interval_seconds * -1000000 + 0.5)
return delta if delta > 0 else 0 | 193e847c8dfe1bf4e23bb3ed0a749c36f83c9f61 | 3,658,589 |
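# Usage sketch (added for illustration): an undirected networkx graph whose edge attribute
# records which endpoint originated the connection; ORIGINATOR is an assumed module constant.
import networkx as nx
ORIGINATOR = "originator"
G = nx.Graph()
G.add_edge("u", "v", **{ORIGINATOR: "u"})    # u -> v is an outgoing connection for u
print(delta_next_time_to_send(G, "u", "v"))  # delay in microseconds before u relays to v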
def processData(list_pc, imo):
"""
    This function processes the data from getData to build a single string,
    ready to be copied into the CSV, that contains all the rows for one ship.
"""
str_pc = ''
for i in range(len(list_pc)):
if list_pc[i] == 'Arrival (UTC)':
tab = list_pc[i-1].split(',') # [Port, Country] (good) or [Port, Region, Country] (bad)
if len(tab) == 3:
tab = ['"' + tab[0] + ',' + tab[1].strip() + '"', tab[2]] # [Port+(Region), Country]
str_pc = str_pc + imo + ',' + tab[0] + ',' + tab[1] + ',"' + list_pc[i+1] + '","' + list_pc[i+3] + '","' + list_pc[i+5] + '"\n'
return str_pc | abb9d0a8d9f3f1ed35e4f991a3ac14e51621f104 | 3,658,590 |
def wrn(num_classes):
"""Constructs a wideres-28-10 model without dropout.
"""
return Wide_ResNet(28, 10, 0, num_classes) | bcf33fdaf7081389b2c4b2e8f172684531205315 | 3,658,591 |
from typing import Dict
from typing import Any
from typing import Optional
def run(
config: Dict[str, Any],
log_dir: str = "",
kernel_seed: int = 0,
kernel_random_state: Optional[np.random.RandomState] = None,
) -> Dict[str, Any]:
"""
Wrapper function that enables to run one simulation.
It does the following steps:
- instantiation of the kernel
- running of the simulation
- return the end_state object
Arguments:
config: configuration file for the specific simulation
log_dir: directory where log files are stored
kernel_seed: simulation seed
kernel_random_state: simulation random state
"""
coloredlogs.install(
level=config["stdout_log_level"],
fmt="[%(process)d] %(levelname)s %(name)s %(message)s",
)
kernel = Kernel(
random_state=kernel_random_state or np.random.RandomState(seed=kernel_seed),
log_dir=log_dir,
**subdict(
config,
[
"start_time",
"stop_time",
"agents",
"agent_latency_model",
"default_computation_delay",
"custom_properties",
],
),
)
sim_start_time = dt.datetime.now()
logger.info(f"Simulation Start Time: {sim_start_time}")
end_state = kernel.run()
sim_end_time = dt.datetime.now()
logger.info(f"Simulation End Time: {sim_end_time}")
logger.info(f"Time taken to run simulation: {sim_end_time - sim_start_time}")
return end_state | c8bb7931c9b74064d3488bfa92fb1376b9f9f474 | 3,658,592 |
def python_to_pydict(script_contents, namespace=None):
"""Load a Python script with dictionaries into a dictionary."""
if namespace is None:
namespace = {}
    exec(script_contents, {}, namespace)  # Python 3 call form of the original "exec ... in" statement
return to_lower(namespace) | 7f1dcf2099b2a5b132b6f7d7355b903d4328a84d | 3,658,593 |
def convertInt(s):
"""Tells if a string can be converted to int and converts it
Args:
s : str
Returns:
s : str
Standardized token 'INT' if s can be turned to an int, s otherwise
"""
try:
int(s)
return "INT"
except:
return s | a0eae31b69d4efcf8f8595e745316ea8622e24b3 | 3,658,594 |
import torch
def pairwise_distance(A, B):
"""
Compute distance between points in A and points in B
:param A: (m,n) -m points, each of n dimension. Every row vector is a point, denoted as A(i).
:param B: (k,n) -k points, each of n dimension. Every row vector is a point, denoted as B(j).
:return: Matrix with (m, k). And the ele in (i,j) is the distance between A(i) and B(j)
"""
A_square = torch.sum(A * A, dim=1, keepdim=True)
B_square = torch.sum(B * B, dim=1, keepdim=True)
distance = A_square + B_square.t() - 2 * torch.matmul(A, B.t())
return distance | 2142b94f91f9e762d1a8b134fdda4789c564455d | 3,658,595 |
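# Usage sketch (added for illustration): squared Euclidean distances between two small
# point sets.
import torch
A = torch.tensor([[0.0, 0.0], [1.0, 1.0]])
B = torch.tensor([[1.0, 0.0]])
print(pairwise_distance(A, B))  # tensor([[1.], [1.]])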
from typing import Tuple
def _split_full_name(full_name: str) -> Tuple[str, str, str]:
"""Extracts the `(ds name, config, version)` from the full_name."""
if not tfds.core.registered.is_full_name(full_name):
raise ValueError(
f'Parsing builder name string {full_name} failed.'
'The builder name string must be of the following format:'
'`dataset_name[/config_name]/version`')
ds_name, *optional_config, version = full_name.split('/')
assert len(optional_config) <= 1
config = next(iter(optional_config)) if optional_config else ''
return ds_name, config, version | 2b2ace6e0df3302c8899834be749e0ef23c8df6d | 3,658,596 |
def query_paginate(resources, arguments):
"""Return the resources paginated
Args:
resources(list): List to paginate
arguments(FormsDict): query arguments
Returns:
list: Paginated resource (asc or desc)
"""
if '_page' not in arguments:
return resources
page = int(arguments['_page'])
limit = 10 if '_limit' not in arguments else int(arguments['_limit'])
chunk_data = list(chunk_list(resources, limit))
results = chunk_data[page-1]
link_header = build_link_header(request, page, len(chunk_data))
response.set_header("Link", link_header)
return results | caeefb937501945be2f35792dbdec9e7eefcadef | 3,658,597 |
def convert_grad(graph):
"""Remove all instances of SymbolicKeyType in the graphs.
They will be replaced by globally-unique integers.
"""
mng = graph.manager
counter = 0
key_map = {}
for node in mng.all_nodes:
if node.is_constant(SymbolicKeyInstance):
if node.value not in key_map:
key_map[node.value] = counter
counter += 1
node.value = key_map[node.value]
node.abstract = to_abstract(node.value)
return graph | 7dfec6d6319630024bfb84872fd99b55168f0028 | 3,658,598 |
def site_data(db, settings):
"""Simple fake site data
"""
if organizations_support_sites():
settings.FEATURES['FIGURES_IS_MULTISITE'] = True
site_data = make_site_data()
ce = site_data['enrollments'][0]
lcgm = [
LearnerCourseGradeMetricsFactory(site=site_data['site'],
user=ce.user,
course_id=str(ce.course_id),
date_for='2020-10-01'),
]
site_data['lcgm'] = lcgm
return site_data | 395751133325b4fb6dc0ea463726c56b95c7d2a7 | 3,658,599 |