id
int64 11
59.9k
| original
stringlengths 33
150k
| modified
stringlengths 37
150k
|
---|---|---|
31,616 |
def test_edit_case(mocker):
params = {
"url": "https://content.demisto.works:20120",
"insecure": True,
"credentials": {
"identifier": "NGCP",
"password": "TIl1nW@H0ZEG"
},
"version": "11.3"}
raw_response_has_event_list = {"assignedTo": 8207, "closeTime": "2021-05-25T10:29:17Z", "dataSourceList": ["47"],
"deviceList": None,
"eventList": [
{"id": "144117387300438016|6204912068", "lastTime": "2021-05-25T09:47:10Z",
"message": "TEST"}],
"history": "\n------- Viewed: 05/25/2021 10:26:37(GMT)"
"TEST@TEST -------\n\n------- Viewed: 05/25/2021 10:27:34("
"GMT) "
" TEST@TEST -------\n",
"id": 58136,
"notes": "------- Opened on 2021/05/25 09:53:53(GMT) by Triggered Condition -------"
"\n\n------- In Progress: 05/25/2021 10:29:17(GMT) Xsoar@TEST -------"
"\n\n------- Changes: 05/25/2021 10:29:17(GMT) Xsoar@TEST -------"
"\n Organization\n old: None\n new: BRD"
"\n\n", "openTime": "2021-05-25T09:53:53Z",
"orgId": 2, "severity": 50, "statusId": 3,
"summary": "BRD-ALERT- Recon - Horizontal - Scan"}
mocker.patch.object(McAfeeESMClient, 'get_case_detail', return_value=('', {}, raw_response_has_event_list))
mocker.patch.object(McAfeeESMClient, '_McAfeeESMClient__request', return_value={})
client = McAfeeESMClient(params)
client.edit_case()
result = client._McAfeeESMClient__request.call_args.kwargs['data']['caseDetail']
assert len(result['eventList']) > 0
|
def test_edit_case(mocker):
params = {
"url": "https://content.demisto.works:20120",
"insecure": True,
"credentials": {
"identifier": "NGCP",
"password": "TIl1nW@H0ZEG"
},
"version": "11.3"}
raw_response_has_event_list = {"assignedTo": 8207, "closeTime": "2021-05-25T10:29:17Z", "dataSourceList": ["47"],
"deviceList": None,
"eventList": [
{"id": "144117387300438016|6204912068", "lastTime": "2021-05-25T09:47:10Z",
"message": "TEST"}],
"history": "\n------- Viewed: 05/25/2021 10:26:37(GMT)"
"TEST@TEST -------\n\n------- Viewed: 05/25/2021 10:27:34("
"GMT) "
" TEST@TEST -------\n",
"id": 58136,
"notes": "------- Opened on 2021/05/25 09:53:53(GMT) by Triggered Condition -------"
"\n\n------- In Progress: 05/25/2021 10:29:17(GMT) Xsoar@TEST -------"
"\n\n------- Changes: 05/25/2021 10:29:17(GMT) Xsoar@TEST -------"
"\n Organization\n old: None\n new: BRD"
"\n\n", "openTime": "2021-05-25T09:53:53Z",
"orgId": 2, "severity": 50, "statusId": 3,
"summary": "ALERT - Scan"}
mocker.patch.object(McAfeeESMClient, 'get_case_detail', return_value=('', {}, raw_response_has_event_list))
mocker.patch.object(McAfeeESMClient, '_McAfeeESMClient__request', return_value={})
client = McAfeeESMClient(params)
client.edit_case()
result = client._McAfeeESMClient__request.call_args.kwargs['data']['caseDetail']
assert len(result['eventList']) > 0
|
8,727 |
def add_common_arguments(parser):
"""Add common and configuration-related arguments to a ``parser``.
:param parser: Argument parser (or subparser)
:type parser: argparse.ArgumentParser
This functions adds the common arguments for Sopel's command line tools.
It adds the following arguments:
* ``-c/--config``: the name of the Sopel config, or its absolute path
* ``--config-dir``: the directory to look for Sopel config
This can be used on an argument parser, or an argument subparser, to handle
these cases::
[sopel-command] -c [filename]
[sopel-command] [action] -c [filename]
[sopel-command] --config-dir [directory] -c [name]
Then, when the parser parses the command line arguments, it will expose
``config`` and ``configdir`` options that can be used to find and load
Sopel's settings.
.. seealso::
The :func:`sopel.cli.utils.load_settings` function uses an ``options``
object from a parser configured with such arguments.
"""
parser.add_argument(
'-c', '--config',
default='default',
metavar='filename',
dest='config',
help='Use a specific configuration file.')
parser.add_argument(
'--config-dir',
default=config.DEFAULT_HOMEDIR,
dest='configdir',
help='Look for configuration from this Sopel config directory.')
|
def add_common_arguments(parser):
"""Add common and configuration-related arguments to a ``parser``.
:param parser: Argument parser (or subparser)
:type parser: argparse.ArgumentParser
This functions adds the common arguments for Sopel's command line tools.
It adds the following arguments:
* ``-c/--config``: the name of the Sopel config, or its absolute path
* ``--config-dir``: the directory to look for Sopel config
This can be used on an argument parser, or an argument subparser, to handle
these cases::
[sopel-command] -c [filename]
[sopel-command] [action] -c [filename]
[sopel-command] --config-dir [directory] -c [name]
Then, when the parser parses the command line arguments, it will expose
``config`` and ``configdir`` options that can be used to find and load
Sopel's settings.
.. seealso::
The :func:`sopel.cli.utils.load_settings` function uses an ``options``
object from a parser configured with such arguments.
"""
parser.add_argument(
'-c', '--config',
default='default',
metavar='filename',
dest='config',
help='Use a specific configuration file.')
parser.add_argument(
'--config-dir',
default=config.DEFAULT_HOMEDIR,
dest='configdir',
help='Look for configuration files in this directory.')
|
9,078 |
def _install(tarball, install_args=()):
# extracting the tarball
tmpdir = tempfile.mkdtemp()
log.warn("Extracting in %s", tmpdir)
old_wd = os.getcwd()
try:
os.chdir(tmpdir)
tar = tarfile.open(tarball)
_extractall(tar)
tar.close()
# going in the directory
subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
os.chdir(subdir)
log.warn("Now working in %s", subdir)
# installing
log.warn("Installing Setuptools")
if not _python_cmd("setup.py", "install", *install_args):
log.warn("Something went wrong during the installation.")
log.warn("See the error message above.")
# exitcode will be 2
return 2
finally:
os.chdir(old_wd)
shutil.rmtree(tmpdir)
|
def _install(tarball, install_args=()):
# extracting the tarball
tmpdir = tempfile.mkdtemp()
log.warn(f"Extracting in {tmpdir}")
old_wd = os.getcwd()
try:
os.chdir(tmpdir)
tar = tarfile.open(tarball)
_extractall(tar)
tar.close()
# going in the directory
subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
os.chdir(subdir)
log.warn("Now working in %s", subdir)
# installing
log.warn("Installing Setuptools")
if not _python_cmd("setup.py", "install", *install_args):
log.warn("Something went wrong during the installation.")
log.warn("See the error message above.")
# exitcode will be 2
return 2
finally:
os.chdir(old_wd)
shutil.rmtree(tmpdir)
|
47,686 |
def run_pipx_command(args):
setup(args)
verbose = args.verbose
if "package" in args:
package = args.package
if urllib.parse.urlparse(package).scheme:
raise PipxError("Package cannot be a url")
if package == "pipx":
if args.command == "uninstall":
logging.warning(
"Did you mean to use 'pipx-app' instead of 'pipx' for the package name?"
)
else:
raise PipxError("use 'pipx-app' instead of 'pipx' for package name")
if "spec" in args and args.spec is not None:
if urllib.parse.urlparse(args.spec).scheme:
if "#egg=" not in args.spec:
args.spec = args.spec + f"#egg={package}"
venv_dir = pipx_local_venvs / package
logging.info(f"virtualenv location is {venv_dir}")
if args.command == "install":
package_or_url = (
args.spec if ("spec" in args and args.spec is not None) else package
)
install(
venv_dir,
package,
package_or_url,
local_bin_dir,
args.python,
verbose,
force=args.force,
)
elif args.command == "inject":
package_or_url = (
args.spec if ("spec" in args and args.spec is not None) else args.dependency
)
inject(venv_dir, args.dependency, package_or_url, verbose)
elif args.command == "upgrade":
package_or_url = (
args.spec if ("spec" in args and args.spec is not None) else package
)
upgrade(venv_dir, package, package_or_url, verbose, upgrading_all=False)
elif args.command == "list":
list_packages(pipx_local_venvs)
elif args.command == "uninstall":
uninstall(venv_dir, package, local_bin_dir, verbose)
elif args.command == "uninstall-all":
uninstall_all(pipx_local_venvs, local_bin_dir, verbose)
print(f"To reinstall pipx, run '{INSTALL_PIPX_CMD}'")
elif args.command == "upgrade-all":
upgrade_all(pipx_local_venvs, verbose)
elif args.command == "reinstall-all":
reinstall_all(pipx_local_venvs, local_bin_dir, args.python, verbose)
else:
raise PipxError(f"Unknown command {args.command}")
|
def run_pipx_command(args):
setup(args)
verbose = args.verbose
if "package" in args:
package = args.package
if urllib.parse.urlparse(package).scheme:
raise PipxError("Package cannot be a url")
if package == "pipx":
if args.command == "uninstall":
logging.warning(
"Did you mean to use 'pipx-app' instead of 'pipx' for the package name?"
)
else:
raise PipxError("use 'pipx-app' instead of 'pipx' for package name")
if "spec" in args and args.spec is not None:
if urllib.parse.urlparse(args.spec).scheme:
if "#egg=" not in args.spec:
args.spec = args.spec + f"#egg={package}"
venv_dir = pipx_local_venvs / package
logging.info(f"virtualenv location is {venv_dir}")
if args.command == "install":
package_or_url = (
args.spec if ("spec" in args and args.spec is not None) else package
)
install(
venv_dir,
package,
package_or_url,
local_bin_dir,
args.python,
verbose,
force=args.force,
)
elif args.command == "inject":
package_or_url = (
args.spec if ("spec" in args and args.spec is not None) else args.dependency
)
inject(venv_dir, args.dependency, verbose)
elif args.command == "upgrade":
package_or_url = (
args.spec if ("spec" in args and args.spec is not None) else package
)
upgrade(venv_dir, package, package_or_url, verbose, upgrading_all=False)
elif args.command == "list":
list_packages(pipx_local_venvs)
elif args.command == "uninstall":
uninstall(venv_dir, package, local_bin_dir, verbose)
elif args.command == "uninstall-all":
uninstall_all(pipx_local_venvs, local_bin_dir, verbose)
print(f"To reinstall pipx, run '{INSTALL_PIPX_CMD}'")
elif args.command == "upgrade-all":
upgrade_all(pipx_local_venvs, verbose)
elif args.command == "reinstall-all":
reinstall_all(pipx_local_venvs, local_bin_dir, args.python, verbose)
else:
raise PipxError(f"Unknown command {args.command}")
|
4,189 |
def make_resolution_matrix(fwd, invop, method, lambda2):
"""Compute resolution matrix for linear inverse operator.
Parameters
----------
fwd: forward solution
Used to get leadfield matrix.
invop: inverse operator
Inverse operator to get inverse matrix.
pick_ori='normal' will be selected.
method: string
Inverse method to use (MNE, dSPM, sLORETA).
lambda2: float
The regularisation parameter.
Returns
-------
resmat: 2D numpy array.
Resolution matrix (inverse matrix times leadfield).
"""
# make sure forward and inverse operator match
fwd = _convert_forward_match_inv(fwd, invop)
# don't include bad channels
# only use good channels from inverse operator
bads_inv = invop['info']['bads']
# good channels
ch_names = [c for c in invop['info']['ch_names'] if (c not in bads_inv)]
# get leadfield matrix from forward solution
leadfield = _pick_leadfield(fwd['sol']['data'], fwd, ch_names)
invmat = _get_matrix_from_inverse_operator(invop, fwd,
method=method, lambda2=lambda2)
resmat = invmat.dot(leadfield)
dims = resmat.shape
print('Dimensions of resolution matrix: %d by %d.' % (dims[0], dims[1]))
return resmat
|
def make_resolution_matrix(fwd, invop, method, lambda2):
"""Compute resolution matrix for linear inverse operator.
Parameters
----------
fwd: forward solution
Used to get leadfield matrix.
inverse_operator: instance of InverseOperator
Inverse operator to get inverse matrix.
pick_ori='normal' will be selected.
method: string
Inverse method to use (MNE, dSPM, sLORETA).
lambda2: float
The regularisation parameter.
Returns
-------
resmat: 2D numpy array.
Resolution matrix (inverse matrix times leadfield).
"""
# make sure forward and inverse operator match
fwd = _convert_forward_match_inv(fwd, invop)
# don't include bad channels
# only use good channels from inverse operator
bads_inv = invop['info']['bads']
# good channels
ch_names = [c for c in invop['info']['ch_names'] if (c not in bads_inv)]
# get leadfield matrix from forward solution
leadfield = _pick_leadfield(fwd['sol']['data'], fwd, ch_names)
invmat = _get_matrix_from_inverse_operator(invop, fwd,
method=method, lambda2=lambda2)
resmat = invmat.dot(leadfield)
dims = resmat.shape
print('Dimensions of resolution matrix: %d by %d.' % (dims[0], dims[1]))
return resmat
|
38,540 |
def match_1d(
new_g: pp.Grid,
old_g: pp.Grid,
tol: float,
scaling: Optional[Literal["averaged", "integrated"]] = None,
) -> sps.spmatrix:
"""Obtain mappings between the cells of non-matching 1d grids.
The overlaps are identified as a sparse matrix which maps from cells in the old to
the new grid.
It is asumed that the two grids are aligned, with common start and
endpoints.
Parameters:
new_g (pp.Grid). Target grid for the mapping. Should have dimension 1.
old_g (pp.Grid). Original grid. Should have dimension 1.
tol (float): Tolerance used to filter away false overlaps caused by
numerical errors. Should be scaled relative to the cell size.
scaling (str, optional): Control weights of the returned matrix, see return
values for specification.
Returns:
sps.spmatrix: Mapping from the cells in the old to the new grid. The values in
the matrix depends on the parameter scaling: If set to 'averaged', a mapping
fit for intensive quantities (e.g., pressure) is returned (all rows sum to
unity). If set to 'integrated', the matrix is a mapping for extensive
quantities (column sum is 1). If not provided, the matrix elements are 1
for cell-pairs (new and old grid) that overlap; overlaps with areas less
than the parameter tol will be ignored.
"""
# Cell-node relation between grids - we know there are two nodes per cell
cell_nodes_new = new_g.cell_nodes()
cell_nodes_old = old_g.cell_nodes()
nodes_new = pp.utils.mcolon.mcolon(
cell_nodes_new.indptr[0:-1], cell_nodes_new.indptr[1:]
)
nodes_old = pp.utils.mcolon.mcolon(
cell_nodes_old.indptr[0:-1], cell_nodes_old.indptr[1:]
)
# Reshape so that the nodes of cells are stored columnwise
lines_new = cell_nodes_new.indices[nodes_new].reshape((2, -1), order="F")
lines_old = cell_nodes_old.indices[nodes_old].reshape((2, -1), order="F")
p_new = new_g.nodes
p_old = old_g.nodes
# Compute the intersection between the two tessalations.
# intersect is a list, every list member is a tuple with overlapping
# cells in grid 1 and 2, and their common area.
intersect = pp.intersections.line_tesselation(p_new, p_old, lines_new, lines_old)
num = len(intersect)
new_g_ind = np.zeros(num, dtype=int)
old_g_ind = np.zeros(num, dtype=int)
weights = np.zeros(num)
for ind, i in enumerate(intersect):
new_g_ind[ind] = i[0]
old_g_ind[ind] = i[1]
weights[ind] = i[2]
# The weights as computed from the intersection algorithm gives the volumes of the
# intersected cells. Depending on the specified scaling, the weights should be
# modified.
if scaling == "averaged":
weights /= new_g.cell_volumes[new_g_ind]
elif scaling == "integrated":
weights /= old_g.cell_volumes[old_g_ind]
elif scaling is None:
mask = weights > tol
new_g_ind = new_g_ind[mask]
old_g_ind = old_g_ind[mask]
weights = np.ones_like(new_g_ind)
return sps.coo_matrix(
(weights, (new_g_ind, old_g_ind)), shape=(new_g.num_cells, old_g.num_cells)
).tocsr()
|
def match_1d(
new_g: pp.Grid,
old_g: pp.Grid,
tol: float,
scaling: Optional[Literal["averaged", "integrated"]] = None,
) -> sps.spmatrix:
"""Obtain mappings between the cells of non-matching 1d grids.
The overlaps are identified as a sparse matrix which maps from cells in the old to
the new grid.
It is asumed that the two grids are aligned, with common start and
endpoints.
Parameters:
new_g (pp.Grid). Target grid for the mapping. Should have dimension 1.
old_g (pp.Grid). Original grid. Should have dimension 1.
tol (float): Tolerance used to filter away false overlaps caused by
numerical errors. Should be scaled relative to the cell size.
scaling (str, optional): Control weights of the returned matrix, see return
values for specification.
Returns:
sps.spmatrix: Mapping from the cells in the old to the new grid. The values in
the matrix depends on the parameter scaling: If set to 'averaged', a mapping
fit for intensive quantities (e.g., pressure) is returned (all rows sum to
unity). If set to 'integrated', the matrix is a mapping for extensive
quantities (column sum is 1). If not provided, the matrix elements are 1
for cell-pairs (new and old grid) that overlap; overlaps with areas less
than the parameter tol will be ignored.
"""
# Cell-node relation between grids - we know there are two nodes per cell
cell_nodes_new = new_g.cell_nodes()
cell_nodes_old = old_g.cell_nodes()
nodes_new = pp.utils.mcolon.mcolon(
cell_nodes_new.indptr[0:-1], cell_nodes_new.indptr[1:]
)
nodes_old = pp.utils.mcolon.mcolon(
cell_nodes_old.indptr[0:-1], cell_nodes_old.indptr[1:]
)
# Reshape so that the nodes of cells are stored columnwise
lines_new = cell_nodes_new.indices[nodes_new].reshape((2, -1), order="F")
lines_old = cell_nodes_old.indices[nodes_old].reshape((2, -1), order="F")
p_new = new_g.nodes
p_old = old_g.nodes
# Compute the intersection between the two tesselations.
# intersect is a list, every list member is a tuple with overlapping
# cells in grid 1 and 2, and their common area.
intersect = pp.intersections.line_tesselation(p_new, p_old, lines_new, lines_old)
num = len(intersect)
new_g_ind = np.zeros(num, dtype=int)
old_g_ind = np.zeros(num, dtype=int)
weights = np.zeros(num)
for ind, i in enumerate(intersect):
new_g_ind[ind] = i[0]
old_g_ind[ind] = i[1]
weights[ind] = i[2]
# The weights as computed from the intersection algorithm gives the volumes of the
# intersected cells. Depending on the specified scaling, the weights should be
# modified.
if scaling == "averaged":
weights /= new_g.cell_volumes[new_g_ind]
elif scaling == "integrated":
weights /= old_g.cell_volumes[old_g_ind]
elif scaling is None:
mask = weights > tol
new_g_ind = new_g_ind[mask]
old_g_ind = old_g_ind[mask]
weights = np.ones_like(new_g_ind)
return sps.coo_matrix(
(weights, (new_g_ind, old_g_ind)), shape=(new_g.num_cells, old_g.num_cells)
).tocsr()
|
56,352 |
def _test_pytest_function(pyfuncitem):
_pdb = pytestPDB._init_pdb("runcall")
testfunction = pyfuncitem.obj
pyfuncitem.obj = _pdb.runcall
if "func" in pyfuncitem.funcargs: # pragma: no branch
raise ValueError("--trace can't be used with a fixture named func!")
pyfuncitem.funcargs["func"] = testfunction
if "func" not in pyfuncitem._fixtureinfo.argnames:
# if using e.g. parametrize the _fixtureinfo is shared
new_list = list(pyfuncitem._fixtureinfo.argnames)
new_list.append("func")
pyfuncitem._fixtureinfo.argnames = tuple(new_list)
|
def _test_pytest_function(pyfuncitem):
_pdb = pytestPDB._init_pdb("runcall")
testfunction = pyfuncitem.obj
pyfuncitem.obj = _pdb.runcall
if "func" in pyfuncitem.funcargs: # pragma: no branch
raise ValueError("--trace can't be used with a fixture named func!")
pyfuncitem.funcargs["func"] = testfunction
if "func" not in pyfuncitem._fixtureinfo.argnames:
# TODO: when using parameterized tests, the _fixtureinfo is shared
# that needs to move to FunctionDefinition
new_list = list(pyfuncitem._fixtureinfo.argnames)
new_list.append("func")
pyfuncitem._fixtureinfo.argnames = tuple(new_list)
|
48,645 |
def check_metadata() -> None:
for distribution in os.listdir("stubs"):
with open(os.path.join("stubs", distribution, "METADATA.toml")) as f:
data = tomli.loads(f.read())
assert "version" in data, f"Missing version for {distribution}"
version = data["version"]
msg = f"Unsupported Python version {version}"
assert isinstance(version, str), msg
assert re.fullmatch(r"\d+(\.\d+)+|\d+(\.\d+)*\.\*", version), msg
for key in data:
assert key in metadata_keys, f"Unexpected key {key} for {distribution}"
assert isinstance(data.get("requires", []), list), f"Invalid requires value for {distribution}"
for dep in data.get("requires", []):
assert isinstance(dep, str), f"Invalid dependency {dep} for {distribution}"
for space in " \t\n":
assert space not in dep, f"For consistency dependency should not have whitespace: {dep}"
assert ";" not in dep, f"Semicolons in dependencies are not supported, got {dep}"
stripped, relation, dep_version = _strip_dep_version(dep)
if relation:
assert relation in {"==", ">", ">=", "<", "<="}, f"Bad relation '{relation}' in dependency {dep}"
assert dep_version.count(".") <= 2, f"Bad version '{dep_version}' in dependency {dep}"
for part in dep_version.split("."):
assert part.isnumeric(), f"Bad version '{part}' in dependency {dep}"
assert set(data.get("tool", [])).issubset({"stubtest"}), f"Unrecognised tool for {distribution}"
for key in data.get("tool", {}).get("stubtest", {}):
assert key in stubtest_keys, f"Unrecognised stubtest key {key} for {distribution}"
|
def check_metadata() -> None:
for distribution in os.listdir("stubs"):
with open(os.path.join("stubs", distribution, "METADATA.toml")) as f:
data = tomli.loads(f.read())
assert "version" in data, f"Missing version for {distribution}"
version = data["version"]
msg = f"Unsupported Python version {version}"
assert isinstance(version, str), msg
assert re.fullmatch(r"\d+(\.\d+)+|\d+(\.\d+)*\.\*", version), msg
for key in data:
assert key in metadata_keys, f"Unexpected key {key} for {distribution}"
assert isinstance(data.get("requires", []), list), f"Invalid requires value for {distribution}"
for dep in data.get("requires", []):
assert isinstance(dep, str), f"Invalid dependency {dep} for {distribution}"
for space in " \t\n":
assert space not in dep, f"For consistency dependency should not have whitespace: {dep}"
assert ";" not in dep, f"Semicolons in dependencies are not supported, got {dep}"
stripped, relation, dep_version = _strip_dep_version(dep)
if relation:
assert relation in {"==", ">", ">=", "<", "<="}, f"Bad relation '{relation}' in dependency {dep}"
assert dep_version.count(".") <= 2, f"Bad version '{dep_version}' in dependency {dep}"
for part in dep_version.split("."):
assert part.isnumeric(), f"Bad version '{part}' in dependency {dep}"
assert set(data.get("tool", [])).issubset(tool_keys.keys()), f"Unrecognised tool for {distribution}"
for tool, tk in tool_keys.items():
for key in data.get("tool", {}).get(tool, {}):
assert key in tk, f"Unrecognised {tool} key {key} for {distribution}"
|
750 |
def get_data_independent_estimation_quantities(design_matrix, regularization_matrix=None):
Q = compute_unscaled_posterior_precision(design_matrix, regularization_matrix)
unscaled_posterior_covariance = covariance_from_precision(Q)
# TODO: evaluate whether using the explicit inverse leads to numerical instability
pseudoInv = np.einsum('...ij, ...kj->...ik', unscaled_posterior_covariance, design_matrix)
# pseudoInv = np.linalg.solve(S, np.swapaxes(design_matrix, -1, -2))
degrees_of_freedom = compute_degrees_of_freedom(design_matrix, pseudoInv)
return unscaled_posterior_covariance, pseudoInv, degrees_of_freedom
|
def get_data_independent_estimation_quantities(design_matrix, regularization_matrix=None):
Q = compute_unscaled_posterior_precision(design_matrix, regularization_matrix)
unscaled_posterior_covariance = covariance_from_precision(Q)
# TODO: evaluate whether using the explicit inverse leads to numerical instability
pseudoInv = np.einsum('...ij, ...kj->...ik', unscaled_posterior_covariance, design_matrix)
# pseudoInv = np.linalg.solve(S, np.swapaxes(design_matrix, -1, -2))
degrees_of_freedom = compute_degrees_of_freedom(design_matrix, pseudoInv)
return unscaled_posterior_covariance, pseudo_inv, degrees_of_freedom
|
31,789 |
def update_group_command(client, args):
scim = safe_load_json(args.get('scim'))
group_id = scim.get('id')
group_name = scim.get('displayName')
if not group_id:
return_error("You must supply 'id' in the scim data")
member_ids_to_add = args.get('memberIdsToAdd')
member_ids_to_delete = args.get('memberIdsToDelete')
operations = []
member_ids_json_list = []
if member_ids_to_add:
if type(member_ids_to_add) is not list:
member_ids_to_add = json.loads(member_ids_to_add)
for member_id in member_ids_to_add:
member_ids_json_list.append(
{
"value": member_id
}
)
if member_ids_json_list:
operation = {
"op": "add",
"path": "members",
"value": member_ids_json_list
}
operations.append(operation)
if member_ids_to_delete:
if type(member_ids_to_delete) is not list:
member_ids_to_delete = json.loads(member_ids_to_delete)
for member_id in member_ids_to_delete:
operation = {
"op": "remove",
"path": f"members[value eq \"{member_id}\"]"
}
operations.append(operation)
group_input = {'schemas': ["urn:ietf:params:scim:api:messages:2.0:PatchOp"], 'Operations': operations}
res = client.update_group(group_id, group_input)
res_json = res.json()
if res.status_code == 200:
generic_iam_context = OutputContext(success=True, id=group_id, displayName=group_name)
elif res.status_code == 404:
generic_iam_context = OutputContext(success=False, id=group_id, displayName=group_name, errorCode=404,
errorMessage="Group/User Not Found or User not a member of group",
details=res.json())
else:
generic_iam_context = OutputContext(success=False, displayName=group_name, id=group_id,
errorCode=res_json.get('code'),
errorMessage=res_json.get('message'), details=res_json)
readable_output = tableToMarkdown('Oracle Cloud Update Group:', generic_iam_context.data, removeNull=True)
return CommandResults(
raw_response=generic_iam_context.data,
outputs_prefix=generic_iam_context.command,
outputs_key_field='id',
outputs=generic_iam_context.data,
readable_output=readable_output
)
|
def update_group_command(client, args):
scim = safe_load_json(args.get('scim'))
group_id = scim.get('id')
group_name = scim.get('displayName')
if not group_id:
return_error("You must supply 'id' in the scim data")
member_ids_to_add = args.get('memberIdsToAdd')
member_ids_to_delete = args.get('memberIdsToDelete')
operations = []
member_ids_json_list = []
if member_ids_to_add:
if type(member_ids_to_add) is not list:
member_ids_to_add = json.loads(member_ids_to_add)
for member_id in member_ids_to_add:
member_ids_json_list.append(
{
"value": member_id
}
)
if member_ids_json_list:
operation = {
"op": "add",
"path": "members",
"value": member_ids_json_list
}
operations.append(operation)
if member_ids_to_delete:
if type(member_ids_to_delete) is not list:
member_ids_to_delete = json.loads(member_ids_to_delete)
for member_id in member_ids_to_delete:
operation = {
"op": "remove",
"path": f"members[value eq \"{member_id}\"]"
}
operations.append(operation)
group_input = {'schemas': ['urn:ietf:params:scim:api:messages:2.0:PatchOp'], 'Operations': operations}
res = client.update_group(group_id, group_input)
res_json = res.json()
if res.status_code == 200:
generic_iam_context = OutputContext(success=True, id=group_id, displayName=group_name)
elif res.status_code == 404:
generic_iam_context = OutputContext(success=False, id=group_id, displayName=group_name, errorCode=404,
errorMessage="Group/User Not Found or User not a member of group",
details=res.json())
else:
generic_iam_context = OutputContext(success=False, displayName=group_name, id=group_id,
errorCode=res_json.get('code'),
errorMessage=res_json.get('message'), details=res_json)
readable_output = tableToMarkdown('Oracle Cloud Update Group:', generic_iam_context.data, removeNull=True)
return CommandResults(
raw_response=generic_iam_context.data,
outputs_prefix=generic_iam_context.command,
outputs_key_field='id',
outputs=generic_iam_context.data,
readable_output=readable_output
)
|
45,354 |
def get_benchmarks_shapes(bench_id: str, default: list):
"""
Get custom benchmarks shapes from a json file stored in MODIN_ASV_DATASIZE_CONFIG.
If `bench_id` benchmark is not found in the file, then the default value will
be used.
Parameters
----------
bench_id : str
Unique benchmark identifier that is used to get shapes.
default : list
Default shapes.
Returns
-------
list
Benchmark shapes.
"""
try:
from modin.config import AsvDatasizeConfig
filename = AsvDatasizeConfig.get()
except ImportError:
filename = os.environ.get("MODIN_ASV_DATASIZE_CONFIG", None)
if filename:
global CONFIG_FROM_FILE
if not CONFIG_FROM_FILE:
# should be json
with open(filename) as _f:
CONFIG_FROM_FILE = json.load(_f)
if bench_id in CONFIG_FROM_FILE:
# convert strings to tuples;
# example: "omnisci.TimeReadCsvNames": ["(5555, 55)", "(3333, 33)"]
shapes = [eval(shape) for shape in CONFIG_FROM_FILE[bench_id]]
return shapes
return default
|
def get_benchmarks_shapes(bench_id: str, default: list):
"""
Get custom benchmark shapes from a json file stored in MODIN_ASV_DATASIZE_CONFIG.
If `bench_id` benchmark is not found in the file, then the default value will
be used.
Parameters
----------
bench_id : str
Unique benchmark identifier that is used to get shapes.
default : list
Default shapes.
Returns
-------
list
Benchmark shapes.
"""
try:
from modin.config import AsvDatasizeConfig
filename = AsvDatasizeConfig.get()
except ImportError:
filename = os.environ.get("MODIN_ASV_DATASIZE_CONFIG", None)
if filename:
global CONFIG_FROM_FILE
if not CONFIG_FROM_FILE:
# should be json
with open(filename) as _f:
CONFIG_FROM_FILE = json.load(_f)
if bench_id in CONFIG_FROM_FILE:
# convert strings to tuples;
# example: "omnisci.TimeReadCsvNames": ["(5555, 55)", "(3333, 33)"]
shapes = [eval(shape) for shape in CONFIG_FROM_FILE[bench_id]]
return shapes
return default
|
59,640 |
def args_in_kwargs(args, kwargs):
"""
Take a list and a dictionary, and determine if any entries in the list are
keys in the dictionary.
This function is used to determine if one of the required arguments is
passed to raise a GMTInvalidInput Error.
Parameters
----------
args : list
List of required arguments, using the GMT short aliases.
kwargs : dict
The dictionary of kwargs is the format returned by the _preprocess
function in BasePlotting in base_plotting.py. The keys are the GMT
short aliases of the parameters.
"""
for arg in args:
if arg in list(kwargs.keys()):
return True
return False
|
def args_in_kwargs(args, kwargs):
"""
Take a list and a dictionary, and determine if any entries in the list are
keys in the dictionary.
This function is used to determine if one of the required arguments is
passed to raise a GMTInvalidInput Error.
Parameters
----------
args : list
List of required arguments, using the GMT short aliases.
kwargs : dict
The dictionary of kwargs is the format returned by the _preprocess
function in BasePlotting in base_plotting.py. The keys are the GMT
short aliases of the parameters.
Returns
--------
bool
If one of the required arguments is in `kwargs`.
"""
for arg in args:
if arg in list(kwargs.keys()):
return True
return False
|
38,063 |
def _load_static_earth_relief(**kwargs):
"""
Load the static_earth_relief file for internal testing.
Returns
-------
data : xarray.DataArray
A grid of Earth relief for internal tests.
"""
fname = which("@static_earth_relief.nc", download="c")
data = xr.open_dataarray(fname)
return data
|
def _load_static_earth_relief(**kwargs): # pylint: disable=unused-argument
"""
Load the static_earth_relief file for internal testing.
Returns
-------
data : xarray.DataArray
A grid of Earth relief for internal tests.
"""
fname = which("@static_earth_relief.nc", download="c")
data = xr.open_dataarray(fname)
return data
|
31,574 |
def fetch_incidents(client, first_fetch, max_fetch):
"""
This function retrieves new incidents every interval (default is 1 minute).
"""
now = datetime.timestamp(datetime.utcnow())
last_run_object = demisto.getLastRun()
last_run = last_run_object.get('time', None) if last_run_object else None
if not last_run:
if first_fetch:
last_run = float(first_fetch)
else:
last_run = datetime.timestamp(datetime.utcnow() - timedelta(days=1))
result = client.get_incidents(last_run, now)
if not result.get('success'):
raise DemistoException(f"ERROR: {result['message']}; last_run: {last_run}; now: {now}")
lp_incidents = result.get('incidents')
incidents = []
if len(lp_incidents) > max_fetch:
next_fetch_time = lp_incidents[max_fetch]['detection_timestamp']
lp_incidents = lp_incidents[:max_fetch]
else:
next_fetch_time = now
demisto.info(f"Executing LogPoint fetch_incidents between {last_run} and {next_fetch_time} Timestamp.")
for inc in lp_incidents:
detection_ts = inc['detection_timestamp']
dt = datetime.utcfromtimestamp(detection_ts)
occurred = dt.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
incidents.append({
'name': inc.get('name', 'LogPoint - No name'),
'occurred': occurred,
'severity': get_demisto_severity(inc.get('risk_level')),
'rawJSON': json.dumps(inc)
})
demisto.setLastRun({'time': next_fetch_time})
return incidents
|
def fetch_incidents(client, first_fetch, max_fetch):
"""
This function retrieves new incidents every interval (default is 1 minute).
"""
now = datetime.timestamp(datetime.utcnow())
last_run_object = demisto.getLastRun()
last_run = last_run_object.get('time', None) if last_run_object else None
if not last_run:
if first_fetch:
last_run = float(first_fetch)
else:
last_run = datetime.timestamp(datetime.utcnow() - timedelta(days=1))
result = client.get_incidents(last_run, now)
if not result.get('success'):
raise DemistoException(f"ERROR: {result.get('message')}; last_run: {last_run}; now: {now}")
lp_incidents = result.get('incidents')
incidents = []
if len(lp_incidents) > max_fetch:
next_fetch_time = lp_incidents[max_fetch]['detection_timestamp']
lp_incidents = lp_incidents[:max_fetch]
else:
next_fetch_time = now
demisto.info(f"Executing LogPoint fetch_incidents between {last_run} and {next_fetch_time} Timestamp.")
for inc in lp_incidents:
detection_ts = inc['detection_timestamp']
dt = datetime.utcfromtimestamp(detection_ts)
occurred = dt.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
incidents.append({
'name': inc.get('name', 'LogPoint - No name'),
'occurred': occurred,
'severity': get_demisto_severity(inc.get('risk_level')),
'rawJSON': json.dumps(inc)
})
demisto.setLastRun({'time': next_fetch_time})
return incidents
|
22,063 |
def parse_message(xml):
"""
解析微信服务器推送的 XML 消息
:param xml: XML 消息
:return: 解析成功返回对应的消息或事件,否则返回 ``UnknownMessage``
"""
if not xml:
return
message = xmltodict.parse(to_text(xml))['xml']
message_type = message['MsgType'].lower()
event_type = None
if message_type == 'event' or message_type.startswith('device_'):
if 'Event' in message:
event_type = message['Event'].lower()
# special event type for device_event
if event_type is None and message_type.startswith('device_'):
event_type = message_type
elif message_type.startswith('device_'):
event_type = 'device_{event}'.format(event=event_type)
if event_type == 'subscribe' and message.get('EventKey'):
event_key = message['EventKey']
if event_key.startswith(('scanbarcode|', 'scanimage|')):
event_type = 'subscribe_scan_product'
message['Event'] = event_type
elif event_key.startswith('qrscene_'):
# Scan to subscribe with scene id event
event_type = 'subscribe_scan'
message['Event'] = event_type
message['EventKey'] = event_key[8:]
message_class = EVENT_TYPES.get(event_type, UnknownMessage)
else:
message_class = MESSAGE_TYPES.get(message_type, UnknownMessage)
return message_class(message)
|
def parse_message(xml):
"""
解析微信服务器推送的 XML 消息
:param xml: XML 消息
:return: 解析成功返回对应的消息或事件,否则返回 ``UnknownMessage``
"""
if not xml:
return
message = xmltodict.parse(to_text(xml))['xml']
message_type = message['MsgType'].lower()
event_type = None
if message_type == 'event' or message_type.startswith('device_'):
if 'Event' in message:
event_type = message['Event'].lower()
# special event type for device_event
if event_type is None and message_type.startswith('device_'):
event_type = message_type
elif message_type.startswith('device_'):
event_type = 'device_{event}'.format(event=event_type)
if event_type == 'subscribe' and message.get('EventKey'):
event_key = message['EventKey']
if event_key.startswith(('scanbarcode|', 'scanimage|')):
event_type = 'subscribe_scan_product'
message['Event'] = event_type
elif event_key.startswith('qrscene_'):
# Scan to subscribe with scene id event
event_type = 'subscribe_scan'
message['Event'] = event_type
message['EventKey'] = event_key[len('qrscene_'):]
message_class = EVENT_TYPES.get(event_type, UnknownMessage)
else:
message_class = MESSAGE_TYPES.get(message_type, UnknownMessage)
return message_class(message)
|
39,273 |
def cell_array(mesh, name):
"""Return cell scalars of a vtk object."""
vtkarr = mesh.GetCellData().GetAbstractArray(name)
return convert_array(vtkarr)
|
def cell_array(mesh, name):
"""Return cell array of a vtk object."""
vtkarr = mesh.GetCellData().GetAbstractArray(name)
return convert_array(vtkarr)
|
15,956 |
def get_resource(domain_name: str, domain_data: ConfigType) -> str:
"""Return the resource for the specified domain_data."""
if domain_name in ["switch", "light"]:
return str(domain_data["output"])
if domain_name in ["binary_sensor", "sensor"]:
return str(domain_data["source"])
if domain_name == "cover":
return str(domain_data["motor"])
if domain_name == "climate":
return f'{domain_data["source"]}.{domain_data["setpoint"]}'
if domain_name == "scene":
return f'{domain_data["register"]}.{domain_data["scene"]}'
raise ValueError("Unknown domain")
|
def get_resource(domain_name: str, domain_data: ConfigType) -> str:
"""Return the resource for the specified domain_data."""
if domain_name in ["switch", "light"]:
return Cast(str, domain_data["output"])
if domain_name in ["binary_sensor", "sensor"]:
return str(domain_data["source"])
if domain_name == "cover":
return str(domain_data["motor"])
if domain_name == "climate":
return f'{domain_data["source"]}.{domain_data["setpoint"]}'
if domain_name == "scene":
return f'{domain_data["register"]}.{domain_data["scene"]}'
raise ValueError("Unknown domain")
|
17,652 |
def _make_github_repos(
github_login, github_passwd, github_organization, rinfo, existing,
access_protocol, dryrun):
res = []
if not rinfo:
return res # no need to even try!
ncredattempts = 0
# determine the entity under which to create the repos. It might be that
# we would need to check a few credentials
for entity, cred in _gen_github_entity(
github_login,
github_passwd,
github_organization):
lgr.debug("Using entity %s with credential %s", entity, cred)
ncredattempts += 1
for ds, reponame in rinfo:
lgr.debug("Trying to create %s for %s", reponame, ds)
try:
res_ = _make_github_repo(
github_login,
entity,
reponame,
existing,
access_protocol,
dryrun)
# output will contain whatever is returned by _make_github_repo
# but with a dataset prepended to the record
res.append((ds,) + assure_tuple_or_list(res_))
except (gh.BadCredentialsException, gh.GithubException) as e:
if not isinstance(e, gh.BadCredentialsException) and e.status != 403:
# e.g. while deleting a repository, just a generic GithubException is
# raised but code is 403. That one we process, the rest - re-rase
raise
if res:
# so we have succeeded with at least one repo already -
# we should not try any other credential.
# TODO: may be it would make sense to have/use different
# credentials for different datasets e.g. if somehow spread
# across different organizations? but it is not the case here
# IMHO (-- yoh)
raise e
# things blew up, wipe out cred store, if anything is in it
if cred:
lgr.warning("Authentication failed using %s.", cred.name)
else:
lgr.warning("Authentication failed using a token.")
break # go to the next attempt to authenticate
if res:
return res
# External loop should stop querying for the next possible way when it succeeds,
# so we should never get here if everything worked out
if ncredattempts:
raise AccessDeniedError(
"Tried %d times to get authenticated access to GitHub but kept failing"
% ncredattempts
)
else:
raise RuntimeError("Did not even try to create a repo on github")
|
def _make_github_repos(
github_login, github_passwd, github_organization, rinfo, existing,
access_protocol, dryrun):
res = []
if not rinfo:
return res # no need to even try!
ncredattempts = 0
# determine the entity under which to create the repos. It might be that
# we would need to check a few credentials
for entity, cred in _gen_github_entity(
github_login,
github_passwd,
github_organization):
lgr.debug("Using entity %s with credential %s", entity, cred)
ncredattempts += 1
for ds, reponame in rinfo:
lgr.debug("Trying to create %s for %s", reponame, ds)
try:
res_ = _make_github_repo(
github_login,
entity,
reponame,
existing,
access_protocol,
dryrun)
# output will contain whatever is returned by _make_github_repo
# but with a dataset prepended to the record
res.append((ds,) + assure_tuple_or_list(res_))
except (gh.BadCredentialsException, gh.GithubException) as e:
if not isinstance(e, gh.BadCredentialsException) and e.status != 403:
# e.g. while deleting a repository, just a generic GithubException is
# raised but code is 403. That one we process, the rest - re-raise
raise
if res:
# so we have succeeded with at least one repo already -
# we should not try any other credential.
# TODO: may be it would make sense to have/use different
# credentials for different datasets e.g. if somehow spread
# across different organizations? but it is not the case here
# IMHO (-- yoh)
raise e
# things blew up, wipe out cred store, if anything is in it
if cred:
lgr.warning("Authentication failed using %s.", cred.name)
else:
lgr.warning("Authentication failed using a token.")
break # go to the next attempt to authenticate
if res:
return res
# External loop should stop querying for the next possible way when it succeeds,
# so we should never get here if everything worked out
if ncredattempts:
raise AccessDeniedError(
"Tried %d times to get authenticated access to GitHub but kept failing"
% ncredattempts
)
else:
raise RuntimeError("Did not even try to create a repo on github")
|
54,444 |
def test_mean_decrease_impurity_importance_evaluator_max_depth() -> None:
# Assumes that `seed` can be fixed to reproduce identical results.
study = create_study()
study.optimize(objective, n_trials=3)
evaluator = MeanDecreaseImpurityImportanceEvaluator(max_depth=1, seed=0)
param_importance = evaluator.evaluate(study)
evaluator = MeanDecreaseImpurityImportanceEvaluator(max_depth=2, seed=0)
param_importance_different_n_trees = evaluator.evaluate(study)
assert param_importance != param_importance_different_n_trees
|
def test_mean_decrease_impurity_importance_evaluator_max_depth() -> None:
# Assumes that `seed` can be fixed to reproduce identical results.
study = create_study()
study.optimize(objective, n_trials=3)
evaluator = MeanDecreaseImpurityImportanceEvaluator(max_depth=1, seed=0)
param_importance = evaluator.evaluate(study)
evaluator = MeanDecreaseImpurityImportanceEvaluator(max_depth=2, seed=0)
param_importance_different_max_depth = evaluator.evaluate(study)
assert param_importance != param_importance_different_max_depth
|
45,832 |
def reproject_disparity_to_3D(disparity_tensor: torch.Tensor, Q_matrix: torch.Tensor) -> torch.Tensor:
"""
Reproject the disparity tensor to a 3D point cloud.
Args:
disparity_tensor (torch.Tensor): Disparity tensor of shape :math:`(B, H, W)`.
Q_matrix (torch.Tensor): Tensor of Q matrices of shapes :math:`(B, 4, 4)`.
Returns:
torch.Tensor: The 3D point cloud of shape :math:`(B, H * W, 3)`
"""
_check_Q_matrix(Q_matrix)
_check_disparity_tensor(disparity_tensor)
batch_size, rows, cols = disparity_tensor.shape
dtype = disparity_tensor.dtype
device = disparity_tensor.device
homogenous_observation_ndim = 4
euclidian_observation_ndim = homogenous_observation_ndim - 1
# Construct a mesh grid of uv values, i.e. the tensors will contain 1:rows and 1:cols.
u, v = torch.meshgrid(
torch.arange(rows, dtype=dtype, device=device), torch.arange(cols, dtype=dtype, device=device)
)
u, v = u.expand(batch_size, -1, -1), v.expand(batch_size, -1, -1)
# The z dimension in homogenous coordinates are just 1.
z = torch.ones((batch_size, rows, cols), dtype=dtype, device=device)
# Stack the observations into a tensor of shape (batch_size, 4, -1) that contains all
# 4 dimensional vectors [u v disparity 1].
uvdz = torch.stack((u, v, disparity_tensor, z), 1).reshape(batch_size, homogenous_observation_ndim, -1)
# Matrix multiply all vectors with the Q matrix
hom_points = torch.bmm(Q_matrix, uvdz)
# Convert from homogenous to euclidian space.
z_points = torch.unsqueeze(hom_points[:, euclidian_observation_ndim], 1)
points = (hom_points / z_points)[:, :euclidian_observation_ndim]
points = points.permute(0, 2, 1)
# Final check that everything went well.
if not points.shape == (batch_size, rows * cols, euclidian_observation_ndim):
raise StereoException(
f"Something went wrong in `reproject_disparity_to_3D`. Expected the final output"
f"to be of shape {(batch_size, rows * cols, euclidian_observation_ndim)}."
f"But the computed point cloud had shape {points.shape}. "
f"Please ensure input are correct. If this is an error, please submit an issue."
)
return points
|
def reproject_disparity_to_3D(disparity_tensor: torch.Tensor, Q_matrix: torch.Tensor) -> torch.Tensor:
"""
Reproject the disparity tensor to a 3D point cloud.
Args:
disparity_tensor (torch.Tensor): Disparity tensor of shape :math:`(B, 1, H, W)`.
Q_matrix (torch.Tensor): Tensor of Q matrices of shapes :math:`(B, 4, 4)`.
Returns:
torch.Tensor: The 3D point cloud of shape :math:`(B, H * W, 3)`
"""
_check_Q_matrix(Q_matrix)
_check_disparity_tensor(disparity_tensor)
batch_size, rows, cols = disparity_tensor.shape
dtype = disparity_tensor.dtype
device = disparity_tensor.device
homogenous_observation_ndim = 4
euclidian_observation_ndim = homogenous_observation_ndim - 1
# Construct a mesh grid of uv values, i.e. the tensors will contain 1:rows and 1:cols.
u, v = torch.meshgrid(
torch.arange(rows, dtype=dtype, device=device), torch.arange(cols, dtype=dtype, device=device)
)
u, v = u.expand(batch_size, -1, -1), v.expand(batch_size, -1, -1)
# The z dimension in homogenous coordinates are just 1.
z = torch.ones((batch_size, rows, cols), dtype=dtype, device=device)
# Stack the observations into a tensor of shape (batch_size, 4, -1) that contains all
# 4 dimensional vectors [u v disparity 1].
uvdz = torch.stack((u, v, disparity_tensor, z), 1).reshape(batch_size, homogenous_observation_ndim, -1)
# Matrix multiply all vectors with the Q matrix
hom_points = torch.bmm(Q_matrix, uvdz)
# Convert from homogenous to euclidian space.
z_points = torch.unsqueeze(hom_points[:, euclidian_observation_ndim], 1)
points = (hom_points / z_points)[:, :euclidian_observation_ndim]
points = points.permute(0, 2, 1)
# Final check that everything went well.
if not points.shape == (batch_size, rows * cols, euclidian_observation_ndim):
raise StereoException(
f"Something went wrong in `reproject_disparity_to_3D`. Expected the final output"
f"to be of shape {(batch_size, rows * cols, euclidian_observation_ndim)}."
f"But the computed point cloud had shape {points.shape}. "
f"Please ensure input are correct. If this is an error, please submit an issue."
)
return points
|
20,771 |
def get_different_messages(messages_original: List[Msg], messages_new: List[Msg]) -> List[Msg]:
# Return messages that have changed in messages_new
different_messages = []
for m_new in messages_new:
for m_original in messages_original:
if m_new.msgstr != m_original.msgstr \
and m_new.msgid == m_original.msgid and m_new.msgctxt == m_original.msgctxt \
and m_new.msgid != 'msgid ""\n':
different_messages.append(m_new)
return different_messages
|
def getDifferentMessages(messages_original: List[Msg], messages_new: List[Msg]) -> List[Msg]:
# Return messages that have changed in messages_new
different_messages = []
for m_new in messages_new:
for m_original in messages_original:
if m_new.msgstr != m_original.msgstr \
and m_new.msgid == m_original.msgid and m_new.msgctxt == m_original.msgctxt \
and m_new.msgid != 'msgid ""\n':
different_messages.append(m_new)
return different_messages
|
28,585 |
def plot_mcse(
idata,
var_names=None,
filter_vars=None,
coords=None,
errorbar=False,
grid=None,
figsize=None,
textsize=None,
extra_methods=False,
rug=False,
rug_kind="diverging",
n_points=20,
labeller=None,
ax=None,
rug_kwargs=None,
extra_kwargs=None,
text_kwargs=None,
backend=None,
backend_kwargs=None,
show=None,
**kwargs
):
"""Plot quantile or local Monte Carlo Standard Error.
Parameters
----------
idata: obj
Any object that can be converted to an :class:`arviz.InferenceData` object
Refer to documentation of :func:`arviz.convert_to_dataset` for details
var_names: list of variable names, optional
Variables to be plotted. Prefix the variables by ``~`` when you want to exclude
them from the plot.
filter_vars: {None, "like", "regex"}, optional, default=None
If `None` (default), interpret var_names as the real variables names. If "like",
interpret var_names as substrings of the real variables names. If "regex",
interpret var_names as regular expressions on the real variables names. A la
`pandas.filter`.
coords: dict, optional
Coordinates of var_names to be plotted. Passed to :meth:`xarray.Dataset.sel <xarray:xarray.Dataset.sel>`
errorbar: bool, optional
Plot quantile value +/- mcse instead of plotting mcse.
grid : tuple
Number of rows and columns. Defaults to None, the rows and columns are
automatically inferred.
figsize: tuple, optional
Figure size. If None it will be defined automatically.
textsize: float, optional
Text size scaling factor for labels, titles and lines. If None it will be autoscaled based
on figsize.
extra_methods: bool, optional
Plot mean and sd MCSE as horizontal lines. Only taken into account when
``errorbar=False``.
rug: bool
Plot rug plot of values diverging or that reached the max tree depth.
rug_kind: bool
Variable in sample stats to use as rug mask. Must be a boolean variable.
n_points: int
Number of points for which to plot their quantile/local ess or number of subsets
in the evolution plot.
labeller : labeller instance, optional
Class providing the method `make_label_vert` to generate the labels in the plot titles.
Read the :ref:`label_guide` for more details and usage examples.
ax: numpy array-like of matplotlib axes or bokeh figures, optional
A 2D array of locations into which to plot the densities. If not supplied, Arviz will create
its own array of plot areas (and return it).
rug_kwargs: dict
kwargs passed to rug plot in
:meth:`mpl:matplotlib.axes.Axes.plot` or :class:`bokeh:bokeh.models.glyphs.Scatter`
extra_kwargs: dict, optional
kwargs passed as extra method lines in
:meth:`mpl:matplotlib.axes.Axes.axhline` or :class:`bokeh:bokeh.models.Span`
text_kwargs: dict, optional
kwargs passed to :meth:`mpl:matplotlib.axes.Axes.annotate` for extra methods lines labels.
It accepts the additional key ``x`` to set ``xy=(text_kwargs["x"], mcse)``
backend: str, optional
Select plotting backend {"matplotlib","bokeh"}. Default "matplotlib".
backend_kwargs: bool, optional
These are kwargs specific to the backend being passed to
:func:`matplotlib.create_axes_grid` or :func:`bokeh.create_axes_grid`
show: bool, optional
Call backend show function.
**kwargs
Passed as-is to :meth:`mpl:matplotlib.axes.Axes.hist` or :meth:`mpl:matplotlib.axes.Axes.plot` in matplotlib depending on the value of `kind`.
Returns
-------
axes: matplotlib axes or bokeh figures
References
----------
* Vehtari et al. (2019) see https://arxiv.org/abs/1903.08008
Examples
--------
Plot quantile Monte Carlo Standard Error.
.. plot::
:context: close-figs
>>> import arviz as az
>>> idata = az.load_arviz_data("centered_eight")
>>> coords = {"school": ["Deerfield", "Lawrenceville"]}
>>> az.plot_mcse(
... idata, var_names=["mu", "theta"], coords=coords
... )
"""
mean_mcse = None
sd_mcse = None
if coords is None:
coords = {}
if "chain" in coords or "draw" in coords:
raise ValueError("chain and draw are invalid coordinates for this kind of plot")
if labeller is None:
labeller = BaseLabeller()
data = get_coords(convert_to_dataset(idata, group="posterior"), coords)
var_names = _var_names(var_names, data, filter_vars)
probs = np.linspace(1 / n_points, 1 - 1 / n_points, n_points)
mcse_dataset = xr.concat(
[mcse(data, var_names=var_names, method="quantile", prob=p) for p in probs], dim="mcse_dim"
)
plotters = filter_plotters_list(
list(xarray_var_iter(mcse_dataset, var_names=var_names, skip_dims={"mcse_dim"})),
"plot_mcse",
)
length_plotters = len(plotters)
rows, cols = default_grid(length_plotters, grid=grid)
if extra_methods:
mean_mcse = mcse(data, var_names=var_names, method="mean")
sd_mcse = mcse(data, var_names=var_names, method="sd")
mcse_kwargs = dict(
ax=ax,
plotters=plotters,
length_plotters=length_plotters,
rows=rows,
cols=cols,
figsize=figsize,
errorbar=errorbar,
rug=rug,
data=data,
probs=probs,
kwargs=kwargs,
extra_methods=extra_methods,
mean_mcse=mean_mcse,
sd_mcse=sd_mcse,
textsize=textsize,
labeller=labeller,
text_kwargs=text_kwargs,
rug_kwargs=rug_kwargs,
extra_kwargs=extra_kwargs,
idata=idata,
rug_kind=rug_kind,
backend_kwargs=backend_kwargs,
show=show,
)
if backend is None:
backend = rcParams["plot.backend"]
backend = backend.lower()
# TODO: Add backend kwargs
plot = get_plotting_function("plot_mcse", "mcseplot", backend)
ax = plot(**mcse_kwargs)
return ax
|
def plot_mcse(
idata,
var_names=None,
filter_vars=None,
coords=None,
errorbar=False,
grid=None,
figsize=None,
textsize=None,
extra_methods=False,
rug=False,
rug_kind="diverging",
n_points=20,
labeller=None,
ax=None,
rug_kwargs=None,
extra_kwargs=None,
text_kwargs=None,
backend=None,
backend_kwargs=None,
show=None,
**kwargs
):
"""Plot quantile or local Monte Carlo Standard Error.
Parameters
----------
idata: obj
Any object that can be converted to an :class:`arviz.InferenceData` object
Refer to documentation of :func:`arviz.convert_to_dataset` for details
var_names: list of variable names, optional
Variables to be plotted. Prefix the variables by ``~`` when you want to exclude
them from the plot.
filter_vars: {None, "like", "regex"}, optional, default=None
If `None` (default), interpret var_names as the real variables names. If "like",
interpret var_names as substrings of the real variables names. If "regex",
interpret var_names as regular expressions on the real variables names. A la
`pandas.filter`.
coords: dict, optional
Coordinates of var_names to be plotted. Passed to :meth:`xarray.Dataset.sel <xarray:xarray.Dataset.sel>`
errorbar: bool, optional
Plot quantile value +/- mcse instead of plotting mcse.
grid : tuple
Number of rows and columns. Defaults to None, the rows and columns are
automatically inferred.
figsize: tuple, optional
Figure size. If None it will be defined automatically.
textsize: float, optional
Text size scaling factor for labels, titles and lines. If None it will be autoscaled based
on figsize.
extra_methods: bool, optional
Plot mean and sd MCSE as horizontal lines. Only taken into account when
``errorbar=False``.
rug: bool
Plot rug plot of values diverging or that reached the max tree depth.
rug_kind: bool
Variable in sample stats to use as rug mask. Must be a boolean variable.
n_points: int
Number of points for which to plot their quantile/local ess or number of subsets
in the evolution plot.
labeller : labeller instance, optional
Class providing the method `make_label_vert` to generate the labels in the plot titles.
Read the :ref:`label_guide` for more details and usage examples.
ax: numpy array-like of matplotlib axes or bokeh figures, optional
A 2D array of locations into which to plot the densities. If not supplied, Arviz will create
its own array of plot areas (and return it).
rug_kwargs: dict
kwargs passed to rug plot in
:meth:`mpl:matplotlib.axes.Axes.plot` or :class:`bokeh:bokeh.models.glyphs.Scatter`
extra_kwargs: dict, optional
kwargs passed as extra method lines in
:meth:`mpl:matplotlib.axes.Axes.axhline` or :class:`bokeh:bokeh.models.Span`
text_kwargs: dict, optional
kwargs passed to :meth:`mpl:matplotlib.axes.Axes.annotate` for extra methods lines labels.
It accepts the additional key ``x`` to set ``xy=(text_kwargs["x"], mcse)``
backend: str, optional
Select plotting backend {"matplotlib","bokeh"}. Default "matplotlib".
backend_kwargs: bool, optional
These are kwargs specific to the backend being passed to
:func:`matplotlib.pyplot.subplots` or :func:`bokeh.plotting.figure`.
show: bool, optional
Call backend show function.
**kwargs
Passed as-is to :meth:`mpl:matplotlib.axes.Axes.hist` or :meth:`mpl:matplotlib.axes.Axes.plot` in matplotlib depending on the value of `kind`.
Returns
-------
axes: matplotlib axes or bokeh figures
References
----------
* Vehtari et al. (2019) see https://arxiv.org/abs/1903.08008
Examples
--------
Plot quantile Monte Carlo Standard Error.
.. plot::
:context: close-figs
>>> import arviz as az
>>> idata = az.load_arviz_data("centered_eight")
>>> coords = {"school": ["Deerfield", "Lawrenceville"]}
>>> az.plot_mcse(
... idata, var_names=["mu", "theta"], coords=coords
... )
"""
mean_mcse = None
sd_mcse = None
if coords is None:
coords = {}
if "chain" in coords or "draw" in coords:
raise ValueError("chain and draw are invalid coordinates for this kind of plot")
if labeller is None:
labeller = BaseLabeller()
data = get_coords(convert_to_dataset(idata, group="posterior"), coords)
var_names = _var_names(var_names, data, filter_vars)
probs = np.linspace(1 / n_points, 1 - 1 / n_points, n_points)
mcse_dataset = xr.concat(
[mcse(data, var_names=var_names, method="quantile", prob=p) for p in probs], dim="mcse_dim"
)
plotters = filter_plotters_list(
list(xarray_var_iter(mcse_dataset, var_names=var_names, skip_dims={"mcse_dim"})),
"plot_mcse",
)
length_plotters = len(plotters)
rows, cols = default_grid(length_plotters, grid=grid)
if extra_methods:
mean_mcse = mcse(data, var_names=var_names, method="mean")
sd_mcse = mcse(data, var_names=var_names, method="sd")
mcse_kwargs = dict(
ax=ax,
plotters=plotters,
length_plotters=length_plotters,
rows=rows,
cols=cols,
figsize=figsize,
errorbar=errorbar,
rug=rug,
data=data,
probs=probs,
kwargs=kwargs,
extra_methods=extra_methods,
mean_mcse=mean_mcse,
sd_mcse=sd_mcse,
textsize=textsize,
labeller=labeller,
text_kwargs=text_kwargs,
rug_kwargs=rug_kwargs,
extra_kwargs=extra_kwargs,
idata=idata,
rug_kind=rug_kind,
backend_kwargs=backend_kwargs,
show=show,
)
if backend is None:
backend = rcParams["plot.backend"]
backend = backend.lower()
# TODO: Add backend kwargs
plot = get_plotting_function("plot_mcse", "mcseplot", backend)
ax = plot(**mcse_kwargs)
return ax
|
54,293 |
def tmu(mu, data, pdf, init_pars, par_bounds):
r"""
The test statistic, :math:`t_{\mu}`, for establishing an two-sided
intervals on the strength parameter, :math:`\mu`.
Args:
mu (Number or Tensor): The signal strength parameter
data (Tensor): The data to be considered
pdf (~pyhf.pdf.Model): The HistFactory statistical model used in the likelihood ratio calculation
init_pars (`list`): Values to initialize the model parameters at for the fit
par_bounds (`list` of `list`\s or `tuple`\s): The extrema of values the model parameters are allowed to reach in the fit
Returns:
Float: The calculated test statistic, :math:`q_{\mu}`
"""
if pdf.config.poi_index is None:
raise UnspecifiedPOI(
'No POI is defined. A POI is required for profile likelihood based test statistics.'
)
if par_bounds[pdf.config.poi_index][0] == 0:
log.warning(
'tmu test statistic used for fit configuration with POI bounded at zero. Use qmutilde.'
)
return _tmu_like(mu, data, pdf, init_pars, par_bounds)
|
def tmu(mu, data, pdf, init_pars, par_bounds):
r"""
The test statistic, :math:`t_{\mu}`, for establishing an two-sided
intervals on the strength parameter, :math:`\mu`.
Args:
mu (Number or Tensor): The signal strength parameter
data (Tensor): The data to be considered
pdf (~pyhf.pdf.Model): The HistFactory statistical model used in the likelihood ratio calculation
init_pars (`list`): Values to initialize the model parameters at for the fit
par_bounds (`list` of `list`\s or `tuple`\s): The extrema of values the model parameters are allowed to reach in the fit
Returns:
Float: The calculated test statistic, :math:`q_{\mu}`
"""
if pdf.config.poi_index is None:
raise UnspecifiedPOI(
'No POI is defined. A POI is required for profile likelihood based test statistics.'
)
if par_bounds[pdf.config.poi_index][0] == 0:
log.warning(
'tmu test statistic used for fit configuration with POI bounded at zero.\n'
+ 'Use the tmu_tilde test statistic (pyhf.infer.test_statistics.tmu_tilde) instead.'
)
return _tmu_like(mu, data, pdf, init_pars, par_bounds)
|
32,570 |
def denote_file(args):
should_continue = True
file = demisto.get(args, 'file')
feDone = False
feSubmissionKeys = {}
# profiles = ['win10x64','win7-sp1','win7x64-sp1','winxp-sp3']
profiles = argToList(args['profiles'])
analysistype = args.get('analysistype', 0)
prefetch = args.get('prefetch', 1)
# Make sure fireeye available
if demisto.executeCommand('IsIntegrationAvailable', {'brandname': 'fireeye'})[0]['Contents'] != 'yes':
feDone = True
# Upload file and get submission_key
if not feDone:
bArgs = {'analysistype': analysistype,
'application': '0',
'force': "true",
'prefetch': prefetch,
'priority': "1",
'timeout': "180",
'upload': file}
for profile in profiles:
bArgs['profiles'] = profile
resp = demisto.executeCommand('fe-submit', bArgs)
if isError(resp[0]):
demisto.results(resp)
should_continue = False
break
else:
feSubmissionKey = demisto.get(resp[0], 'Contents')
if isinstance(feSubmissionKey, str):
feSubmissionKey = json.loads(feSubmissionKey)
feSubmissionKeys[profile] = demisto.get(feSubmissionKey[0], 'ID')
else:
demisto.results({"Type": entryTypes["error"], "ContentsFormat": formats["text"],
"Contents": 'FireEye: Integration not available.'})
should_continue = False
if should_continue:
poll_stage(feDone, feSubmissionKeys, profiles, file)
|
def detonate_file(args):
should_continue = True
file = demisto.get(args, 'file')
feDone = False
feSubmissionKeys = {}
# profiles = ['win10x64','win7-sp1','win7x64-sp1','winxp-sp3']
profiles = argToList(args['profiles'])
analysistype = args.get('analysistype', 0)
prefetch = args.get('prefetch', 1)
# Make sure fireeye available
if demisto.executeCommand('IsIntegrationAvailable', {'brandname': 'fireeye'})[0]['Contents'] != 'yes':
feDone = True
# Upload file and get submission_key
if not feDone:
bArgs = {'analysistype': analysistype,
'application': '0',
'force': "true",
'prefetch': prefetch,
'priority': "1",
'timeout': "180",
'upload': file}
for profile in profiles:
bArgs['profiles'] = profile
resp = demisto.executeCommand('fe-submit', bArgs)
if isError(resp[0]):
demisto.results(resp)
should_continue = False
break
else:
feSubmissionKey = demisto.get(resp[0], 'Contents')
if isinstance(feSubmissionKey, str):
feSubmissionKey = json.loads(feSubmissionKey)
feSubmissionKeys[profile] = demisto.get(feSubmissionKey[0], 'ID')
else:
demisto.results({"Type": entryTypes["error"], "ContentsFormat": formats["text"],
"Contents": 'FireEye: Integration not available.'})
should_continue = False
if should_continue:
poll_stage(feDone, feSubmissionKeys, profiles, file)
|
8,899 |
def test_list_parse_legacy_coma():
option = types.ListAttribute('foo')
assert option.parse("""value 1, # value 2, value 3""") == [
'value 1',
'# value 2',
'value 3',
]
|
def test_list_parse_legacy_comma():
option = types.ListAttribute('foo')
assert option.parse("""value 1, # value 2, value 3""") == [
'value 1',
'# value 2',
'value 3',
]
|
47,992 |
def add_page(output_root, parent, *, id=None, path=None, title=None):
if parent.tag == 'tab':
parent.attrib['type'] = 'usergroup'
element = ET.SubElement(parent, 'tab', type='user', title=title, url='@ref ' + id if id else '')
if not path:
assert title, "title must be specified if path isn't"
element.attrib['title'] = title
return element
output_path = output_root / path
output_path.parent.mkdir(parents=True, exist_ok=True)
with (OMZ_ROOT / path).open('r', encoding='utf-8') as input_file:
lines = input_file.readlines()
page = omzdocs.DocumentationPage(''.join(lines))
if page.title is None:
raise RuntimeError(f'{path}: must begin with level 1 heading')
if not title:
title = page.title
element.attrib['title'] = title
# the only way to override the ID that Doxygen gives Markdown pages
# is to add a label to the top-level heading. For simplicity, we hardcode
# the assumption that the file immediately begins with that heading.
if not lines[0].startswith('# '):
raise RuntimeError(f'{path}: line 1 must contain the level 1 heading')
assert id, "id must be specified if path is"
lines[0] = lines[0].rstrip('\n') + f' {{#{id}}}\n'
with (output_root / path).open('w', encoding='utf-8') as output_file:
output_file.writelines(lines)
# copy all referenced images
image_urls = [ref.url for ref in page.external_references() if ref.type == 'image']
for image_url in image_urls:
parsed_image_url = urllib.parse.urlparse(image_url)
if parsed_image_url.scheme or parsed_image_url.netloc:
continue # not a relative URL
image_rel_path = path.parent / urllib.request.url2pathname(parsed_image_url.path)
image_filename = image_rel_path.name
image_abs_path = (OMZ_ROOT / image_rel_path).resolve()
if image_filename in all_images_paths and all_images_paths[image_filename] != image_abs_path:
raise RuntimeError(f'{path}: Image with "{image_filename}" filename already exists. '
f'Rename "{image_rel_path}" to unique name.')
else:
all_images_paths[image_filename] = image_abs_path
(output_root / image_rel_path.parent).mkdir(parents=True, exist_ok=True)
shutil.copyfile(image_abs_path, output_root / image_rel_path)
return element
|
def add_page(output_root, parent, *, id=None, path=None, title=None):
if parent.tag == 'tab':
parent.attrib['type'] = 'usergroup'
element = ET.SubElement(parent, 'tab', type='user', title=title, url='@ref ' + id if id else '')
if not path:
assert title, "title must be specified if path isn't"
element.attrib['title'] = title
return element
output_path = output_root / path
output_path.parent.mkdir(parents=True, exist_ok=True)
with (OMZ_ROOT / path).open('r', encoding='utf-8') as input_file:
lines = input_file.readlines()
page = omzdocs.DocumentationPage(''.join(lines))
if page.title is None:
raise RuntimeError(f'{path}: must begin with level 1 heading')
if not title:
title = page.title
element.attrib['title'] = title
# the only way to override the ID that Doxygen gives Markdown pages
# is to add a label to the top-level heading. For simplicity, we hardcode
# the assumption that the file immediately begins with that heading.
if not lines[0].startswith('# '):
raise RuntimeError(f'{path}: line 1 must contain the level 1 heading')
assert id, "id must be specified if path is"
lines[0] = lines[0].rstrip('\n') + f' {{#{id}}}\n'
with (output_root / path).open('w', encoding='utf-8') as output_file:
output_file.writelines(lines)
# copy all referenced images
image_urls = [ref.url for ref in page.external_references() if ref.type == 'image']
for image_url in image_urls:
parsed_image_url = urllib.parse.urlparse(image_url)
if parsed_image_url.scheme or parsed_image_url.netloc:
continue # not a relative URL
image_rel_path = path.parent / urllib.request.url2pathname(parsed_image_url.path)
image_filename = image_rel_path.name
image_abs_path = (OMZ_ROOT / image_rel_path).resolve()
if image_filename in all_images_paths and all_images_paths[image_filename] != image_abs_path:
raise RuntimeError(f'{path}: Image with "{image_filename}" filename already exists. '
f'Rename "{image_rel_path}" to unique name.')
all_images_paths[image_filename] = image_abs_path
(output_root / image_rel_path.parent).mkdir(parents=True, exist_ok=True)
shutil.copyfile(image_abs_path, output_root / image_rel_path)
return element
|
31,791 |
def collect_entries_data_from_response(parsed_feed_data: FeedParserDict, limit: Union[int, None]) -> List[Dict[str, Any]]:
"""Collects relevant data from the parsed RSS feed entries.
Args:
parsed_feed_data (FeedParserDict): Parsed RSS feed data.
limit (Union[int, None]): Maximum number of results to return.
Returns:
List[Dict[str, Any]]: The data from the RSS feed relevant for the widget.
"""
entries_data: List[Dict[str, Any]] = []
if not parsed_feed_data:
raise DemistoException("Could not parse feed data.")
if not limit:
return entries_data
for entry in reversed(parsed_feed_data.entries):
if entry:
published = email.utils.parsedate(entry.published)
if not published:
continue
published_dt = datetime.fromtimestamp(mktime(published))
published_formatted = published_dt.strftime('%B %-d, %Y %-I:%M %p')
entries_data.append(
{
'timestamp': published_formatted,
'link': entry.get('link'),
'title': entry.get('title'),
'summary': entry.get('summary'),
'author': entry.get('author')
}
)
if limit != 'all':
limit -= 1
if limit == 0:
break
return entries_data
|
def collect_entries_data_from_response(parsed_feed_data: FeedParserDict, limit: Union[int, None]) -> List[Dict[str, Any]]:
"""Collects relevant data from the parsed RSS feed entries.
Args:
parsed_feed_data (FeedParserDict): Parsed RSS feed data.
limit (Union[int, None]): Maximum number of results to return.
Returns:
List[Dict[str, Any]]: The data from the RSS feed relevant for the widget.
"""
entries_data: List[Dict[str, Any]] = []
if not parsed_feed_data:
raise DemistoException("Could not parse feed data.")
if not limit:
return entries_data
for entry in reversed(parsed_feed_data.entries):
if entry:
published = email.utils.parsedate(entry.published)
if not published:
continue
published_dt = datetime.fromtimestamp(mktime(published))
published_formatted = published_dt.strftime('%B %-d, %Y %-I:%M %p')
entries_data.append(
{
'timestamp': published_formatted,
'link': entry.get('link'),
'title': entry.get('title'),
'summary': entry.get('summary'),
'author': entry.get('author'),
}
)
if limit != 'all':
limit -= 1
if limit == 0:
break
return entries_data
|
17,396 |
def to_cftime_datetime(date_str_or_date, calendar=None):
cftime_datetime_base = _import_cftime_datetime_base()
if isinstance(date_str_or_date, str):
if calendar is None:
raise ValueError(
"If converting a string to a cftime datetime object, "
"a calendar type must be provided"
)
date, _ = _parse_iso8601_with_reso(get_date_type(calendar), date_str_or_date)
return date
elif isinstance(date_str_or_date, cftime_datetime_base):
return date_str_or_date
else:
raise TypeError(
"date_str_or_date must be a string or a "
"subclass of cftime.datetime_base. Instead got "
"{!r}.".format(date_str_or_date)
)
|
def to_cftime_datetime(date_str_or_date, calendar=None):
cftime_datetime_base = _import_cftime_datetime_base()
if isinstance(date_str_or_date, str):
if calendar is None:
raise ValueError(
"If converting a string to a cftime datetime object, "
"a calendar type must be provided"
)
date, _ = _parse_iso8601_with_reso(get_date_type(calendar), date_str_or_date)
return date
elif isinstance(date_str_or_date, cftime_datetime_base):
return date_str_or_date
else:
raise TypeError(
"date_str_or_date must be a string or a "
"subclass of cftime.datetime_base (formerly cftime.datetime). Instead got "
"{!r}.".format(date_str_or_date)
)
|
30,920 |
def main():
args = demisto.args()
value = args.get("value")
if type(value) == str:
try:
value = json.loads(value)
except Exception as err:
return_error("The input is not valid JSON")
expression = args.get("expression")
result = jmespath_search(expression, value)
return_results(result)
|
def main():
args = demisto.args()
value = args.get("value")
if isinstance(value, str):
try:
value = json.loads(value)
except Exception as err:
return_error("The input is not valid JSON")
expression = args.get("expression")
result = jmespath_search(expression, value)
return_results(result)
|
17,528 |
def expand_sdk_name(name):
if name in ('latest', 'sdk-latest', 'latest-64bit', 'sdk-latest-64bit'):
# This is effectly the default SDK
return str(find_latest_releases_sdk('upstream'))
elif name in ('latest-fastcomp', 'latest-releases-fastcomp'):
return str(find_latest_releases_sdk('fastcomp'))
elif name in ('latest-upstream', 'latest-clang-upstream', 'latest-releases-upstream'):
return str(find_latest_releases_sdk('upstream'))
elif name in ('tot', 'sdk-tot'):
return str(find_tot_sdk('upstream'))
elif name == 'tot-upstream':
return str(find_tot_sdk('upstream'))
elif name in ('tot-fastcomp', 'sdk-nightly-latest'):
return str(find_tot_sdk('fastcomp'))
else:
# check if it's a release handled by an emscripten-releases version,
# and if so use that by using the right hash. we support a few notations,
# x.y.z[-(upstream|fastcomp_])
# sdk-x.y.z[-(upstream|fastcomp_])-64bit
# TODO: support short notation for old builds too?
backend = None
fullname = name
if '-upstream' in fullname:
fullname = name.replace('-upstream', '')
backend = 'upstream'
elif '-fastcomp' in fullname:
fullname = fullname.replace('-fastcomp', '')
backend = 'fastcomp'
fullname = fullname.replace('sdk-', '').replace('-64bit', '').replace('tag-', '')
releases_info = load_releases_info()['releases']
release_hash = get_release_hash(fullname, releases_info)
if release_hash:
if backend is None:
if version_key(fullname) >= (1, 39, 0):
backend = 'upstream'
else:
backend = 'fastcomp'
if backend == 'fastcomp' and version_key(fullname) >= (2, 0, 0):
print('Note: fastcomp is not longer available in version 2.0.0 and above')
return name
return 'sdk-releases-%s-%s-64bit' % (backend, release_hash)
return name
|
def expand_sdk_name(name):
if name in ('latest', 'sdk-latest', 'latest-64bit', 'sdk-latest-64bit'):
# This is effectly the default SDK
return str(find_latest_releases_sdk('upstream'))
elif name in ('latest-fastcomp', 'latest-releases-fastcomp'):
return str(find_latest_releases_sdk('fastcomp'))
elif name in ('latest-upstream', 'latest-clang-upstream', 'latest-releases-upstream'):
return str(find_latest_releases_sdk('upstream'))
elif name in ('tot', 'sdk-tot'):
return str(find_tot_sdk('upstream'))
elif name == 'tot-upstream':
return str(find_tot_sdk('upstream'))
elif name in ('tot-fastcomp', 'sdk-nightly-latest'):
return str(find_tot_sdk('fastcomp'))
else:
# check if it's a release handled by an emscripten-releases version,
# and if so use that by using the right hash. we support a few notations,
# x.y.z[-(upstream|fastcomp_])
# sdk-x.y.z[-(upstream|fastcomp_])-64bit
# TODO: support short notation for old builds too?
backend = None
fullname = name
if '-upstream' in fullname:
fullname = name.replace('-upstream', '')
backend = 'upstream'
elif '-fastcomp' in fullname:
fullname = fullname.replace('-fastcomp', '')
backend = 'fastcomp'
fullname = fullname.replace('sdk-', '').replace('-64bit', '').replace('tag-', '')
releases_info = load_releases_info()['releases']
release_hash = get_release_hash(fullname, releases_info)
if release_hash:
if backend is None:
if version_key(fullname) >= (1, 39, 0):
backend = 'upstream'
else:
backend = 'fastcomp'
if backend == 'fastcomp' and version_key(fullname) >= (2, 0, 0):
print('Note: fastcomp is not longer available in version 2.0.0 and above. please use either the upstream backend, or an earlier fastcomp version')
return name
return 'sdk-releases-%s-%s-64bit' % (backend, release_hash)
return name
|
30,834 |
def apply_security_profile_command():
pre_post = demisto.args().get('pre_post')
profile_type = demisto.args().get('profile_type')
rule_name = demisto.args().get('rule_name')
profile_name = demisto.args().get('profile_name')
if DEVICE_GROUP:
if not pre_post:
raise Exception('Please provide the pre_post argument when apply profiles to rules in Panorama instance.')
result = apply_security_profile(pre_post, rule_name, profile_type, profile_name)
if result['response']['@status'] == 'success':
demisto.results(f'The profile {profile_name} has been applied to the rule {rule_name}')
else:
demisto.results(result['response']['msg'])
|
def apply_security_profile_command():
pre_post = demisto.args().get('pre_post')
profile_type = demisto.args().get('profile_type')
rule_name = demisto.args().get('rule_name')
profile_name = demisto.args().get('profile_name')
if DEVICE_GROUP:
if not pre_post:
raise Exception('Please provide the pre_post argument when applying profiles to rules in Panorama instance.')
result = apply_security_profile(pre_post, rule_name, profile_type, profile_name)
if result['response']['@status'] == 'success':
demisto.results(f'The profile {profile_name} has been applied to the rule {rule_name}')
else:
demisto.results(result['response']['msg'])
|
46,522 |
def get_max_basic_value(typ: str) -> Any:
if typ == 'bool':
return True
if typ[:4] == 'uint':
size = int(typ[4:])
assert size in (8, 16, 32, 64, 128, 256)
return 2**size - 1
if typ == 'byte':
return 0xff
else:
raise ValueError("Not a basic type")
|
def get_max_basic_value(typ: str) -> Any:
if typ == 'bool':
return True
if typ[:4] == 'uint':
size = int(typ[4:])
assert size in UINT_SIZES
return 2**size - 1
if typ == 'byte':
return 0xff
else:
raise ValueError("Not a basic type")
|
41,308 |
def get_metric_name(metric):
if isinstance(metric, tuple):
names, metric = metric
elif hasattr(metric, '__name__'):
names = metric.__name__
elif hasattr(metric, '__class__'):
names = camel_to_snake(metric.__class__.__name__)
else:
names = 'unknown_metric'
return names, metric
|
def get_metric_name(metric):
if isinstance(metric, tuple):
name, metric = metric
elif hasattr(metric, '__name__'):
names = metric.__name__
elif hasattr(metric, '__class__'):
names = camel_to_snake(metric.__class__.__name__)
else:
names = 'unknown_metric'
return names, metric
|
31,298 |
def get_indicators(client, indicator_type: List[str], limit: int, last_run_id: Optional[str] = None,
tlp_color: Optional[str] = None) -> Tuple[str, list]:
""" Retrieving indicators from the API
Args:
client: OpenCTI Client object.
indicator_type: List of indicators types to return.
last_run_id: The last id from the previous call to use pagination.
limit: the max indicators to fetch
tlp_color: traffic Light Protocol color
Returns:
new_last_run: the id of the last indicator
indicators: list of indicators
"""
indicator_type = build_indicator_list(indicator_type)
observables = client.stix_cyber_observable.list(types=indicator_type, first=limit, after=last_run_id,
withPagination=True)
new_last_run = observables.get('pagination').get('endCursor')
indicators = []
for item in observables.get('entities'):
indicator = {
"value": item['observable_value'],
"type": OPENCTI_TYPES_TO_XSOAR.get(item['entity_type'], item['entity_type']),
"rawJSON": item,
"fields": {
"tags": [tag.get('value') for tag in item.get('objectLabel')],
"description": item.get('x_opencti_description')
}
}
if tlp_color:
indicator['fields']['trafficlightprotocol'] = tlp_color
indicators.append(indicator)
return new_last_run, indicators
|
def get_indicators(client, indicator_types: List[str], limit: int, last_run_id: Optional[str] = None,
tlp_color: Optional[str] = None) -> Tuple[str, list]:
""" Retrieving indicators from the API
Args:
client: OpenCTI Client object.
indicator_type: List of indicators types to return.
last_run_id: The last id from the previous call to use pagination.
limit: the max indicators to fetch
tlp_color: traffic Light Protocol color
Returns:
new_last_run: the id of the last indicator
indicators: list of indicators
"""
indicator_type = build_indicator_list(indicator_type)
observables = client.stix_cyber_observable.list(types=indicator_type, first=limit, after=last_run_id,
withPagination=True)
new_last_run = observables.get('pagination').get('endCursor')
indicators = []
for item in observables.get('entities'):
indicator = {
"value": item['observable_value'],
"type": OPENCTI_TYPES_TO_XSOAR.get(item['entity_type'], item['entity_type']),
"rawJSON": item,
"fields": {
"tags": [tag.get('value') for tag in item.get('objectLabel')],
"description": item.get('x_opencti_description')
}
}
if tlp_color:
indicator['fields']['trafficlightprotocol'] = tlp_color
indicators.append(indicator)
return new_last_run, indicators
|
43,700 |
def max_independent_set(graph, constrained=True):
r"""Returns the QAOA cost Hamiltonian and the recommended mixer corresponding to the MaxIndependentSet problem,
for a given graph.
The goal of MaxIndependentSet is to find the largest possible independent set of a graph. Given some graph :math:`G`,
an independent set of :math:`G` is a set of vertices such that no two of the vertices in the set share a common edge.
Args:
graph (nx.Graph): a graph defining the pairs of wires on which each term of the Hamiltonian acts
constrained (bool): specifies the variant of QAOA that is performed (constrained or unconstrained)
Returns:
(.Hamiltonian, .Hamiltonian):
.. UsageDetails::
There are two variations of QAOA for this problem, constrained and unconstrained:
**Constrained**
.. note::
This method of constrained QAOA was introduced by Hadfield, Wang, Gorman, Rieffel, Venturelli, and Biswas
in `[arXiv:1709.03489] <https://arxiv.org/abs/1709.03489>`__.
The constrained MaxIndependentSet cost Hamiltonian is defined as:
.. math:: H_C \ = \ \displaystyle\sum_{v \in V(G)} Z_{v}
where :math:`V(G)` is the set of vertices of the input graph, and :math:`Z_i` is the Pauli-Z operator applied to the :math:`i`-th
vertex.
The returned mixer Hamiltonian is `~qaoa.bit_flip_mixer` applied to :math:`G`.
.. note::
**Recommended initialization circuit:**
Each wire in the :math:`|0\rangle` state
**Unconstrained**
The unconstrained MaxIndependentSet cost Hamiltonian is defined as:
.. math:: H_C \ = \ \frac{(i, j) \in E(G)} (Z_i Z_j \ - \ Z_i \ - \ Z_j) \ + \ \displaystyle\sum_{i \in V(G)} Z_i
where :math:`E(G)` is the edges of :math:`G`, :math:`V(G)` is the set of vertices, and :math:`Z_i` is the Pauli-Z operator
acting on the :math:`i`-th vertex.
The returned mixer Hamiltonian is `~qaoa.x_mixer` applied to all wires.
.. note::
**Recommended initialization circuit:**
Even superposition over all basis states
"""
if not isinstance(graph, nx.Graph):
raise ValueError("Input graph must be a nx.Graph, got {}".format(type(graph).__name__))
if constrained:
return (bit_driver(graph.nodes, 1), qaoa.bit_flip_mixer(graph, 0))
cost_h = edge_driver(graph, ['10', '01', '00']) + bit_driver(graph.nodes, 1)
mixer_h = qaoa.x_mixer(graph.nodes)
return (cost_h, mixer_h)
|
def max_independent_set(graph, constrained=True):
r"""Returns the QAOA cost Hamiltonian and the recommended mixer corresponding to the MaxIndependentSet problem,
for a given graph.
The goal of MaxIndependentSet is to find the largest possible independent set of a graph. Given some graph :math:`G`,
an independent set of :math:`G` is a set of vertices such that no two of the vertices in the set share a common edge.
Args:
graph (nx.Graph): a graph defining the pairs of wires on which each term of the Hamiltonian acts
constrained (bool): specifies the variant of QAOA that is performed (constrained or unconstrained)
Returns:
(.Hamiltonian, .Hamiltonian):
.. UsageDetails::
There are two variations of QAOA for this problem, constrained and unconstrained:
**Constrained**
.. note::
This method of constrained QAOA was introduced by Hadfield, Wang, Gorman, Rieffel, Venturelli, and Biswas
in `[arXiv:1709.03489] <https://arxiv.org/abs/1709.03489>`__.
The constrained MaxIndependentSet cost Hamiltonian is defined as:
.. math:: H_C \ = \ \displaystyle\sum_{v \in V(G)} Z_{v}
where :math:`V(G)` is the set of vertices of the input graph, and :math:`Z_i` is the Pauli-Z operator applied to the :math:`i`-th
vertex.
The returned mixer Hamiltonian is `~qaoa.bit_flip_mixer` applied to :math:`G`.
.. note::
**Recommended initialization circuit:**
Each wire in the :math:`|0\rangle` state
**Unconstrained**
The unconstrained MaxIndependentSet cost Hamiltonian is defined as:
.. math:: H_C \ = \ \frac{(i, j) \in E(G)} (Z_i Z_j \ - \ Z_i \ - \ Z_j) \ + \ \displaystyle\sum_{i \in V(G)} Z_i
where :math:`E(G)` is the set of edges of :math:`G`, :math:`V(G)` is the set of vertices, and :math:`Z_i` is the Pauli-Z operator
acting on the :math:`i`-th vertex.
The returned mixer Hamiltonian is `~qaoa.x_mixer` applied to all wires.
.. note::
**Recommended initialization circuit:**
Even superposition over all basis states
"""
if not isinstance(graph, nx.Graph):
raise ValueError("Input graph must be a nx.Graph, got {}".format(type(graph).__name__))
if constrained:
return (bit_driver(graph.nodes, 1), qaoa.bit_flip_mixer(graph, 0))
cost_h = edge_driver(graph, ['10', '01', '00']) + bit_driver(graph.nodes, 1)
mixer_h = qaoa.x_mixer(graph.nodes)
return (cost_h, mixer_h)
|
14,458 |
def initialize_logger(level=0):
"""Set up the global logging level based on the verbosity number."""
VERBOSITY_MAP = {
0: logging.NOTSET,
1: logging.INFO,
2: logging.DEBUG
}
handler = logging.StreamHandler()
formatter = logging.Formatter('%(levelname)-8s %(message)s')
handler.setFormatter(formatter)
logger = logging.getLogger("ansiblelint")
logger.addHandler(handler)
# unknown logging level is treated as DEBUG
logging_level = VERBOSITY_MAP.get(level, logging.DEBUG)
logger.setLevel(logging_level)
logger.debug("Logging initialized to level %s", logging_level)
|
def initialize_logger(level=0):
"""Set up the global logging level based on the verbosity number."""
VERBOSITY_MAP = {
0: logging.NOTSET,
1: logging.INFO,
2: logging.DEBUG
}
handler = logging.StreamHandler()
formatter = logging.Formatter('%(levelname)-8s %(message)s')
handler.setFormatter(formatter)
logger = logging.getLogger("ansiblelint")
logger.addHandler(handler)
# unknown logging level is treated as DEBUG
logging_level = VERBOSITY_MAP.get(level, logging.DEBUG)
logger.setLevel(logging_level)
_logger.debug("Logging initialized to level %s", logging_level)
|
5,296 |
def _load_fasttext_format(model_file, encoding='utf-8', full_model=True):
"""Load the input-hidden weight matrix from Facebook's native fasttext `.bin` output files.
Parameters
----------
model_file : str
Full path to the FastText model file.
encoding : str, optional
Specifies the file encoding.
full_model : boolean, optional
If False, skips loading the hidden output matrix. This saves a fair bit
of CPU time and RAM, but prevents training continuation.
Returns
-------
:class: `~gensim.models.fasttext.FastText`
The loaded model.
"""
with utils.open(model_file, 'rb') as fin:
m = gensim.models._fasttext_bin.load(fin, encoding=encoding, full_model=full_model)
# Here we are checking for unsupported FB FT Modes
if m.loss != 1 and m.loss != 2:
raise ValueError("Loss paramter value can be either 1 (for Hierarchical Softmax) or 2 (for Negative Sampling)")
elif m.model != 1 and m.model != 2:
raise ValueError(
"Model parameter value can be either 1 (for Continous Bag of Words model) or 2 (for Skip-gram model)")
model = FastText(
vector_size=m.dim,
window=m.ws,
epochs=m.epoch,
negative=m.neg,
hs=int(m.loss == 1),
sg=int(m.model == 2),
bucket=m.bucket,
min_count=m.min_count,
sample=m.t,
min_n=m.minn,
max_n=m.maxn,
)
model.corpus_total_words = m.ntokens
model.raw_vocab = m.raw_vocab
model.nwords = m.nwords
model.vocab_size = m.vocab_size
#
# This is here to fix https://github.com/RaRe-Technologies/gensim/pull/2373.
#
# We explicitly set min_count=1 regardless of the model's parameters to
# ignore the trim rule when building the vocabulary. We do this in order
# to support loading native models that were trained with pretrained vectors.
# Such models will contain vectors for _all_ encountered words, not only
# those occurring more frequently than min_count.
#
# Native models trained _without_ pretrained vectors already contain the
# trimmed raw_vocab, so this change does not affect them.
#
model.prepare_vocab(update=True, min_count=1)
model.num_original_vectors = m.vectors_ngrams.shape[0]
model.wv.init_post_load(m.vectors_ngrams)
model._init_post_load(m.hidden_output)
_check_model(model)
model.add_lifecycle_event(
"load_fasttext_format",
msg=f"loaded {m.vectors_ngrams.shape} weight matrix for fastText model from {fin.name}",
)
return model
|
def _load_fasttext_format(model_file, encoding='utf-8', full_model=True):
"""Load the input-hidden weight matrix from Facebook's native fasttext `.bin` output files.
Parameters
----------
model_file : str
Full path to the FastText model file.
encoding : str, optional
Specifies the file encoding.
full_model : boolean, optional
If False, skips loading the hidden output matrix. This saves a fair bit
of CPU time and RAM, but prevents training continuation.
Returns
-------
:class: `~gensim.models.fasttext.FastText`
The loaded model.
"""
with utils.open(model_file, 'rb') as fin:
m = gensim.models._fasttext_bin.load(fin, encoding=encoding, full_model=full_model)
# Here we are checking for unsupported FB FT Modes
if m.loss not in (1, 2):
raise ValueError("Loss paramter value can be either 1 (for Hierarchical Softmax) or 2 (for Negative Sampling)")
elif m.model != 1 and m.model != 2:
raise ValueError(
"Model parameter value can be either 1 (for Continous Bag of Words model) or 2 (for Skip-gram model)")
model = FastText(
vector_size=m.dim,
window=m.ws,
epochs=m.epoch,
negative=m.neg,
hs=int(m.loss == 1),
sg=int(m.model == 2),
bucket=m.bucket,
min_count=m.min_count,
sample=m.t,
min_n=m.minn,
max_n=m.maxn,
)
model.corpus_total_words = m.ntokens
model.raw_vocab = m.raw_vocab
model.nwords = m.nwords
model.vocab_size = m.vocab_size
#
# This is here to fix https://github.com/RaRe-Technologies/gensim/pull/2373.
#
# We explicitly set min_count=1 regardless of the model's parameters to
# ignore the trim rule when building the vocabulary. We do this in order
# to support loading native models that were trained with pretrained vectors.
# Such models will contain vectors for _all_ encountered words, not only
# those occurring more frequently than min_count.
#
# Native models trained _without_ pretrained vectors already contain the
# trimmed raw_vocab, so this change does not affect them.
#
model.prepare_vocab(update=True, min_count=1)
model.num_original_vectors = m.vectors_ngrams.shape[0]
model.wv.init_post_load(m.vectors_ngrams)
model._init_post_load(m.hidden_output)
_check_model(model)
model.add_lifecycle_event(
"load_fasttext_format",
msg=f"loaded {m.vectors_ngrams.shape} weight matrix for fastText model from {fin.name}",
)
return model
|
9,104 |
def _complete():
global adobe_glyphs
for i in range(256):
adobe_glyphs[f"/a{i}"] = chr(i)
adobe_glyphs["/.notdef"] = "□"
|
def _complete() -> None:
global adobe_glyphs
for i in range(256):
adobe_glyphs[f"/a{i}"] = chr(i)
adobe_glyphs["/.notdef"] = "□"
|
4,437 |
def _TDDR(signal, sample_rate):
# This function is the reference implementation for the TDDR algorithm for
# motion correction of fNIRS data, as described in:
#
# Fishburn F.A., Ludlum R.S., Vaidya C.J., & Medvedev A.V. (2019).
# Temporal Derivative Distribution Repair (TDDR): A motion correction
# method for fNIRS. NeuroImage, 184, 171-179.
# https://doi.org/10.1016/j.neuroimage.2018.09.025
#
# Usage:
# signals_corrected = TDDR( signals , sample_rate );
#
# Inputs:
# signals: A [sample x channel] matrix of uncorrected optical density or
# hb data
# sample_rate: A scalar reflecting the rate of acquisition in Hz
#
# Outputs:
# signals_corrected: A [sample x channel] matrix of corrected optical
# density data
from scipy.signal import butter, filtfilt
signal = np.array(signal)
if len(signal.shape) != 1:
for ch in range(signal.shape[1]):
signal[:, ch] = _TDDR(signal[:, ch], sample_rate)
return signal
# Preprocess: Separate high and low frequencies
filter_cutoff = .5
filter_order = 3
Fc = filter_cutoff * 2 / sample_rate
signal_mean = np.mean(signal)
signal -= signal_mean
if Fc < 1:
fb, fa = butter(filter_order, Fc)
signal_low = filtfilt(fb, fa, signal, padlen=0)
else:
signal_low = signal
signal_high = signal - signal_low
# Initialize
tune = 4.685
D = np.sqrt(np.finfo(signal.dtype).eps)
mu = np.inf
iter = 0
# Step 1. Compute temporal derivative of the signal
deriv = np.diff(signal_low)
# Step 2. Initialize observation weights
w = np.ones(deriv.shape)
# Step 3. Iterative estimation of robust weights
while iter < 50:
iter = iter + 1
mu0 = mu
# Step 3a. Estimate weighted mean
mu = np.sum(w * deriv) / np.sum(w)
# Step 3b. Calculate absolute residuals of estimate
dev = np.abs(deriv - mu)
# Step 3c. Robust estimate of standard deviation of the residuals
sigma = 1.4826 * np.median(dev)
# Step 3d. Scale deviations by standard deviation and tuning parameter
if sigma == 0:
break
r = dev / (sigma * tune)
# Step 3e. Calculate new weights according to Tukey's biweight function
w = ((1 - r**2) * (r < 1)) ** 2
# Step 3f. Terminate if new estimate is within
# machine-precision of old estimate
if abs(mu - mu0) < D * max(abs(mu), abs(mu0)):
break
# Step 4. Apply robust weights to centered derivative
new_deriv = w * (deriv - mu)
# Step 5. Integrate corrected derivative
signal_low_corrected = np.cumsum(np.insert(new_deriv, 0, 0.0))
# Postprocess: Center the corrected signal
signal_low_corrected = signal_low_corrected - np.mean(signal_low_corrected)
# Postprocess: Merge back with uncorrected high frequency component
signal_corrected = signal_low_corrected + signal_high + signal_mean
return signal_corrected
|
def _TDDR(signal, sample_rate):
# This function is the reference implementation for the TDDR algorithm for
# motion correction of fNIRS data, as described in:
#
# Fishburn F.A., Ludlum R.S., Vaidya C.J., & Medvedev A.V. (2019).
# Temporal Derivative Distribution Repair (TDDR): A motion correction
# method for fNIRS. NeuroImage, 184, 171-179.
# https://doi.org/10.1016/j.neuroimage.2018.09.025
#
# Usage:
# signals_corrected = TDDR( signals , sample_rate );
#
# Inputs:
# signals: A [sample x channel] matrix of uncorrected optical density or
# hemoglobin data
# sample_rate: A scalar reflecting the rate of acquisition in Hz
#
# Outputs:
# signals_corrected: A [sample x channel] matrix of corrected optical
# density data
from scipy.signal import butter, filtfilt
signal = np.array(signal)
if len(signal.shape) != 1:
for ch in range(signal.shape[1]):
signal[:, ch] = _TDDR(signal[:, ch], sample_rate)
return signal
# Preprocess: Separate high and low frequencies
filter_cutoff = .5
filter_order = 3
Fc = filter_cutoff * 2 / sample_rate
signal_mean = np.mean(signal)
signal -= signal_mean
if Fc < 1:
fb, fa = butter(filter_order, Fc)
signal_low = filtfilt(fb, fa, signal, padlen=0)
else:
signal_low = signal
signal_high = signal - signal_low
# Initialize
tune = 4.685
D = np.sqrt(np.finfo(signal.dtype).eps)
mu = np.inf
iter = 0
# Step 1. Compute temporal derivative of the signal
deriv = np.diff(signal_low)
# Step 2. Initialize observation weights
w = np.ones(deriv.shape)
# Step 3. Iterative estimation of robust weights
while iter < 50:
iter = iter + 1
mu0 = mu
# Step 3a. Estimate weighted mean
mu = np.sum(w * deriv) / np.sum(w)
# Step 3b. Calculate absolute residuals of estimate
dev = np.abs(deriv - mu)
# Step 3c. Robust estimate of standard deviation of the residuals
sigma = 1.4826 * np.median(dev)
# Step 3d. Scale deviations by standard deviation and tuning parameter
if sigma == 0:
break
r = dev / (sigma * tune)
# Step 3e. Calculate new weights according to Tukey's biweight function
w = ((1 - r**2) * (r < 1)) ** 2
# Step 3f. Terminate if new estimate is within
# machine-precision of old estimate
if abs(mu - mu0) < D * max(abs(mu), abs(mu0)):
break
# Step 4. Apply robust weights to centered derivative
new_deriv = w * (deriv - mu)
# Step 5. Integrate corrected derivative
signal_low_corrected = np.cumsum(np.insert(new_deriv, 0, 0.0))
# Postprocess: Center the corrected signal
signal_low_corrected = signal_low_corrected - np.mean(signal_low_corrected)
# Postprocess: Merge back with uncorrected high frequency component
signal_corrected = signal_low_corrected + signal_high + signal_mean
return signal_corrected
|
39,173 |
def spectrogram(
waveform: Tensor,
pad: int,
window: Tensor,
n_fft: int,
hop_length: int,
win_length: int,
power: Optional[float],
normalized: bool,
center: bool = True,
pad_mode: str = "reflect",
onesided: bool = True,
return_complex: Optional[bool] = None,
) -> Tensor:
r"""Create a spectrogram or a batch of spectrograms from a raw audio signal.
The spectrogram can be either magnitude-only or complex.
Args:
waveform (Tensor): Tensor of audio of dimension `(..., time)`
pad (int): Two sided padding of signal
window (Tensor): Window tensor that is applied/multiplied to each frame/window
n_fft (int): Size of FFT
hop_length (int): Length of hop between STFT windows
win_length (int): Window size
power (float or None): Exponent for the magnitude spectrogram,
(must be > 0) e.g., 1 for energy, 2 for power, etc.
If None, then the complex spectrum is returned instead.
normalized (bool): Whether to normalize by magnitude after stft
center (bool, optional): whether to pad :attr:`waveform` on both sides so
that the :math:`t`-th frame is centered at time :math:`t \times \text{hop\_length}`.
Default: ``True``
pad_mode (string, optional): controls the padding method used when
:attr:`center` is ``True``. Default: ``"reflect"``
onesided (bool, optional): controls whether to return half of results to
avoid redundancy. Default: ``True``
return_complex (bool, optional):
Deprecated and not used.
Returns:
Tensor: Dimension `(..., freq, time)`, freq is
``n_fft // 2 + 1`` and ``n_fft`` is the number of
Fourier bins, and time is the number of window hops (n_frame).
"""
if return_complex is not None:
warnings.warn(
"`return_complex` argument is now deprecated and is not effective."
"`torchaudio.functional.spectrogram(power=None)` always return tensor with "
"complex dtype. Please remove the argument in the function call."
)
if pad > 0:
# TODO add "with torch.no_grad():" back when JIT supports it
waveform = torch.nn.functional.pad(waveform, (pad, pad), "constant")
# pack batch
shape = waveform.size()
waveform = waveform.reshape(-1, shape[-1])
# default values are consistent with librosa.core.spectrum._spectrogram
spec_f = torch.stft(
input=waveform,
n_fft=n_fft,
hop_length=hop_length,
win_length=win_length,
window=window,
center=center,
pad_mode=pad_mode,
normalized=False,
onesided=onesided,
return_complex=True,
)
# unpack batch
spec_f = spec_f.reshape(shape[:-1] + spec_f.shape[-2:])
if normalized:
spec_f /= window.pow(2.).sum().sqrt()
if power is not None:
if power == 1.0:
return spec_f.abs()
return spec_f.abs().pow(power)
return spec_f
|
def spectrogram(
waveform: Tensor,
pad: int,
window: Tensor,
n_fft: int,
hop_length: int,
win_length: int,
power: Optional[float],
normalized: bool,
center: bool = True,
pad_mode: str = "reflect",
onesided: bool = True,
return_complex: Optional[bool] = None,
) -> Tensor:
r"""Create a spectrogram or a batch of spectrograms from a raw audio signal.
The spectrogram can be either magnitude-only or complex.
Args:
waveform (Tensor): Tensor of audio of dimension `(..., time)`
pad (int): Two sided padding of signal
window (Tensor): Window tensor that is applied/multiplied to each frame/window
n_fft (int): Size of FFT
hop_length (int): Length of hop between STFT windows
win_length (int): Window size
power (float or None): Exponent for the magnitude spectrogram,
(must be > 0) e.g., 1 for energy, 2 for power, etc.
If None, then the complex spectrum is returned instead.
normalized (bool): Whether to normalize by magnitude after stft
center (bool, optional): whether to pad :attr:`waveform` on both sides so
that the :math:`t`-th frame is centered at time :math:`t \times \text{hop\_length}`.
Default: ``True``
pad_mode (string, optional): controls the padding method used when
:attr:`center` is ``True``. Default: ``"reflect"``
onesided (bool, optional): controls whether to return half of results to
avoid redundancy. Default: ``True``
return_complex (bool, optional):
Deprecated and not used.
Returns:
Tensor: Dimension `(..., freq, time)`, freq is
``n_fft // 2 + 1`` and ``n_fft`` is the number of
Fourier bins, and time is the number of window hops (n_frame).
"""
if return_complex is not None:
warnings.warn(
"`return_complex` argument is now deprecated and is not effective."
"`torchaudio.functional.spectrogram(power=None)` always returns a tensor with "
"complex dtype. Please remove the argument in the function call."
)
if pad > 0:
# TODO add "with torch.no_grad():" back when JIT supports it
waveform = torch.nn.functional.pad(waveform, (pad, pad), "constant")
# pack batch
shape = waveform.size()
waveform = waveform.reshape(-1, shape[-1])
# default values are consistent with librosa.core.spectrum._spectrogram
spec_f = torch.stft(
input=waveform,
n_fft=n_fft,
hop_length=hop_length,
win_length=win_length,
window=window,
center=center,
pad_mode=pad_mode,
normalized=False,
onesided=onesided,
return_complex=True,
)
# unpack batch
spec_f = spec_f.reshape(shape[:-1] + spec_f.shape[-2:])
if normalized:
spec_f /= window.pow(2.).sum().sqrt()
if power is not None:
if power == 1.0:
return spec_f.abs()
return spec_f.abs().pow(power)
return spec_f
|
30,933 |
def panorama_refresh_edl(edl_name: str, edl_type: str, location: str, vsys: str):
params = {
'type': 'op',
'key': API_KEY
}
# if refreshing an EDL on the FW
if not edl_type and not location and not vsys:
edl = panorama_get_edl(edl_name)
edl_type = ''.join(edl['type'].keys())
# if refreshing an EDL on the Panorama
else:
if not edl_type or not location or not vsys:
raise Exception('To refresh an EDL from the Firewall on Panorama'
' please use the: edl_type, location an vsys arguments.')
params['cmd'] = f'<request><system><external-list><refresh><type><{edl_type}><name>{edl_name}' \
f'</name></{edl_type}></type></refresh></external-list></system></request>',
if location:
params['location'] = location
if vsys:
params['vsys'] = vsys
result = http_request(
URL,
'POST',
body=params,
)
return result
|
def panorama_refresh_edl(edl_name: str, edl_type: str, location: str, vsys: str):
params = {
'type': 'op',
'key': API_KEY
}
# if refreshing an EDL on the FW
if not edl_type and not location and not vsys:
edl = panorama_get_edl(edl_name)
edl_type = ''.join(edl['type'].keys())
# if refreshing an EDL on the Panorama
else:
if not edl_type or not location or not vsys:
raise Exception('To refresh an EDL from the Firewall on Panorama'
' please use the: edl_type, location and vsys arguments.')
params['cmd'] = f'<request><system><external-list><refresh><type><{edl_type}><name>{edl_name}' \
f'</name></{edl_type}></type></refresh></external-list></system></request>',
if location:
params['location'] = location
if vsys:
params['vsys'] = vsys
result = http_request(
URL,
'POST',
body=params,
)
return result
|
2,894 |
def density(w, **kwargs):
"""Compute density of a sparse vector.
Parameters
----------
w : array-like
The sparse vector.
Returns
-------
float
The density of w, between 0 and 1.
"""
if kwargs:
warnings.warn(
"Key arguments will be removed in one of the upcoming versions",
FutureWarning,
)
if hasattr(w, "toarray"):
d = float(w.nnz) / (w.shape[0] * w.shape[1])
else:
d = 0 if w is None else float((w != 0).sum()) / w.size
return d
|
def density(w, **kwargs):
"""Compute density of a sparse vector.
Parameters
----------
w : array-like
The sparse vector.
Returns
-------
float
The density of w, between 0 and 1.
"""
if kwargs:
warnings.warn(
"Additional keyword arguments are deprecated in version 1.2 and will be removed in version 1.4.",
FutureWarning,
)
if hasattr(w, "toarray"):
d = float(w.nnz) / (w.shape[0] * w.shape[1])
else:
d = 0 if w is None else float((w != 0).sum()) / w.size
return d
|
24,811 |
def _has_different_parameters_default_value(original, overridden):
"""
Check if original and overridden methods arguments have different default values
Return True if one of the overridden arguments has a default
value different from the default value of the original argument
If one of the method doesn't have argument (.args is None)
return False
"""
if original.args is None or overridden.args is None:
return False
all_args = chain(original.args, original.kwonlyargs)
original_param_names = [param.name for param in all_args]
default_missing = object()
for param_name in original_param_names:
try:
original_default = original.default_value(param_name)
except astroid.exceptions.NoDefault:
original_default = default_missing
try:
overridden_default = overridden.default_value(param_name)
except astroid.exceptions.NoDefault:
overridden_default = default_missing
default_list = [
arg == default_missing for arg in (original_default, overridden_default)
]
if any(default_list) and not all(default_list):
# Only one arg has no default value
return True
astroid_type_comparators = {
nodes.Const: lambda a, b: a.value == b.value,
nodes.ClassDef: lambda a, b: a.name == b.name,
nodes.Tuple: lambda a, b: a.elts == b.elts,
nodes.List: lambda a, b: a.elts == b.elts,
nodes.Dict: lambda a, b: a.items == b.items,
nodes.Name: lambda a, b: set(a.infer()) == set(b.infer()),
}
handled_types = tuple(astroid_type for astroid_type in astroid_type_comparators)
original_type = _get_node_type(original_default, handled_types)
if original_type:
# We handle only astroid types that are inside the dict astroid_type_compared_attr
if not isinstance(overridden_default, original_type):
# Two args with same name but different types
return True
if not astroid_type_comparators[original_type](
original_default, overridden_default
):
# Two args with same type but different values
return True
return False
|
def _has_different_parameters_default_value(original, overridden):
"""
Check if original and overridden methods arguments have different default values
Return True if one of the overridden arguments has a default
value different from the default value of the original argument
If one of the method doesn't have argument (.args is None)
return False
"""
if original.args is None or overridden.args is None:
return False
all_args = chain(original.args, original.kwonlyargs)
original_param_names = [param.name for param in all_args]
default_missing = object()
for param_name in original_param_names:
try:
original_default = original.default_value(param_name)
except astroid.exceptions.NoDefault:
original_default = default_missing
try:
overridden_default = overridden.default_value(param_name)
except astroid.exceptions.NoDefault:
overridden_default = default_missing
default_list = [
arg == default_missing for arg in (original_default, overridden_default)
]
if any(default_list) and not all(default_list):
# Only one arg has no default value
return True
astroid_type_comparators = {
nodes.Const: lambda a, b: a.value == b.value,
nodes.ClassDef: lambda a, b: a.name == b.name,
nodes.Tuple: lambda a, b: a.elts == b.elts,
nodes.List: lambda a, b: a.elts == b.elts,
nodes.Dict: lambda a, b: a.items == b.items,
nodes.Name: lambda a, b: set(a.infer()) == set(b.infer()),
}
original_type = _get_node_type(original_default, astroid_type_comparators.keys())
if original_type:
# We handle only astroid types that are inside the dict astroid_type_compared_attr
if not isinstance(overridden_default, original_type):
# Two args with same name but different types
return True
if not astroid_type_comparators[original_type](
original_default, overridden_default
):
# Two args with same type but different values
return True
return False
|
41,688 |
def get_library_output(line: List[str]):
"""
Check if the command is a linker invocation. If so, return the name of the
output file.
"""
for arg in line:
if arg.endswith(".so") and not arg.startswith("-"):
return arg
|
def get_library_output(line: List[str]) -> Optional[str]:
"""
Check if the command is a linker invocation. If so, return the name of the
output file.
"""
for arg in line:
if arg.endswith(".so") and not arg.startswith("-"):
return arg
|
51,896 |
def _remove(args):
initial_sources = spack.bootstrap.bootstrapping_sources()
names = [s['name'] for s in initial_sources]
if args.name not in names:
msg = ('cannot find any bootstrapping source named "{0}". '
'Run `spack bootstrap list` to see available sources.')
raise RuntimeError(msg.format(args.name))
for current_scope in spack.config.scopes():
sources = spack.config.get('bootstrap:sources', scope=current_scope) or []
if args.name in [s['name'] for s in sources]:
sources = [s for s in sources if s['name'] != args.name]
spack.config.set('bootstrap:sources', sources, scope=current_scope)
msg = ('Removed the bootstrapping source named "{0}" from the '
'"{1}" configuration scope.')
llnl.util.tty.msg(msg.format(args.name, current_scope))
trusted = spack.config.get('bootstrap:trusted', scope=current_scope) or []
if args.name in trusted:
trusted.pop(args.name)
spack.config.set('bootstrap:trusted', trusted, scope=current_scope)
msg = 'Deleting information on "{0}" from list of trusted sources'
llnl.util.tty.msg(msg.format(args.name))
|
def _remove(args):
for current_scope in spack.config.scopes():
sources = spack.config.get('bootstrap:sources', scope=current_scope) or []
if args.name in [s['name'] for s in sources]:
sources = [s for s in sources if s['name'] != args.name]
spack.config.set('bootstrap:sources', sources, scope=current_scope)
msg = ('Removed the bootstrapping source named "{0}" from the '
'"{1}" configuration scope.')
llnl.util.tty.msg(msg.format(args.name, current_scope))
trusted = spack.config.get('bootstrap:trusted', scope=current_scope) or []
if args.name in trusted:
trusted.pop(args.name)
spack.config.set('bootstrap:trusted', trusted, scope=current_scope)
msg = 'Deleting information on "{0}" from list of trusted sources'
llnl.util.tty.msg(msg.format(args.name))
return
# error if we did not find a source of this name
msg = ('cannot find any bootstrapping source named "{0}". '
'Run `spack bootstrap list` to see available sources.')
raise RuntimeError(msg.format(args.name))
|
31,407 |
def fetch_incidents(client: Client, last_run_violation: dict, first_fetch_violation: str, max_results: str):
""" This function will run each interval (default 1 minute)
:type client: client
:param client: Gamma client
:type last_run_violation: dict
:param last_run_violation: last violation ID that was queried from Gamma
:type first_fetch_violation: int
:param first_fetch_violation: if last_violation is None, then begin from this violation ID
:type max_results: int
:param max_results: the max number of violations to pull, bound by MAX_INCIDENTS_TO_FETCH
"""
try:
first_fetch_violation = int(first_fetch_violation)
max_results = int(max_results)
except:
raise ValueError("first_fetch_violation and max_limit must be integers")
if not first_fetch_violation > 0:
raise ValueError("first_fetch_violation must be equal to 1 or higher")
if not max_results > 0:
max_results = 10
elif max_results > MAX_INCIDENTS_TO_FETCH:
max_results = MAX_INCIDENTS_TO_FETCH
# get the last violation id fetched, if exists
starting_violation = last_run_violation.get('starting_violation', first_fetch_violation)
most_recent_violation = starting_violation
incidents = []
violations = client.get_violation_list(starting_violation, max_results)
for item in violations['response']:
incident_violation = item['violation_id']
incident_time_ms = item['violation_event_timestamp'] * 1000
if incident_violation <= most_recent_violation:
continue
incident = {
"name": f'Gamma Violation {incident_violation}',
"occurred": timestamp_to_datestring(incident_time_ms),
"rawJSON": json.dumps(item)
}
incidents.append(incident)
# update last run if violation id is greater than last fetch
if incident_violation > most_recent_violation:
most_recent_violation = incident_violation
next_run_violation = {'starting_violation': most_recent_violation}
return next_run_violation, incidents
|
def fetch_incidents(client: Client, last_run_violation: dict, first_fetch_violation: str, max_results: str):
""" This function will run each interval (default 1 minute)
:type client: client
:param client: Gamma client
:type last_run_violation: dict
:param last_run_violation: last violation ID that was queried from Gamma
:type first_fetch_violation: int
:param first_fetch_violation: if last_violation is None, then begin from this violation ID
:type max_results: int
:param max_results: the max number of violations to pull, bound by MAX_INCIDENTS_TO_FETCH
"""
try:
first_fetch_violation = int(first_fetch_violation)
max_results = int(max_results)
except:
raise ValueError("first_fetch_violation and max_limit must be integers")
if first_fetch_violation <= 0:
raise ValueError("first_fetch_violation must be equal to 1 or higher")
if not max_results > 0:
max_results = 10
elif max_results > MAX_INCIDENTS_TO_FETCH:
max_results = MAX_INCIDENTS_TO_FETCH
# get the last violation id fetched, if exists
starting_violation = last_run_violation.get('starting_violation', first_fetch_violation)
most_recent_violation = starting_violation
incidents = []
violations = client.get_violation_list(starting_violation, max_results)
for item in violations['response']:
incident_violation = item['violation_id']
incident_time_ms = item['violation_event_timestamp'] * 1000
if incident_violation <= most_recent_violation:
continue
incident = {
"name": f'Gamma Violation {incident_violation}',
"occurred": timestamp_to_datestring(incident_time_ms),
"rawJSON": json.dumps(item)
}
incidents.append(incident)
# update last run if violation id is greater than last fetch
if incident_violation > most_recent_violation:
most_recent_violation = incident_violation
next_run_violation = {'starting_violation': most_recent_violation}
return next_run_violation, incidents
|
30,586 |
def fetch_incidents(first_fetch, client):
"""
Automated fetching of incidents from IllusionBLACK. For first run 2 days is the fixed duration for events.
Args:
first_fetch: For first fetch the timespan to consider to fetch incidents. Example: 2 days, 5 weeks etc
client: IllusionBLACK client
Returns: Demisto Incidents
"""
now = datetime.now(tz=timezone.utc)
demisto.info(f"IllusionBLACK: Fetching incidents at {now}")
demisto_last_run = demisto.getLastRun()
if "last_run" in demisto_last_run:
last_run = datetime.fromisoformat(demisto_last_run["last_run"])
else:
last_run, _ = parse_date_range(first_fetch, utc=False)
if now - last_run < timedelta(minutes=5):
return
from_time = last_run.replace(microsecond=0).isoformat()
to_time = now.replace(microsecond=0).isoformat()
demisto.debug(f"IllusionBLACK: Getting raw events from {from_time} to {to_time}")
events, all_threat_parse = client.get_events(limit=1000, from_time=from_time, to_time=to_time)
raw_incidents = process_events(events, all_threat_parse)
incidents = []
for incident_id, raw_incident in raw_incidents.items():
incidents.append(create_incident(raw_incident))
demisto.setLastRun({"last_run": to_time})
return incidents
|
def fetch_incidents(first_fetch, client):
"""
Automated fetching of incidents from IllusionBLACK. For first run 2 days is the fixed duration for events.
Args:
first_fetch: For first fetch the timespan to consider to fetch incidents. Example: 2 days, 5 weeks etc
client: IllusionBLACK client
Returns: Demisto Incidents
"""
now = datetime.now(tz=timezone.utc)
demisto.info(f"IllusionBLACK: Fetching incidents at {now}")
demisto_last_run = demisto.getLastRun()
if "last_run" in demisto_last_run:
last_run = datetime.fromisoformat(demisto_last_run["last_run"])
else:
last_run, _ = parse_date_range(first_fetch, utc=False)
if now - last_run < timedelta(minutes=5):
return []
from_time = last_run.replace(microsecond=0).isoformat()
to_time = now.replace(microsecond=0).isoformat()
demisto.debug(f"IllusionBLACK: Getting raw events from {from_time} to {to_time}")
events, all_threat_parse = client.get_events(limit=1000, from_time=from_time, to_time=to_time)
raw_incidents = process_events(events, all_threat_parse)
incidents = []
for incident_id, raw_incident in raw_incidents.items():
incidents.append(create_incident(raw_incident))
demisto.setLastRun({"last_run": to_time})
return incidents
|
43,886 |
def _su2su2_to_tensor_products(U):
"""Given a matrix U = A \otimes B in SU(2) x SU(2), extract the two SU(2)
operations A and B.
This process has been described in detail in the Appendix of Coffey & Deiotte
https://link.springer.com/article/10.1007/s11128-009-0156-3
"""
# First, write A = [[a1, a2], [-a2*, a1*]], which we can do for any SU(2) element.
# Then, A \otimes B = [[a1 B, a2 B], [-a2*B, a1*B]] = [[C1, C2], [C3, C4]]
# where the Ci are 2x2 matrices.
C1 = U[0:2, 0:2]
C2 = U[0:2, 2:4]
C3 = U[2:4, 0:2]
C4 = U[2:4, 2:4]
# From the definition of A \otimes B, C1 C4^\dag = a1^2 I, so we can extract a1
C14 = qml.math.dot(C1, qml.math.conj(qml.math.T(C4)))
a1 = qml.math.sqrt(C14[0, 0])
# Similarly, -C2 C3^\dag = a2^2 I, so we can extract a2
C23 = qml.math.dot(C2, qml.math.conj(qml.math.T(C3)))
a2 = qml.math.sqrt(-C23[0, 0])
# This gets us a1, a2 up to a sign. To resolve the sign, ensure that
# C1 C2^dag = a1 a2* I
C12 = qml.math.dot(C1, qml.math.conj(qml.math.T(C2)))
if not qml.math.isclose(a1 * np.conj(a2), C12[0, 0]):
a2 *= -1
# Construct A
A = qml.math.stack([[a1, a2], [-qml.math.conj(a2), qml.math.conj(a1)]])
# Next, extract B. Can do from any of the C, just need to be careful in
# case one of the elements of A is 0.
if not qml.math.isclose(A[0, 0], 0.0, atol=1e-8):
B = C1 / A[0, 0]
else:
B = C2 / A[0, 1]
return A, B
|
def _su2su2_to_tensor_products(U):
r"""Given a matrix :math:`U = A \otimes B` in SU(2) x SU(2), extract the two SU(2)
operations A and B.
This process has been described in detail in the Appendix of Coffey & Deiotte
https://link.springer.com/article/10.1007/s11128-009-0156-3
"""
# First, write A = [[a1, a2], [-a2*, a1*]], which we can do for any SU(2) element.
# Then, A \otimes B = [[a1 B, a2 B], [-a2*B, a1*B]] = [[C1, C2], [C3, C4]]
# where the Ci are 2x2 matrices.
C1 = U[0:2, 0:2]
C2 = U[0:2, 2:4]
C3 = U[2:4, 0:2]
C4 = U[2:4, 2:4]
# From the definition of A \otimes B, C1 C4^\dag = a1^2 I, so we can extract a1
C14 = qml.math.dot(C1, qml.math.conj(qml.math.T(C4)))
a1 = qml.math.sqrt(C14[0, 0])
# Similarly, -C2 C3^\dag = a2^2 I, so we can extract a2
C23 = qml.math.dot(C2, qml.math.conj(qml.math.T(C3)))
a2 = qml.math.sqrt(-C23[0, 0])
# This gets us a1, a2 up to a sign. To resolve the sign, ensure that
# C1 C2^dag = a1 a2* I
C12 = qml.math.dot(C1, qml.math.conj(qml.math.T(C2)))
if not qml.math.isclose(a1 * np.conj(a2), C12[0, 0]):
a2 *= -1
# Construct A
A = qml.math.stack([[a1, a2], [-qml.math.conj(a2), qml.math.conj(a1)]])
# Next, extract B. Can do from any of the C, just need to be careful in
# case one of the elements of A is 0.
if not qml.math.isclose(A[0, 0], 0.0, atol=1e-8):
B = C1 / A[0, 0]
else:
B = C2 / A[0, 1]
return A, B
|
74 |
def get_from_archive_bulk(locator):
"""
Gets a single binary MARC record from within an Archive.org
bulk MARC item, and return the offset and length of the next
item.
If offset or length are `None`, then there is no next record.
:param str locator: Locator ocaid/filename:offset:length
:rtype: (str|None, int|None, int|None)
:return: (Binary MARC data, Next record offset, Next record length)
"""
if locator.startswith('marc:'):
locator = locator[5:]
filename, offset, length = locator.split (":")
offset = int(offset)
length = int(length)
r0, r1 = offset, offset+length-1
# get the next record's length in this request
r1 += 5
url = IA_DOWNLOAD_URL + filename
assert 0 < length < MAX_MARC_LENGTH
f = urlopen_keep_trying(url, {'Range': 'bytes=%d-%d' % (r0, r1)})
data = None
if f:
data = f.read(MAX_MARC_LENGTH)
len_in_rec = int(data[:5])
if len_in_rec != length:
data, next_offset, next_length = get_from_archive_bulk('%s:%d:%d' % (filename, offset, len_in_rec))
else:
next_length = data[length:]
data = data[:length]
if len(next_length) == 5:
# We have data for the next record
next_offset = offset + len_in_rec
next_length = int(next_length)
else:
next_offset = next_length = None
return data, next_offset, next_length
|
def get_from_archive_bulk(locator):
"""
Gets a single binary MARC record from within an Archive.org
bulk MARC item, and return the offset and length of the next
item.
If offset or length are `None`, then there is no next record.
:param str locator: Locator ocaid/filename:offset:length
:rtype: (str|None, int|None, int|None)
:return: (Binary MARC data, Next record offset, Next record length)
"""
if locator.startswith('marc:'):
locator = locator[5:]
filename, offset, length = locator.split (":")
offset = int(offset)
length = int(length)
r0, r1 = offset, offset+length-1
# get the next record's length in this request
r1 += 5
url = IA_DOWNLOAD_URL + filename
assert 0 < length < MAX_MARC_LENGTH
f = urlopen_keep_trying(url, params={'Range': 'bytes=%d-%d' % (r0, r1)})
data = None
if f:
data = f.read(MAX_MARC_LENGTH)
len_in_rec = int(data[:5])
if len_in_rec != length:
data, next_offset, next_length = get_from_archive_bulk('%s:%d:%d' % (filename, offset, len_in_rec))
else:
next_length = data[length:]
data = data[:length]
if len(next_length) == 5:
# We have data for the next record
next_offset = offset + len_in_rec
next_length = int(next_length)
else:
next_offset = next_length = None
return data, next_offset, next_length
|
46,584 |
def test_pickle_flags_consistency() -> None:
cfg = DictConfig({"a": 0})
cfg._set_flag("test", True)
assert cfg._get_node("a")._get_flag("test") # type: ignore
cfg2 = pickle.loads(pickle.dumps(cfg))
cfg2._set_flag("test", None)
assert cfg2._get_flag("test") is None
assert cfg2._get_node("a")._get_flag("test") is None
|
def test_pickle_flags_consistency() -> None:
cfg = OmegaConf.create({"a": 0})
cfg._set_flag("test", True)
assert cfg._get_node("a")._get_flag("test") # type: ignore
cfg2 = pickle.loads(pickle.dumps(cfg))
cfg2._set_flag("test", None)
assert cfg2._get_flag("test") is None
assert cfg2._get_node("a")._get_flag("test") is None
|
5,569 |
def test_parcel_profile_lcl_not_monotonic():
"""Test parcel profile with lcl calculation."""
with pytest.raises(InvalidSoundingError):
p = np.array([1004., 1000., 943., 925., 928., 850., 839., 749., 700.]) * units.hPa
t = np.array([24.2, 24., 20.2, 21.6, 21.4, 20.4, 20.2, 14.4, 13.2]) * units.degC
td = np.array([21.9, 22.1, 19.2, 20.5, 20.4, 18.4, 17.4, 8.4, -2.8]) * units.degC
pressure, temp, dewp, prof = parcel_profile_with_lcl(p, t, td)
|
def test_parcel_profile_lcl_not_monotonic():
"""Test parcel profile with lcl calculation."""
with pytest.raises(InvalidSoundingError):
p = np.array([1004., 1000., 943., 925., 928., 850., 839., 749., 700.]) * units.hPa
t = np.array([24.2, 24., 20.2, 21.6, 21.4, 20.4, 20.2, 14.4, 13.2]) * units.degC
td = np.array([21.9, 22.1, 19.2, 20.5, 20.4, 18.4, 17.4, 8.4, -2.8]) * units.degC
_ = parcel_profile_with_lcl(p, t, td)
|
34,567 |
def _request_export_info() -> Tuple[Text, Text, Text]:
"""Request file path and export stories & nlu data to that path"""
# export training data and quit
questions = questionary.form(
export_stories=questionary.text(
message="Export stories to (if file exists, this "
"will append the stories)",
default=PATHS["stories"],
validate=io_utils.file_type_validator(
[".md", ".yml"],
"Please provide a valid export path for the stories, e.g. 'stories.md'.",
),
),
export_nlu=questionary.text(
message="Export NLU data to (if file exists, this will "
"merge learned data with previous training examples)",
default=PATHS["nlu"],
validate=io_utils.file_type_validator(
[".md", ".json", ".yml"],
"Please provide a valid export path for the NLU data, e.g. 'nlu.md'.",
),
),
export_domain=questionary.text(
message="Export domain file to (if file exists, this "
"will be overwritten)",
default=PATHS["domain"],
validate=io_utils.file_type_validator(
[".yml", ".yaml"],
"Please provide a valid export path for the domain file, e.g. 'domain.yml'.",
),
),
)
answers = questions.ask()
if not answers:
raise Abort()
return answers["export_stories"], answers["export_nlu"], answers["export_domain"]
|
def _request_export_info() -> Tuple[Text, Text, Text]:
"""Request file path and export stories & nlu data to that path"""
# export training data and quit
questions = questionary.form(
export_stories=questionary.text(
message="Export stories to (if file exists, this "
"will append the stories)",
default=PATHS["stories"],
validate=io_utils.file_type_validator(
[".md", ".yml"],
"Please provide a valid export path for the stories, e.g. 'stories.yml'.",
),
),
export_nlu=questionary.text(
message="Export NLU data to (if file exists, this will "
"merge learned data with previous training examples)",
default=PATHS["nlu"],
validate=io_utils.file_type_validator(
[".md", ".json", ".yml"],
"Please provide a valid export path for the NLU data, e.g. 'nlu.md'.",
),
),
export_domain=questionary.text(
message="Export domain file to (if file exists, this "
"will be overwritten)",
default=PATHS["domain"],
validate=io_utils.file_type_validator(
[".yml", ".yaml"],
"Please provide a valid export path for the domain file, e.g. 'domain.yml'.",
),
),
)
answers = questions.ask()
if not answers:
raise Abort()
return answers["export_stories"], answers["export_nlu"], answers["export_domain"]
|
43,997 |
def edges_to_wires(graph) -> Dict[Tuple, int]:
r"""Maps the edges of a graph to corresponding wires.
**Example**
>>> g = nx.complete_graph(4).to_directed()
>>> edges_to_wires(g)
{(0, 1): 0,
(0, 2): 1,
(0, 3): 2,
(1, 0): 3,
(1, 2): 4,
(1, 3): 5,
(2, 0): 6,
(2, 1): 7,
(2, 3): 8,
(3, 0): 9,
(3, 1): 10,
(3, 2): 11}
>>> g = rx.generators.directed_mesh_graph(4, [0,1,2,3])
>>> edges_to_wires(g)
{(0, 1): 0,
(0, 2): 1,
(0, 3): 2,
(1, 0): 3,
(1, 2): 4,
(1, 3): 5,
(2, 0): 6,
(2, 1): 7,
(2, 3): 8,
(3, 0): 9,
(3, 1): 10,
(3, 2): 11}
Args:
graph (nx.Graph or rx.Py(Di)Graph): the graph specifying possible edges
Returns:
Dict[Tuple, int]: a mapping from graph edges to wires
"""
if isinstance(graph, nx.Graph):
return {edge: i for i, edge in enumerate(graph.edges)}
elif isinstance(graph, (rx.PyGraph, rx.PyDiGraph)):
gnodes = graph.nodes()
return {
(gnodes.index(e[0]), gnodes.index(e[1])): i
for i, e in enumerate(sorted(graph.edge_list()))
}
raise ValueError(f"Input graph must be a nx.Graph, rx.Py(Di)Graph, got {type(graph).__name__}")
|
def edges_to_wires(graph) -> Dict[Tuple, int]:
r"""Maps the edges of a graph to corresponding wires.
**Example**
>>> g = nx.complete_graph(4).to_directed()
>>> edges_to_wires(g)
{(0, 1): 0,
(0, 2): 1,
(0, 3): 2,
(1, 0): 3,
(1, 2): 4,
(1, 3): 5,
(2, 0): 6,
(2, 1): 7,
(2, 3): 8,
(3, 0): 9,
(3, 1): 10,
(3, 2): 11}
>>> g = rx.generators.directed_mesh_graph(4, [0,1,2,3])
>>> edges_to_wires(g)
{(0, 1): 0,
(0, 2): 1,
(0, 3): 2,
(1, 0): 3,
(1, 2): 4,
(1, 3): 5,
(2, 0): 6,
(2, 1): 7,
(2, 3): 8,
(3, 0): 9,
(3, 1): 10,
(3, 2): 11}
Args:
graph (nx.Graph or rx.PyGraph or rx.PyDiGraph): the graph specifying possible edges
Returns:
Dict[Tuple, int]: a mapping from graph edges to wires
"""
if isinstance(graph, nx.Graph):
return {edge: i for i, edge in enumerate(graph.edges)}
elif isinstance(graph, (rx.PyGraph, rx.PyDiGraph)):
gnodes = graph.nodes()
return {
(gnodes.index(e[0]), gnodes.index(e[1])): i
for i, e in enumerate(sorted(graph.edge_list()))
}
raise ValueError(f"Input graph must be a nx.Graph, rx.Py(Di)Graph, got {type(graph).__name__}")
|
2,621 |
def _preprocess_data(
X,
y,
fit_intercept,
normalize=False,
copy=True,
sample_weight=None,
return_mean=False,
check_input=True,
):
"""Center and scale data.
Centers data to have mean zero along axis 0. If fit_intercept=False or if
the X is a sparse matrix, no centering is done, but normalization can still
be applied. The function returns the statistics necessary to reconstruct
the input data, which are X_offset, y_offset, X_scale, such that the output
X = (X - X_offset) / X_scale
X_scale is the L2 norm of X - X_offset. If sample_weight is not None,
then the weighted mean of X and y is zero, and not the mean itself. If
return_mean=True, the mean, eventually weighted, is returned, independently
of whether X was centered (option used for optimization with sparse data in
coordinate_descend).
This is here because nearly all linear models will want their data to be
centered. This function also systematically makes y consistent with X.dtype
Returns
-------
X_out : {ndarray, sparse matrix} of shape (n_samples, n_features)
If copy=True a copy of the input X is triggered, otherwise operations are
inplace.
If input X is dense, then X_out is centered.
If normalize is True, then X_out is rescaled (dense and sparse case)
y : {ndarray, sparse matrix} of shape (n_samples,) or (n_samples, n_targets)
Centered version of y. Likely performed inplace on input y.
X_offset : ndarray of shape (n_features,)
The mean per column of input X.
y_offset : float or ndarray of shape (n_features,)
X_scale : ndarray of shape (n_features,)
The standard deviation per column of input X.
"""
if isinstance(sample_weight, numbers.Number):
sample_weight = None
if sample_weight is not None:
sample_weight = np.asarray(sample_weight)
if check_input:
X = check_array(X, copy=copy, accept_sparse=["csr", "csc"], dtype=FLOAT_DTYPES)
elif copy:
if sp.issparse(X):
X = X.copy()
else:
X = X.copy(order="K")
y = np.asarray(y, dtype=X.dtype)
if fit_intercept:
if sp.issparse(X):
X_offset, X_var = mean_variance_axis(X, axis=0, weights=sample_weight)
if not return_mean:
X_offset[:] = X.dtype.type(0)
else:
if normalize:
X_offset, X_var, _ = _incremental_mean_and_var(
X,
last_mean=0.0,
last_variance=0.0,
last_sample_count=0.0,
sample_weight=sample_weight,
)
else:
X_offset = np.average(X, axis=0, weights=sample_weight)
X_offset = X_offset.astype(X.dtype, copy=False)
X -= X_offset
if normalize:
X_var = X_var.astype(X.dtype, copy=False)
# Detect constant features on the computed variance, before taking
# the np.sqrt. Otherwise constant features cannot be detected with
# sample weights.
constant_mask = _is_constant_feature(X_var, X_offset, X.shape[0])
if sample_weight is None:
X_var *= X.shape[0]
else:
X_var *= sample_weight.sum()
X_scale = np.sqrt(X_var, out=X_var)
X_scale[constant_mask] = 1.0
if sp.issparse(X):
inplace_column_scale(X, 1.0 / X_scale)
else:
X /= X_scale
else:
X_scale = np.ones(X.shape[1], dtype=X.dtype)
y_offset = np.average(y, axis=0, weights=sample_weight)
y = y - y_offset
else:
X_offset = np.zeros(X.shape[1], dtype=X.dtype)
X_scale = np.ones(X.shape[1], dtype=X.dtype)
if y.ndim == 1:
y_offset = X.dtype.type(0)
else:
y_offset = np.zeros(y.shape[1], dtype=X.dtype)
return X, y, X_offset, y_offset, X_scale
|
def _preprocess_data(
X,
y,
fit_intercept,
normalize=False,
copy=True,
sample_weight=None,
return_mean=False,
check_input=True,
):
"""Center and scale data.
Centers data to have mean zero along axis 0. If fit_intercept=False or if
the X is a sparse matrix, no centering is done, but normalization can still
be applied. The function returns the statistics necessary to reconstruct
the input data, which are X_offset, y_offset, X_scale, such that the output
X = (X - X_offset) / X_scale
X_scale is the L2 norm of X - X_offset. If sample_weight is not None,
then the weighted mean of X and y is zero, and not the mean itself. If
return_mean=True, the mean, eventually weighted, is returned, independently
of whether X was centered (option used for optimization with sparse data in
coordinate_descend).
This is here because nearly all linear models will want their data to be
centered. This function also systematically makes y consistent with X.dtype
Returns
-------
X_out : {ndarray, sparse matrix} of shape (n_samples, n_features)
If copy=True a copy of the input X is triggered, otherwise operations are
inplace.
If input X is dense, then X_out is centered.
If normalize is True, then X_out is rescaled (dense and sparse case)
y_out : {ndarray, sparse matrix} of shape (n_samples,) or (n_samples, n_targets)
Centered version of y. Likely performed inplace on input y.
X_offset : ndarray of shape (n_features,)
The mean per column of input X.
y_offset : float or ndarray of shape (n_features,)
X_scale : ndarray of shape (n_features,)
The standard deviation per column of input X.
"""
if isinstance(sample_weight, numbers.Number):
sample_weight = None
if sample_weight is not None:
sample_weight = np.asarray(sample_weight)
if check_input:
X = check_array(X, copy=copy, accept_sparse=["csr", "csc"], dtype=FLOAT_DTYPES)
elif copy:
if sp.issparse(X):
X = X.copy()
else:
X = X.copy(order="K")
y = np.asarray(y, dtype=X.dtype)
if fit_intercept:
if sp.issparse(X):
X_offset, X_var = mean_variance_axis(X, axis=0, weights=sample_weight)
if not return_mean:
X_offset[:] = X.dtype.type(0)
else:
if normalize:
X_offset, X_var, _ = _incremental_mean_and_var(
X,
last_mean=0.0,
last_variance=0.0,
last_sample_count=0.0,
sample_weight=sample_weight,
)
else:
X_offset = np.average(X, axis=0, weights=sample_weight)
X_offset = X_offset.astype(X.dtype, copy=False)
X -= X_offset
if normalize:
X_var = X_var.astype(X.dtype, copy=False)
# Detect constant features on the computed variance, before taking
# the np.sqrt. Otherwise constant features cannot be detected with
# sample weights.
constant_mask = _is_constant_feature(X_var, X_offset, X.shape[0])
if sample_weight is None:
X_var *= X.shape[0]
else:
X_var *= sample_weight.sum()
X_scale = np.sqrt(X_var, out=X_var)
X_scale[constant_mask] = 1.0
if sp.issparse(X):
inplace_column_scale(X, 1.0 / X_scale)
else:
X /= X_scale
else:
X_scale = np.ones(X.shape[1], dtype=X.dtype)
y_offset = np.average(y, axis=0, weights=sample_weight)
y = y - y_offset
else:
X_offset = np.zeros(X.shape[1], dtype=X.dtype)
X_scale = np.ones(X.shape[1], dtype=X.dtype)
if y.ndim == 1:
y_offset = X.dtype.type(0)
else:
y_offset = np.zeros(y.shape[1], dtype=X.dtype)
return X, y, X_offset, y_offset, X_scale
|
35,069 |
def generate_project_from_mlf(
template_project_dir: typing.Union[pathlib.Path, str],
project_dir: typing.Union[pathlib.Path, str],
mlf: typing.Union[pathlib.Path, str],
options: dict,
):
"""Generate a project from a platform template and an existing MLF.
Parameters
----------
template_project_path : pathlib.Path or str
Path to a template project containing a microTVM Project API server.
project_dir : pathlib.Path or str
Path to a directory where the project will be created.
mlf : pathlib.Path or str
Path to the Model Library Format archive that will be used when creating
the new project.
options : dict
Project API options given to the microTVM API server for the specified platform.
Returns
-------
GeneratedProject :
A class that wraps the generated project and which can be used to further interact with it.
"""
template = TemplateProject.from_directory(str(template_project_dir))
return template.generate_project_from_mlf(str(mlf), str(project_dir), options)
|
def generate_project_from_mlf(
template_project_dir: typing.Union[pathlib.Path, str],
project_dir: typing.Union[pathlib.Path, str],
mlf: typing.Union[pathlib.Path, str],
options: dict,
):
"""Generate a project from a platform template and an existing Model Library Format (MLF) archive.
Parameters
----------
template_project_path : pathlib.Path or str
Path to a template project containing a microTVM Project API server.
project_dir : pathlib.Path or str
Path to a directory where the project will be created.
mlf : pathlib.Path or str
Path to the Model Library Format archive that will be used when creating
the new project.
options : dict
Project API options given to the microTVM API server for the specified platform.
Returns
-------
GeneratedProject :
A class that wraps the generated project and which can be used to further interact with it.
"""
template = TemplateProject.from_directory(str(template_project_dir))
return template.generate_project_from_mlf(str(mlf), str(project_dir), options)
|
32,574 |
def is_html_response(response):
if 'text\html' in response.headers.get('Content-Type', ''):
return True
# look for an html tag in the response text
# if re.search("<[^>]+>", response.text):
# return True
return False
|
def is_html_response(response):
if 'text\html' in response.headers.get('Content-Type', '').lower():
return True
# look for an html tag in the response text
# if re.search("<[^>]+>", response.text):
# return True
return False
|
37,651 |
def get_entangler_map(
num_block_qubits: int,
num_circuit_qubits: int,
entanglement: str,
entanglement_gates: List[Tuple] = None,
offset: int = 0,
) -> List[Sequence[int]]:
"""Get an entangler map for an arbitrary number of qubits.
Args:
num_block_qubits: The number of qubits of the entangling block.
num_circuit_qubits: The number of qubits of the circuit.
entanglement: The entanglement strategy.
entanglement_gates: List of tuples of entanglement gates and their counts. Used for optimization.
offset: The block offset, can be used if the entanglements differ per block.
See mode ``sca`` for instance.
Returns:
The entangler map using mode ``entanglement`` to scatter a block of ``num_block_qubits``
qubits on ``num_circuit_qubits`` qubits.
Raises:
ValueError: If the entanglement mode ist not supported.
"""
n, m = num_circuit_qubits, num_block_qubits
if m > n:
raise ValueError(
"The number of block qubits must be smaller or equal to the number of "
"qubits in the circuit."
)
if entanglement == "pairwise" and num_block_qubits != 2:
raise ValueError("Pairwise entanglement is only defined for blocks of 2 qubits.")
if entanglement == "full" or entanglement == "full_explicit":
# Optimization for CX entanglement_block of size 2, containing only 'cx' gates
if entanglement == "full" and m == 2 and entanglement_gates == [("cx", 1)]:
return [(n - i - 2, n - i - 1) for i in range(n - 1)]
return list(combinations(list(range(n)), m))
if entanglement in ["linear", "circular", "sca", "pairwise"]:
linear = [tuple(range(i, i + m)) for i in range(n - m + 1)]
# if the number of block qubits is 1, we don't have to add the 'circular' part
if entanglement == "linear" or m == 1:
return linear
if entanglement == "pairwise":
return linear[::2] + linear[1::2]
# circular equals linear plus top-bottom entanglement (if there's space for it)
if n > m:
circular = [tuple(range(n - m + 1, n)) + (0,)] + linear
else:
circular = linear
if entanglement == "circular":
return circular
# sca is circular plus shift and reverse
shifted = circular[-offset:] + circular[:-offset]
if offset % 2 == 1: # if odd, reverse the qubit indices
sca = [ind[::-1] for ind in shifted]
else:
sca = shifted
return sca
else:
raise ValueError(f"Unsupported entanglement type: {entanglement}")
|
def get_entangler_map(
num_block_qubits: int,
num_circuit_qubits: int,
entanglement: str,
entanglement_gates: Optional[List[Tuple[str, int]]] = None,
offset: int = 0,
) -> List[Sequence[int]]:
"""Get an entangler map for an arbitrary number of qubits.
Args:
num_block_qubits: The number of qubits of the entangling block.
num_circuit_qubits: The number of qubits of the circuit.
entanglement: The entanglement strategy.
entanglement_gates: List of tuples of entanglement gates and their counts. Used for optimization.
offset: The block offset, can be used if the entanglements differ per block.
See mode ``sca`` for instance.
Returns:
The entangler map using mode ``entanglement`` to scatter a block of ``num_block_qubits``
qubits on ``num_circuit_qubits`` qubits.
Raises:
ValueError: If the entanglement mode ist not supported.
"""
n, m = num_circuit_qubits, num_block_qubits
if m > n:
raise ValueError(
"The number of block qubits must be smaller or equal to the number of "
"qubits in the circuit."
)
if entanglement == "pairwise" and num_block_qubits != 2:
raise ValueError("Pairwise entanglement is only defined for blocks of 2 qubits.")
if entanglement == "full" or entanglement == "full_explicit":
# Optimization for CX entanglement_block of size 2, containing only 'cx' gates
if entanglement == "full" and m == 2 and entanglement_gates == [("cx", 1)]:
return [(n - i - 2, n - i - 1) for i in range(n - 1)]
return list(combinations(list(range(n)), m))
if entanglement in ["linear", "circular", "sca", "pairwise"]:
linear = [tuple(range(i, i + m)) for i in range(n - m + 1)]
# if the number of block qubits is 1, we don't have to add the 'circular' part
if entanglement == "linear" or m == 1:
return linear
if entanglement == "pairwise":
return linear[::2] + linear[1::2]
# circular equals linear plus top-bottom entanglement (if there's space for it)
if n > m:
circular = [tuple(range(n - m + 1, n)) + (0,)] + linear
else:
circular = linear
if entanglement == "circular":
return circular
# sca is circular plus shift and reverse
shifted = circular[-offset:] + circular[:-offset]
if offset % 2 == 1: # if odd, reverse the qubit indices
sca = [ind[::-1] for ind in shifted]
else:
sca = shifted
return sca
else:
raise ValueError(f"Unsupported entanglement type: {entanglement}")
|
29,044 |
def _log_agent_binary_hashes(agent_binary_repository: IAgentBinaryRepository):
"""
Logs all the hashes of the agent executables for debbuging ease
:param agent_binary_repository: Used to retrieve the agent binaries
"""
AGENT_BINARIES = {
"Linux": agent_binary_repository.get_linux_binary(),
"Windows": agent_binary_repository.get_windows_binary(),
}
for os, binary in AGENT_BINARIES.items():
binary_sha256_hash = get_binary_sha256_hash(binary)
logger.debug(f"{os}: SHA-256 hash: {binary_sha256_hash}")
|
def _log_agent_binary_hashes(agent_binary_repository: IAgentBinaryRepository):
"""
Logs all the hashes of the agent executables for debbuging ease
:param agent_binary_repository: Used to retrieve the agent binaries
"""
AGENT_BINARIES = {
"Linux": agent_binary_repository.get_linux_binary(),
"Windows": agent_binary_repository.get_windows_binary(),
}
for os, binary in AGENT_BINARIES.items():
binary_sha256_hash = get_binary_sha256_hash(binary)
logger.debug(f"{os} agent: SHA-256 hash: {binary_sha256_hash}")
|
32,167 |
def get_test_response(client, method, token):
"""
Test the integration state
:param client: client instance
:param method: Requests method to be used
:param token: API access token
:return: test response
"""
params = {
'token': token
}
eventtypes_url = r'/api/v2/events/types'
eventTypes = client.get_event_types(method, eventtypes_url, params)
if eventTypes is not None:
return 'ok'
else:
demisto.error("Failed to connect")
return 'fail'
|
def get_test_response(client, method, token):
"""
Test the integration state
:param client: client instance
:param method: Requests method to be used
:param token: API access token
:return: test response
"""
params = {
'token': token
}
eventtypes_url = r'/api/v2/events/types'
eventTypes = client.get_event_types(method, eventtypes_url, params)
if eventTypes:
return 'ok'
else:
demisto.error("Failed to connect")
return 'fail'
|
5,029 |
def date2num(d):
"""
Convert datetime and timedelta objects to Matplotlib dates.
Supported:
- `numpy.datetime64`/`numpy.timedelta64` (both including 'nat')
- `datetime.datetime`/`datetime.timedelta`
- `pandas.Timestamp`/`pandas.Timedelta`/`pandas.NaT`
Parameters
----------
d : ``datetime-like`` or ``timedelta-like`` or sequences of these
Returns
-------
float or sequence of floats
For dates:
Number of days since the epoch. See `.get_epoch` for the
epoch, which can be changed by :rc:`date.epoch` or `.set_epoch`. If
the epoch is "1970-01-01T00:00:00" (default) then noon Jan 1 1970
("1970-01-01T12:00:00") returns 0.5.
For timedeltas:
Number of days as float
Notes
-----
The Gregorian calendar is assumed; this is not universal practice.
For details see the module docstring.
"""
if hasattr(d, "values"):
# this unpacks pandas series or dataframes...
d = d.values
# make an iterable, but save state to unpack later:
iterable = np.iterable(d)
if not iterable:
d = [d]
d = np.asarray(d)
if np.issubdtype(d.dtype, np.timedelta64):
# numpy converts this to datetime but not the way we want
d = d.astype('timedelta64[us]')
elif not np.issubdtype(d.dtype, np.datetime64):
# handle everything that is not np.timedelta64
if not d.size:
# deals with an empty array...
return d
tzi = getattr(d[0], 'tzinfo', None)
if tzi is not None:
# make datetime naive:
d = [dt.astimezone(UTC).replace(tzinfo=None) for dt in d]
d = np.asarray(d)
# try conversion to datetime or timedelta
for _dtype in ('datetime64[us]', 'timedelta64[us]'):
try:
d = d.astype(_dtype)
break
except ValueError:
continue
else:
# maybe the sequence contained pandas.NaT
first = cbook.safe_first_element(d)
if hasattr(first, 'to_numpy'):
# assume sequence of pandas objects including pandas.NaT
# element-wise conversion is required
d = _try_pandas_nat_conversion(d)
# convert timedelta to datetime now for internal handling
if np.issubdtype(d.dtype, np.timedelta64):
epoch = np.full_like(d, np.datetime64(get_epoch()))
d = (epoch + d).astype('datetime64[us]')
d = _dt64_to_ordinalf(d)
return d if iterable else d[0]
|
def date2num(d):
"""
Convert datetime and timedelta objects to Matplotlib dates.
Supported:
- `numpy.datetime64`/`numpy.timedelta64` (both including 'nat')
- `datetime.datetime`/`datetime.timedelta`
- `pandas.Timestamp`/`pandas.Timedelta`/`pandas.NaT`
Parameters
----------
d : ``datetime-like`` or ``timedelta-like`` or sequences of these
Returns
-------
float or sequence of floats
For dates:
Number of days since the epoch. See `.get_epoch` for the
epoch, which can be changed by :rc:`date.epoch` or `.set_epoch`. If
the epoch is "1970-01-01T00:00:00" (default) then noon Jan 1 1970
("1970-01-01T12:00:00") returns 0.5.
For timedeltas:
Number of days as float
Notes
-----
The Gregorian calendar is assumed; this is not universal practice.
For details see the module docstring.
"""
if hasattr(d, "values"):
# this unpacks pandas series or dataframes...
d = d.values
# make an iterable, but save state to unpack later:
iterable = np.iterable(d)
if not iterable:
d = [d]
d = np.asarray(d)
if not np.issubdtype(d.dtype, np.datetime64):
# handle everything that is not np.timedelta64
if not d.size:
# deals with an empty array...
return d
tzi = getattr(d[0], 'tzinfo', None)
if tzi is not None:
# make datetime naive:
d = [dt.astimezone(UTC).replace(tzinfo=None) for dt in d]
d = np.asarray(d)
# try conversion to datetime or timedelta
for _dtype in ('datetime64[us]', 'timedelta64[us]'):
try:
d = d.astype(_dtype)
break
except ValueError:
continue
else:
# maybe the sequence contained pandas.NaT
first = cbook.safe_first_element(d)
if hasattr(first, 'to_numpy'):
# assume sequence of pandas objects including pandas.NaT
# element-wise conversion is required
d = _try_pandas_nat_conversion(d)
# convert timedelta to datetime now for internal handling
if np.issubdtype(d.dtype, np.timedelta64):
epoch = np.full_like(d, np.datetime64(get_epoch()))
d = (epoch + d).astype('datetime64[us]')
d = _dt64_to_ordinalf(d)
return d if iterable else d[0]
|
1,262 |
def main(args=None):
"""Main program function."""
parser = _get_parser()
opts = parser.parse_args(args)
from_img = load(opts.infile)
if opts.Volume:
computed_volume = mask_volume(from_img, opts.units)
print(computed_volume)
return computed_volume
|
def main(args=None):
"""Main program function."""
parser = _get_parser()
opts = parser.parse_args(args)
from_img = load(opts.infile)
if opts.Volume:
computed_volume = mask_volume(from_img, opts.units)
print(computed_volume)
return 0
|
25,920 |
def _validate_admin_password(password, os_type):
import re
is_linux = (os_type.lower() == 'linux')
max_length = 72 if is_linux else 123
min_length = 12
error_msg = ("Rule 1: The password length must be betwween {} and {}\n"
"Rule 2: Password must have the 3 of the following: 1 lower case character, "
"1 upper case character, 1 number and 1 special character").format(min_length, max_length)
if len(password) not in range(min_length, max_length + 1):
raise CLIError("Your password is invalid for it violates Rule 1\n{}".format(error_msg))
contains_lower = re.findall('[a-z]+', password)
contains_upper = re.findall('[A-Z]+', password)
contains_digit = re.findall('[0-9]+', password)
contains_special_char = re.findall(r'[ `~!@#$%^&*()=+_\[\]{}\|;:.\/\'\",<>?]+', password)
count = len([x for x in [contains_lower, contains_upper,
contains_digit, contains_special_char] if x])
# pylint: disable=line-too-long
if count < 3:
raise CLIError("Your password is invalid for it violates Rule 2\n{}".format(error_msg))
|
def _validate_admin_password(password, os_type):
import re
is_linux = (os_type.lower() == 'linux')
max_length = 72 if is_linux else 123
min_length = 12
error_msg = ("Rule 1: The password length must be between {} and {}\n"
"Rule 2: Password must have the 3 of the following: 1 lower case character, "
"1 upper case character, 1 number and 1 special character").format(min_length, max_length)
if len(password) not in range(min_length, max_length + 1):
raise CLIError("Your password is invalid for it violates Rule 1\n{}".format(error_msg))
contains_lower = re.findall('[a-z]+', password)
contains_upper = re.findall('[A-Z]+', password)
contains_digit = re.findall('[0-9]+', password)
contains_special_char = re.findall(r'[ `~!@#$%^&*()=+_\[\]{}\|;:.\/\'\",<>?]+', password)
count = len([x for x in [contains_lower, contains_upper,
contains_digit, contains_special_char] if x])
# pylint: disable=line-too-long
if count < 3:
raise CLIError("Your password is invalid for it violates Rule 2\n{}".format(error_msg))
|
278 |
def sample(draws=500, step=None, init='auto', n_init=200000, start=None, trace=None, chain_idx=0,
chains=None, cores=None, tune=500, progressbar=True,
model=None, random_seed=None, discard_tuned_samples=True,
compute_convergence_checks=True, **kwargs):
"""Draw samples from the posterior using the given step methods.
Multiple step methods are supported via compound step methods.
Parameters
----------
draws : int
The number of samples to draw. Defaults to 500. The number of tuned samples are discarded
by default. See `discard_tuned_samples`.
step : function or iterable of functions
A step function or collection of functions. If there are variables without a step methods,
step methods for those variables will be assigned automatically.
init : str
Initialization method to use for auto-assigned NUTS samplers.
* auto : Choose a default initialization method automatically.
Currently, this is `'jitter+adapt_diag'`, but this can change in the future.
If you depend on the exact behaviour, choose an initialization method explicitly.
* adapt_diag : Start with a identity mass matrix and then adapt a diagonal based on the
variance of the tuning samples. All chains use the test value (usually the prior mean)
as starting point.
* jitter+adapt_diag : Same as `adapt_diag`\, but add uniform jitter in [-1, 1] to the
starting point in each chain.
* advi+adapt_diag : Run ADVI and then adapt the resulting diagonal mass matrix based on the
sample variance of the tuning samples.
* advi+adapt_diag_grad : Run ADVI and then adapt the resulting diagonal mass matrix based
on the variance of the gradients during tuning. This is **experimental** and might be
removed in a future release.
* advi : Run ADVI to estimate posterior mean and diagonal mass matrix.
* advi_map: Initialize ADVI with MAP and use MAP as starting point.
* map : Use the MAP as starting point. This is discouraged.
* nuts : Run NUTS and estimate posterior mean and mass matrix from the trace.
n_init : int
Number of iterations of initializer. Only works for 'nuts' and 'ADVI'.
If 'ADVI', number of iterations, if 'nuts', number of draws.
start : dict, or array of dict
Starting point in parameter space (or partial point)
Defaults to `trace.point(-1))` if there is a trace provided and model.test_point if not
(defaults to empty dict). Initialization methods for NUTS (see `init` keyword) can
overwrite the default. For 'SMC' step method, `start` should be a list of dicts
of length = `chains`.
trace : backend, list, or MultiTrace
This should be a backend instance, a list of variables to track, or a MultiTrace object
with past values. If a MultiTrace object is given, it must contain samples for the chain
number `chain`. If None or a list of variables, the NDArray backend is used.
Passing either "text" or "sqlite" is taken as a shortcut to set up the corresponding
backend (with "mcmc" used as the base name). Ignored when using 'SMC' as step method.
chain_idx : int
Chain number used to store sample in backend. If `chains` is greater than one, chain
numbers will start here. Ignored when using 'SMC' as step method.
chains : int
The number of chains to sample. Running independent chains is important for some
convergence statistics and can also reveal multiple modes in the posterior. If `None`,
then set to either `cores` or 2, whichever is larger. For SMC the number of chains is the
number of draws.
cores : int
The number of chains to run in parallel. If `None`, set to the number of CPUs in the
system, but at most 4. When using 'SMC', this parameter will be ignored if running with
`pm.SMC(parallel=False)`. Keep in mind that
some chains might themselves be multithreaded via openmp or BLAS. In those cases it might
be faster to set this to 1.
tune : int
Number of iterations to tune, defaults to 500. Ignored when using 'SMC'. Samplers adjust
the step sizes, scalings or similar during tuning. Tuning samples will be drawn in addition
to the number specified in the `draws` argument, and will be discarded unless
`discard_tuned_samples` is set to False.
progressbar : bool
Whether or not to display a progress bar in the command line. The bar shows the percentage
of completion, the sampling speed in samples per second (SPS), and the estimated remaining
time until completion ("expected time of arrival"; ETA).
model : Model (optional if in `with` context)
random_seed : int or list of ints
A list is accepted if `cores` is greater than one.
discard_tuned_samples : bool
Whether to discard posterior samples of the tune interval. Ignored when using 'SMC'
compute_convergence_checks : bool, default=True
Whether to compute sampler statistics like Gelman-Rubin and `effective_n`.
Ignored when using 'SMC'
Returns
-------
trace : pymc3.backends.base.MultiTrace
A `MultiTrace` object that contains the samples.
Notes
-----
Optional keyword arguments can be passed to `sample` to be delivered to the
`step_method`s used during sampling. In particular, the NUTS step method accepts
a number of arguments. Common options are:
* target_accept: float in [0, 1]. The step size is tuned such that we approximate this
acceptance rate. Higher values like 0.9 or 0.95 often work better for problematic
posteriors.
* max_treedepth: The maximum depth of the trajectory tree.
* step_scale: float, default 0.25
The initial guess for the step size scaled down by :math:`1/n**(1/4)`
You can find a full list of arguments in the docstring of the step methods.
Examples
--------
.. code:: ipython
>>> import pymc3 as pm
... n = 100
... h = 61
... alpha = 2
... beta = 2
.. code:: ipython
>>> with pm.Model() as model: # context management
... p = pm.Beta('p', alpha=alpha, beta=beta)
... y = pm.Binomial('y', n=n, p=p, observed=h)
... trace = pm.sample(2000, tune=1000, cores=4)
>>> pm.summary(trace)
mean sd mc_error hpd_2.5 hpd_97.5
p 0.604625 0.047086 0.00078 0.510498 0.694774
"""
model = modelcontext(model)
nuts_kwargs = kwargs.pop('nuts_kwargs', None)
if nuts_kwargs is not None:
warnings.warn("The nuts_kwargs argument has been deprecated. Pass step "
"method arguments directly to sample instead",
DeprecationWarning)
kwargs.update(nuts_kwargs)
step_kwargs = kwargs.pop('step_kwargs', None)
if step_kwargs is not None:
warnings.warn("The step_kwargs argument has been deprecated. Pass step "
"method arguments directly to sample instead",
DeprecationWarning)
kwargs.update(step_kwargs)
if cores is None:
cores = min(4, _cpu_count())
if isinstance(step, pm.step_methods.smc.SMC):
trace = smc.sample_smc(draws=draws,
step=step,
start=start,
cores=cores,
progressbar=progressbar,
model=model,
random_seed=random_seed)
else:
if 'njobs' in kwargs:
cores = kwargs['njobs']
warnings.warn(
"The njobs argument has been deprecated. Use cores instead.",
DeprecationWarning)
if 'nchains' in kwargs:
chains = kwargs['nchains']
warnings.warn(
"The nchains argument has been deprecated. Use chains instead.",
DeprecationWarning)
if chains is None:
chains = max(2, cores)
if isinstance(start, dict):
start = [start] * chains
if random_seed == -1:
random_seed = None
if chains == 1 and isinstance(random_seed, int):
random_seed = [random_seed]
if random_seed is None or isinstance(random_seed, int):
if random_seed is not None:
np.random.seed(random_seed)
random_seed = [np.random.randint(2 ** 30) for _ in range(chains)]
if not isinstance(random_seed, Iterable):
raise TypeError(
'Invalid value for `random_seed`. Must be tuple, list or int')
if 'chain' in kwargs:
chain_idx = kwargs['chain']
warnings.warn(
"The chain argument has been deprecated. Use chain_idx instead.",
DeprecationWarning)
if start is not None:
for start_vals in start:
_check_start_shape(model, start_vals)
# small trace warning
if draws == 0:
msg = "Tuning was enabled throughout the whole trace."
_log.warning(msg)
elif draws < 500:
msg = "Only %s samples in chain." % draws
_log.warning(msg)
draws += tune
if model.ndim == 0:
raise ValueError('The model does not contain any free variables.')
if step is None and init is not None and all_continuous(model.vars):
try:
# By default, try to use NUTS
_log.info('Auto-assigning NUTS sampler...')
start_, step = init_nuts(init=init, chains=chains, n_init=n_init,
model=model, random_seed=random_seed,
progressbar=progressbar, **kwargs)
if start is None:
start = start_
except (AttributeError, NotImplementedError, tg.NullTypeGradError):
# gradient computation failed
_log.info("Initializing NUTS failed. "
"Falling back to elementwise auto-assignment.")
_log.debug('Exception in init nuts', exec_info=True)
step = assign_step_methods(model, step, step_kwargs=kwargs)
else:
step = assign_step_methods(model, step, step_kwargs=kwargs)
if isinstance(step, list):
step = CompoundStep(step)
if start is None:
start = {}
if isinstance(start, dict):
start = [start] * chains
sample_args = {'draws': draws,
'step': step,
'start': start,
'trace': trace,
'chain': chain_idx,
'chains': chains,
'tune': tune,
'progressbar': progressbar,
'model': model,
'random_seed': random_seed,
'cores': cores, }
sample_args.update(kwargs)
has_population_samplers = np.any([isinstance(m, arraystep.PopulationArrayStepShared)
for m in (step.methods if isinstance(step, CompoundStep) else [step])])
parallel = cores > 1 and chains > 1 and not has_population_samplers
if parallel:
_log.info('Multiprocess sampling ({} chains in {} jobs)'.format(chains, cores))
_print_step_hierarchy(step)
try:
trace = _mp_sample(**sample_args)
except pickle.PickleError:
_log.warning("Could not pickle model, sampling singlethreaded.")
_log.debug('Pickling error:', exec_info=True)
parallel = False
except AttributeError as e:
if str(e).startswith("AttributeError: Can't pickle"):
_log.warning("Could not pickle model, sampling singlethreaded.")
_log.debug('Pickling error:', exec_info=True)
parallel = False
else:
raise
if not parallel:
if has_population_samplers:
_log.info('Population sampling ({} chains)'.format(chains))
_print_step_hierarchy(step)
trace = _sample_population(**sample_args, parallelize=cores > 1)
else:
_log.info('Sequential sampling ({} chains in 1 job)'.format(chains))
_print_step_hierarchy(step)
trace = _sample_many(**sample_args)
discard = tune if discard_tuned_samples else 0
trace = trace[discard:]
if compute_convergence_checks:
if draws-tune < 100:
warnings.warn("The number of samples is too small to check convergence reliably.")
else:
trace.report._run_convergence_checks(trace, model)
trace.report._log_summary()
return trace
|
def sample(draws=500, step=None, init='auto', n_init=200000, start=None, trace=None, chain_idx=0,
chains=None, cores=None, tune=500, progressbar=True,
model=None, random_seed=None, discard_tuned_samples=True,
compute_convergence_checks=True, **kwargs):
"""Draw samples from the posterior using the given step methods.
Multiple step methods are supported via compound step methods.
Parameters
----------
draws : int
The number of samples to draw. Defaults to 500. The number of tuned samples are discarded
by default. See `discard_tuned_samples`.
step : function or iterable of functions
A step function or collection of functions. If there are variables without a step methods,
step methods for those variables will be assigned automatically.
init : str
Initialization method to use for auto-assigned NUTS samplers.
* auto : Choose a default initialization method automatically.
Currently, this is `'jitter+adapt_diag'`, but this can change in the future.
If you depend on the exact behaviour, choose an initialization method explicitly.
* adapt_diag : Start with a identity mass matrix and then adapt a diagonal based on the
variance of the tuning samples. All chains use the test value (usually the prior mean)
as starting point.
* jitter+adapt_diag : Same as `adapt_diag`\, but add uniform jitter in [-1, 1] to the
starting point in each chain.
* advi+adapt_diag : Run ADVI and then adapt the resulting diagonal mass matrix based on the
sample variance of the tuning samples.
* advi+adapt_diag_grad : Run ADVI and then adapt the resulting diagonal mass matrix based
on the variance of the gradients during tuning. This is **experimental** and might be
removed in a future release.
* advi : Run ADVI to estimate posterior mean and diagonal mass matrix.
* advi_map: Initialize ADVI with MAP and use MAP as starting point.
* map : Use the MAP as starting point. This is discouraged.
* nuts : Run NUTS and estimate posterior mean and mass matrix from the trace.
n_init : int
Number of iterations of initializer. Only works for 'nuts' and 'ADVI'.
If 'ADVI', number of iterations, if 'nuts', number of draws.
start : dict, or array of dict
Starting point in parameter space (or partial point)
Defaults to `trace.point(-1))` if there is a trace provided and model.test_point if not
(defaults to empty dict). Initialization methods for NUTS (see `init` keyword) can
overwrite the default. For 'SMC' step method, `start` should be a list of dicts
of length = `chains`.
trace : backend, list, or MultiTrace
This should be a backend instance, a list of variables to track, or a MultiTrace object
with past values. If a MultiTrace object is given, it must contain samples for the chain
number `chain`. If None or a list of variables, the NDArray backend is used.
Passing either "text" or "sqlite" is taken as a shortcut to set up the corresponding
backend (with "mcmc" used as the base name). Ignored when using 'SMC' as step method.
chain_idx : int
Chain number used to store sample in backend. If `chains` is greater than one, chain
numbers will start here. Ignored when using 'SMC' as step method.
chains : int
The number of chains to sample. Running independent chains is important for some
convergence statistics and can also reveal multiple modes in the posterior. If `None`,
then set to either `cores` or 2, whichever is larger. For SMC the number of chains is the
number of draws.
cores : int
The number of chains to run in parallel. If `None`, set to the number of CPUs in the
system, but at most 4. When using 'SMC', this parameter will be ignored if running with
``pm.SMC(parallel=False)``. Keep in mind that
some chains might themselves be multithreaded via openmp or BLAS. In those cases it might
be faster to set this to 1.
tune : int
Number of iterations to tune, defaults to 500. Ignored when using 'SMC'. Samplers adjust
the step sizes, scalings or similar during tuning. Tuning samples will be drawn in addition
to the number specified in the `draws` argument, and will be discarded unless
`discard_tuned_samples` is set to False.
progressbar : bool
Whether or not to display a progress bar in the command line. The bar shows the percentage
of completion, the sampling speed in samples per second (SPS), and the estimated remaining
time until completion ("expected time of arrival"; ETA).
model : Model (optional if in `with` context)
random_seed : int or list of ints
A list is accepted if `cores` is greater than one.
discard_tuned_samples : bool
Whether to discard posterior samples of the tune interval. Ignored when using 'SMC'
compute_convergence_checks : bool, default=True
Whether to compute sampler statistics like Gelman-Rubin and `effective_n`.
Ignored when using 'SMC'
Returns
-------
trace : pymc3.backends.base.MultiTrace
A `MultiTrace` object that contains the samples.
Notes
-----
Optional keyword arguments can be passed to `sample` to be delivered to the
`step_method`s used during sampling. In particular, the NUTS step method accepts
a number of arguments. Common options are:
* target_accept: float in [0, 1]. The step size is tuned such that we approximate this
acceptance rate. Higher values like 0.9 or 0.95 often work better for problematic
posteriors.
* max_treedepth: The maximum depth of the trajectory tree.
* step_scale: float, default 0.25
The initial guess for the step size scaled down by :math:`1/n**(1/4)`
You can find a full list of arguments in the docstring of the step methods.
Examples
--------
.. code:: ipython
>>> import pymc3 as pm
... n = 100
... h = 61
... alpha = 2
... beta = 2
.. code:: ipython
>>> with pm.Model() as model: # context management
... p = pm.Beta('p', alpha=alpha, beta=beta)
... y = pm.Binomial('y', n=n, p=p, observed=h)
... trace = pm.sample(2000, tune=1000, cores=4)
>>> pm.summary(trace)
mean sd mc_error hpd_2.5 hpd_97.5
p 0.604625 0.047086 0.00078 0.510498 0.694774
"""
model = modelcontext(model)
nuts_kwargs = kwargs.pop('nuts_kwargs', None)
if nuts_kwargs is not None:
warnings.warn("The nuts_kwargs argument has been deprecated. Pass step "
"method arguments directly to sample instead",
DeprecationWarning)
kwargs.update(nuts_kwargs)
step_kwargs = kwargs.pop('step_kwargs', None)
if step_kwargs is not None:
warnings.warn("The step_kwargs argument has been deprecated. Pass step "
"method arguments directly to sample instead",
DeprecationWarning)
kwargs.update(step_kwargs)
if cores is None:
cores = min(4, _cpu_count())
if isinstance(step, pm.step_methods.smc.SMC):
trace = smc.sample_smc(draws=draws,
step=step,
start=start,
cores=cores,
progressbar=progressbar,
model=model,
random_seed=random_seed)
else:
if 'njobs' in kwargs:
cores = kwargs['njobs']
warnings.warn(
"The njobs argument has been deprecated. Use cores instead.",
DeprecationWarning)
if 'nchains' in kwargs:
chains = kwargs['nchains']
warnings.warn(
"The nchains argument has been deprecated. Use chains instead.",
DeprecationWarning)
if chains is None:
chains = max(2, cores)
if isinstance(start, dict):
start = [start] * chains
if random_seed == -1:
random_seed = None
if chains == 1 and isinstance(random_seed, int):
random_seed = [random_seed]
if random_seed is None or isinstance(random_seed, int):
if random_seed is not None:
np.random.seed(random_seed)
random_seed = [np.random.randint(2 ** 30) for _ in range(chains)]
if not isinstance(random_seed, Iterable):
raise TypeError(
'Invalid value for `random_seed`. Must be tuple, list or int')
if 'chain' in kwargs:
chain_idx = kwargs['chain']
warnings.warn(
"The chain argument has been deprecated. Use chain_idx instead.",
DeprecationWarning)
if start is not None:
for start_vals in start:
_check_start_shape(model, start_vals)
# small trace warning
if draws == 0:
msg = "Tuning was enabled throughout the whole trace."
_log.warning(msg)
elif draws < 500:
msg = "Only %s samples in chain." % draws
_log.warning(msg)
draws += tune
if model.ndim == 0:
raise ValueError('The model does not contain any free variables.')
if step is None and init is not None and all_continuous(model.vars):
try:
# By default, try to use NUTS
_log.info('Auto-assigning NUTS sampler...')
start_, step = init_nuts(init=init, chains=chains, n_init=n_init,
model=model, random_seed=random_seed,
progressbar=progressbar, **kwargs)
if start is None:
start = start_
except (AttributeError, NotImplementedError, tg.NullTypeGradError):
# gradient computation failed
_log.info("Initializing NUTS failed. "
"Falling back to elementwise auto-assignment.")
_log.debug('Exception in init nuts', exec_info=True)
step = assign_step_methods(model, step, step_kwargs=kwargs)
else:
step = assign_step_methods(model, step, step_kwargs=kwargs)
if isinstance(step, list):
step = CompoundStep(step)
if start is None:
start = {}
if isinstance(start, dict):
start = [start] * chains
sample_args = {'draws': draws,
'step': step,
'start': start,
'trace': trace,
'chain': chain_idx,
'chains': chains,
'tune': tune,
'progressbar': progressbar,
'model': model,
'random_seed': random_seed,
'cores': cores, }
sample_args.update(kwargs)
has_population_samplers = np.any([isinstance(m, arraystep.PopulationArrayStepShared)
for m in (step.methods if isinstance(step, CompoundStep) else [step])])
parallel = cores > 1 and chains > 1 and not has_population_samplers
if parallel:
_log.info('Multiprocess sampling ({} chains in {} jobs)'.format(chains, cores))
_print_step_hierarchy(step)
try:
trace = _mp_sample(**sample_args)
except pickle.PickleError:
_log.warning("Could not pickle model, sampling singlethreaded.")
_log.debug('Pickling error:', exec_info=True)
parallel = False
except AttributeError as e:
if str(e).startswith("AttributeError: Can't pickle"):
_log.warning("Could not pickle model, sampling singlethreaded.")
_log.debug('Pickling error:', exec_info=True)
parallel = False
else:
raise
if not parallel:
if has_population_samplers:
_log.info('Population sampling ({} chains)'.format(chains))
_print_step_hierarchy(step)
trace = _sample_population(**sample_args, parallelize=cores > 1)
else:
_log.info('Sequential sampling ({} chains in 1 job)'.format(chains))
_print_step_hierarchy(step)
trace = _sample_many(**sample_args)
discard = tune if discard_tuned_samples else 0
trace = trace[discard:]
if compute_convergence_checks:
if draws-tune < 100:
warnings.warn("The number of samples is too small to check convergence reliably.")
else:
trace.report._run_convergence_checks(trace, model)
trace.report._log_summary()
return trace
|
23,021 |
def test_frame_series_arithmetic_methods():
pdf1 = pd.DataFrame(
{
"A": np.arange(10),
"B": [np.nan, 1, 2, 3, 4] * 2,
"C": [np.nan] * 10,
"D": np.arange(10),
},
index=list("abcdefghij"),
columns=list("ABCD"),
)
pdf2 = pd.DataFrame(
np.random.randn(10, 4), index=list("abcdefghjk"), columns=list("ABCX")
)
ps1 = pdf1.A
ps2 = pdf2.A
ps3 = pd.Series(np.random.randn(10), index=list("ABCDXabcde"))
ddf1 = dd.from_pandas(pdf1, 2)
ddf2 = dd.from_pandas(pdf2, 2)
ds1 = ddf1.A
ds2 = ddf2.A
s = dd.core.Scalar({("s", 0): 4}, "s", "i8")
for l, r, el, er in [
(ddf1, ddf2, pdf1, pdf2),
(ds1, ds2, ps1, ps2),
(ddf1.repartition(["a", "f", "j"]), ddf2, pdf1, pdf2),
(ds1.repartition(["a", "b", "f", "j"]), ds2, ps1, ps2),
(ddf1, ddf2.repartition(["a", "k"]), pdf1, pdf2),
(ds1, ds2.repartition(["a", "b", "d", "h", "k"]), ps1, ps2),
(ddf1, 3, pdf1, 3),
(ds1, 3, ps1, 3),
(ddf1, s, pdf1, 4),
(ds1, s, ps1, 4),
]:
# l, r may be repartitioned, test whether repartition keeps original data
assert_eq(l, el)
assert_eq(r, er)
assert_eq(l.add(r, fill_value=0), el.add(er, fill_value=0))
assert_eq(l.sub(r, fill_value=0), el.sub(er, fill_value=0))
assert_eq(l.mul(r, fill_value=0), el.mul(er, fill_value=0))
with warnings.catch_warnings():
# pandas-26793
warnings.simplefilter("ignore", RuntimeWarning)
assert_eq(l.div(r, fill_value=0), el.div(er, fill_value=0))
assert_eq(l.divide(r, axis=0), el.divide(er, axis=0))
assert_eq(l.truediv(r, fill_value=0), el.truediv(er, fill_value=0))
assert_eq(l.floordiv(r, fill_value=1), el.floordiv(er, fill_value=1))
assert_eq(l.pow(r, fill_value=0), el.pow(er, fill_value=0))
assert_eq(l.mod(r, fill_value=0), el.mod(er, fill_value=0))
assert_eq(l.radd(r, fill_value=0), el.radd(er, fill_value=0))
assert_eq(l.rsub(r, fill_value=0), el.rsub(er, fill_value=0))
assert_eq(l.rmul(r, fill_value=0), el.rmul(er, fill_value=0))
with warnings.catch_warnings():
# pandas-26793
warnings.simplefilter("ignore", RuntimeWarning)
assert_eq(l.rdiv(r, fill_value=0), el.rdiv(er, fill_value=0))
assert_eq(l.rtruediv(r, fill_value=0), el.rtruediv(er, fill_value=0))
assert_eq(l.rfloordiv(r, fill_value=1), el.rfloordiv(er, fill_value=1))
assert_eq(l.rpow(r, fill_value=0), el.rpow(er, fill_value=0))
assert_eq(l.rmod(r, fill_value=0), el.rmod(er, fill_value=0))
for l, r, el, er in [(ddf1, ds2, pdf1, ps2), (ddf1, ddf2.X, pdf1, pdf2.X)]:
assert_eq(l, el)
assert_eq(r, er)
# must specify axis=0 to add Series to each column
# axis=1 is not supported (add to each row)
assert_eq(l.add(r, axis=0), el.add(er, axis=0))
assert_eq(l.sub(r, axis=0), el.sub(er, axis=0))
assert_eq(l.mul(r, axis=0), el.mul(er, axis=0))
assert_eq(l.div(r, axis=0), el.div(er, axis=0))
assert_eq(l.divide(r, axis=0), el.divide(er, axis=0))
assert_eq(l.truediv(r, axis=0), el.truediv(er, axis=0))
assert_eq(l.floordiv(r, axis=0), el.floordiv(er, axis=0))
assert_eq(l.mod(r, axis=0), el.mod(er, axis=0))
assert_eq(l.pow(r, axis=0), el.pow(er, axis=0))
assert_eq(l.radd(r, axis=0), el.radd(er, axis=0))
assert_eq(l.rsub(r, axis=0), el.rsub(er, axis=0))
assert_eq(l.rmul(r, axis=0), el.rmul(er, axis=0))
assert_eq(l.rdiv(r, axis=0), el.rdiv(er, axis=0))
assert_eq(l.rtruediv(r, axis=0), el.rtruediv(er, axis=0))
assert_eq(l.rfloordiv(r, axis=0), el.rfloordiv(er, axis=0))
assert_eq(l.rmod(r, axis=0), el.rmod(er, axis=0))
assert_eq(l.rpow(r, axis=0), el.rpow(er, axis=0))
pytest.raises(ValueError, lambda: l.add(r, axis=1))
for l, r, el, er in [(ddf1, pdf2, pdf1, pdf2), (ddf1, ps3, pdf1, ps3)]:
assert_eq(l, el)
assert_eq(r, er)
for axis in [0, 1, "index", "columns"]:
assert_eq(l.add(r, axis=axis), el.add(er, axis=axis))
assert_eq(l.sub(r, axis=axis), el.sub(er, axis=axis))
assert_eq(l.mul(r, axis=axis), el.mul(er, axis=axis))
assert_eq(l.div(r, axis=axis), el.div(er, axis=axis))
assert_eq(l.divide(r, axis=0), el.divide(er, axis=0))
assert_eq(l.truediv(r, axis=axis), el.truediv(er, axis=axis))
with warnings.catch_warnings():
# https://github.com/pandas-dev/pandas/issues/26793
warnings.simplefilter("ignore", RuntimeWarning)
assert_eq(l.floordiv(r, axis=axis), el.floordiv(er, axis=axis))
assert_eq(l.mod(r, axis=axis), el.mod(er, axis=axis))
assert_eq(l.pow(r, axis=axis), el.pow(er, axis=axis))
assert_eq(l.rdiv(r, axis=axis), el.rdiv(er, axis=axis))
assert_eq(l.rtruediv(r, axis=axis), el.rtruediv(er, axis=axis))
assert_eq(l.rfloordiv(r, axis=axis), el.rfloordiv(er, axis=axis))
assert_eq(l.rpow(r, axis=axis), el.rpow(er, axis=axis))
assert_eq(l.rmod(r, axis=axis), el.rmod(er, axis=axis))
assert_eq(l.radd(r, axis=axis), el.radd(er, axis=axis))
assert_eq(l.rsub(r, axis=axis), el.rsub(er, axis=axis))
assert_eq(l.rmul(r, axis=axis), el.rmul(er, axis=axis))
|
def test_frame_series_arithmetic_methods():
pdf1 = pd.DataFrame(
{
"A": np.arange(10),
"B": [np.nan, 1, 2, 3, 4] * 2,
"C": [np.nan] * 10,
"D": np.arange(10),
},
index=list("abcdefghij"),
columns=list("ABCD"),
)
pdf2 = pd.DataFrame(
np.random.randn(10, 4), index=list("abcdefghjk"), columns=list("ABCX")
)
ps1 = pdf1.A
ps2 = pdf2.A
ps3 = pd.Series(np.random.randn(10), index=list("ABCDXabcde"))
ddf1 = dd.from_pandas(pdf1, 2)
ddf2 = dd.from_pandas(pdf2, 2)
ds1 = ddf1.A
ds2 = ddf2.A
s = dd.core.Scalar({("s", 0): 4}, "s", "i8")
for l, r, el, er in [
(ddf1, ddf2, pdf1, pdf2),
(ds1, ds2, ps1, ps2),
(ddf1.repartition(["a", "f", "j"]), ddf2, pdf1, pdf2),
(ds1.repartition(["a", "b", "f", "j"]), ds2, ps1, ps2),
(ddf1, ddf2.repartition(["a", "k"]), pdf1, pdf2),
(ds1, ds2.repartition(["a", "b", "d", "h", "k"]), ps1, ps2),
(ddf1, 3, pdf1, 3),
(ds1, 3, ps1, 3),
(ddf1, s, pdf1, 4),
(ds1, s, ps1, 4),
]:
# l, r may be repartitioned, test whether repartition keeps original data
assert_eq(l, el)
assert_eq(r, er)
assert_eq(l.add(r, fill_value=0), el.add(er, fill_value=0))
assert_eq(l.sub(r, fill_value=0), el.sub(er, fill_value=0))
assert_eq(l.mul(r, fill_value=0), el.mul(er, fill_value=0))
with warnings.catch_warnings():
# pandas-26793
warnings.simplefilter("ignore", RuntimeWarning)
assert_eq(l.div(r, fill_value=0), el.div(er, fill_value=0))
assert_eq(l.divide(r, axis=0), el.divide(er, axis=0))
assert_eq(l.truediv(r, fill_value=0), el.truediv(er, fill_value=0))
assert_eq(l.floordiv(r, fill_value=1), el.floordiv(er, fill_value=1))
assert_eq(l.pow(r, fill_value=0), el.pow(er, fill_value=0))
assert_eq(l.mod(r, fill_value=0), el.mod(er, fill_value=0))
assert_eq(l.radd(r, fill_value=0), el.radd(er, fill_value=0))
assert_eq(l.rsub(r, fill_value=0), el.rsub(er, fill_value=0))
assert_eq(l.rmul(r, fill_value=0), el.rmul(er, fill_value=0))
with warnings.catch_warnings():
# pandas-26793
warnings.simplefilter("ignore", RuntimeWarning)
assert_eq(l.rdiv(r, fill_value=0), el.rdiv(er, fill_value=0))
assert_eq(l.rtruediv(r, fill_value=0), el.rtruediv(er, fill_value=0))
assert_eq(l.rfloordiv(r, fill_value=1), el.rfloordiv(er, fill_value=1))
assert_eq(l.rpow(r, fill_value=0), el.rpow(er, fill_value=0))
assert_eq(l.rmod(r, fill_value=0), el.rmod(er, fill_value=0))
for l, r, el, er in [(ddf1, ds2, pdf1, ps2), (ddf1, ddf2.X, pdf1, pdf2.X)]:
assert_eq(l, el)
assert_eq(r, er)
# must specify axis=0 to add Series to each column
# axis=1 is not supported (add to each row)
assert_eq(l.add(r, axis=0), el.add(er, axis=0))
assert_eq(l.sub(r, axis=0), el.sub(er, axis=0))
assert_eq(l.mul(r, axis=0), el.mul(er, axis=0))
assert_eq(l.div(r, axis=0), el.div(er, axis=0))
assert_eq(l.divide(r, axis=0), el.divide(er, axis=0))
assert_eq(l.truediv(r, axis=0), el.truediv(er, axis=0))
assert_eq(l.floordiv(r, axis=0), el.floordiv(er, axis=0))
assert_eq(l.mod(r, axis=0), el.mod(er, axis=0))
assert_eq(l.pow(r, axis=0), el.pow(er, axis=0))
assert_eq(l.radd(r, axis=0), el.radd(er, axis=0))
assert_eq(l.rsub(r, axis=0), el.rsub(er, axis=0))
assert_eq(l.rmul(r, axis=0), el.rmul(er, axis=0))
assert_eq(l.rdiv(r, axis=0), el.rdiv(er, axis=0))
assert_eq(l.rtruediv(r, axis=0), el.rtruediv(er, axis=0))
assert_eq(l.rfloordiv(r, axis=0), el.rfloordiv(er, axis=0))
assert_eq(l.rmod(r, axis=0), el.rmod(er, axis=0))
assert_eq(l.rpow(r, axis=0), el.rpow(er, axis=0))
pytest.raises(ValueError, lambda: l.add(r, axis=1))
for l, r, el, er in [(ddf1, pdf2, pdf1, pdf2), (ddf1, ps3, pdf1, ps3)]:
assert_eq(l, el)
assert_eq(r, er)
for axis in [0, 1, "index", "columns"]:
assert_eq(l.add(r, axis=axis), el.add(er, axis=axis))
assert_eq(l.sub(r, axis=axis), el.sub(er, axis=axis))
assert_eq(l.mul(r, axis=axis), el.mul(er, axis=axis))
assert_eq(l.div(r, axis=axis), el.div(er, axis=axis))
assert_eq(l.divide(r, axis=axis), el.divide(er, axis=axis))
assert_eq(l.truediv(r, axis=axis), el.truediv(er, axis=axis))
with warnings.catch_warnings():
# https://github.com/pandas-dev/pandas/issues/26793
warnings.simplefilter("ignore", RuntimeWarning)
assert_eq(l.floordiv(r, axis=axis), el.floordiv(er, axis=axis))
assert_eq(l.mod(r, axis=axis), el.mod(er, axis=axis))
assert_eq(l.pow(r, axis=axis), el.pow(er, axis=axis))
assert_eq(l.rdiv(r, axis=axis), el.rdiv(er, axis=axis))
assert_eq(l.rtruediv(r, axis=axis), el.rtruediv(er, axis=axis))
assert_eq(l.rfloordiv(r, axis=axis), el.rfloordiv(er, axis=axis))
assert_eq(l.rpow(r, axis=axis), el.rpow(er, axis=axis))
assert_eq(l.rmod(r, axis=axis), el.rmod(er, axis=axis))
assert_eq(l.radd(r, axis=axis), el.radd(er, axis=axis))
assert_eq(l.rsub(r, axis=axis), el.rsub(er, axis=axis))
assert_eq(l.rmul(r, axis=axis), el.rmul(er, axis=axis))
|
8,287 |
def _readcube(rawdata, frame_start_index, frame_list,
width, height, channel_number,
width_norm, height_norm, rebin_energy,
SI_dtype, sweep, frame_shifts,
sum_frames, read_em_image, only_valid_data, lazy): # pragma: no cover
"""
Read spectrum image (and SEM/STEM image) from pts file
Parameters
----------
rawdata : numpy.ndarray
Spectrum image part of pts file.
frame_start_index : np.ndarray of shape (sweep+1, ) or (0, )
The indices of each frame start. If length is zero, the indices will be
determined from rawdata.
frame_list : list
List of frames to be read.
width, height : int
The navigation dimension.
channel_number : int
The number of channels.
width_norm, height_norm : int
Rebin factor of the navigation dimension
rebin_energy : int
Rebin factor of the energy dimension
sweep : int
Number of sweep
frame_shifts : list
The list of image positions [[x0,y0,z0], ...]. The x, y, z values can
be negative. The data points outside data cube are ignored.
Returns
-------
data : numpy.ndarray or dask.array
The spectrum image with shape (frame, x, y, energy) if sum_frames is
False, otherwise (x, y, energy).
If lazy is True, the dask array is a COO sparse array
em_data : numpy.ndarray or dask.array
The SEM/STEM image with shape (frame, x, y) if sum_frames is False,
otherwise (x, y).
has_em_image : bool
True if the stream contains SEM/STEM images.
sweep : int
The number of loaded frames.
frame_start_index : list
The indices of each frame start.
max_shift : numpy.ndarray
The maximum shifts of the origin in the navigation dimension
frame_shifts : numpy.ndarray
The shifts of the origin in the navigation dimension for each frame.
"""
import dask.array as da
# In case of sum_frames, spectrum image and SEM/STEM image are summing up to the same frame number.
# To avoid overflow on integration of SEM/STEM image, data type of np.uint32 is selected
# for 16 frames and over. (range of image intensity in each frame is 0-4095 (0-0xfff))
EM_dtype = np.uint16
frame_step = 1
if sum_frames:
frame_step = 0
if sweep >= 16:
EM_dtype = np.uint32
n_frames = 1
else:
n_frames = sweep + 1
if lazy:
hypermap = np.zeros((n_frames), dtype=EM_dtype) # dummy variable, not used
data_list = []
else:
hypermap = np.zeros((n_frames, height, width, channel_number),
dtype=SI_dtype)
em_image = np.zeros((n_frames, width, height), dtype=EM_dtype)
max_value = np.iinfo(SI_dtype).max
frame_shifts = np.asarray(frame_shifts)
frame_list = np.asarray(frame_list)
max_shift = frame_shifts[frame_list].max(axis=0)
min_shift = frame_shifts[frame_list].min(axis=0)
# sxyz = np.array([min_shift[0]-max_shift[0], min_shift[1]-max_shift[1],0])
min_shift[2]=0
max_shift[2]=0
sxyz = min_shift-max_shift
frame_shifts -= max_shift
width += sxyz[1]
height += sxyz[0]
if lazy:
readframe = _readframe_lazy
else:
readframe = _readframe_dense
frame_num = 0
p_start = 0
target_frame_num = 0
eof = rawdata.size
countup = 1
has_em_image = False
for frame_idx in frame_list:
if frame_idx < 0:
continue
elif frame_start_index[frame_idx] >= 0:
# if frame_idx is already indexed
p_start = frame_start_index[frame_idx]
elif frame_num < frame_idx and frame_start_index[frame_num] < 0:
# record start point of frame and skip frame
frame_start_index[frame_num] = p_start
p_start += _readframe_dummy(rawdata[p_start:])
frame_num += 1
continue
else:
frame_start_index[frame_idx] = p_start # = end of last frame
if frame_idx < frame_shifts.size:
fs = frame_shifts[frame_idx]
else:
fs = np.zeros(3, np.uint16)
_logger.info(f"Size of frame_shift array is too small. The frame {frame_idx} is not moved.")
length, frame_data, has_em, valid, max_valid = readframe(
rawdata[p_start:], 1,
hypermap[target_frame_num], em_image[target_frame_num],
width, height, channel_number,
width_norm, height_norm, rebin_energy,
fs[1], fs[0], fs[2], max_value)
has_em_image = has_em_image or has_em
if length == 0: # no data
break
if valid or not only_valid_data:
# accept last frame
if lazy:
data_list.append(frame_data)
frame_num += 1
target_frame_num += frame_step
else:
# incomplete data, not accepted
if sum_frames:
# subtract signal counts of last frame
_ = readframe(rawdata[p_start:], -1,
hypermap[target_frame_num], em_image[target_frame_num],
width, height, channel_number,
width_norm, height_norm, rebin_energy,
fs[1], fs[0],fs[2], max_value)
_logger.info("The last frame (sweep) is incomplete because the acquisition stopped during this frame. The partially acquired frame is ignored. Use 'sum_frames=False, only_valid_data=False' to read all frames individually, including the last partially completed frame.")
break
# else:
# pass
p_start += length
if not lazy:
if sum_frames:
# the first frame has integrated intensity
return hypermap[0,:height,:width], em_image[0,:height,:width], has_em_image, frame_num, frame_start_index, valid, max_shift, frame_shifts
else:
return hypermap[:target_frame_num,:height,:width], em_image[:target_frame_num,:height,:width], has_em_image, frame_num, frame_start_index, valid, max_shift, frame_shifts
# for lazy loading
from hyperspy.misc.io.fei_stream_readers import DenseSliceCOO
length = np.sum([len(d) for d in data_list])
# length = number of data points
# v : [[frame_no, y, x, energy_channel, 1], ....]
v = np.zeros(shape=(5, length), dtype=np.uint16)
ptr = 0
frame_count = 0
for d in data_list:
# d : data points in one frame
d = np.asarray(d)
# check if the pixels are in the valid data cube
# (frame_shifts make partially integrated area at the rim)
valid_cube = np.where((0<=d[:,0]) & (d[:,0]<height) & (0<=d[:,1]) & (d[:,1]<width) & (0<=d[:,2]) & (d[:,2]<channel_number))
d = d[valid_cube]
flen = len(d)
pv = v[:,ptr:ptr+flen]
pv[1:4, :] = np.array(d).transpose()
pv[0,:] = frame_count
pv[4,:] = 1
ptr += flen
frame_count += 1
if sum_frames:
data_shape = [height, width, channel_number]
ar_s = DenseSliceCOO(v[1:4], v[4], shape=data_shape)
else:
data_shape = [frame_count, height, width, channel_number]
ar_s = DenseSliceCOO(v[0:4], v[4], shape=data_shape)
if sum_frames:
em_image = em_image[0]
return da.from_array(ar_s, asarray=False), em_image, has_em_image, sweep, frame_start_index, valid, max_shift, frame_shifts
|
def _readcube(rawdata, frame_start_index, frame_list,
width, height, channel_number,
width_norm, height_norm, rebin_energy,
SI_dtype, sweep, frame_shifts,
sum_frames, read_em_image, only_valid_data, lazy): # pragma: no cover
"""
Read spectrum image (and SEM/STEM image) from pts file
Parameters
----------
rawdata : numpy.ndarray
Spectrum image part of pts file.
frame_start_index : np.ndarray of shape (sweep+1, ) or (0, )
The indices of each frame start. If length is zero, the indices will be
determined from rawdata.
frame_list : list
List of frames to be read.
width, height : int
The navigation dimension.
channel_number : int
The number of channels.
width_norm, height_norm : int
Rebin factor of the navigation dimension
rebin_energy : int
Rebin factor of the energy dimension
sweep : int
Number of sweep
frame_shifts : list
The list of image positions [[x0,y0,z0], ...]. The x, y, z values can
be negative. The data points outside data cube are ignored.
Returns
-------
data : numpy.ndarray or dask.array
The spectrum image with shape (frame, x, y, energy) if sum_frames is
False, otherwise (x, y, energy).
If lazy is True, the dask array is a COO sparse array.
em_data : numpy.ndarray or dask.array
The SEM/STEM image with shape (frame, x, y) if sum_frames is False,
otherwise (x, y).
has_em_image : bool
True if the stream contains SEM/STEM images.
sweep : int
The number of loaded frames.
frame_start_index : list
The indices of each frame start.
max_shift : numpy.ndarray
The maximum shifts of the origin in the navigation dimension
frame_shifts : numpy.ndarray
The shifts of the origin in the navigation dimension for each frame.
"""
import dask.array as da
# In case of sum_frames, spectrum image and SEM/STEM image are summing up to the same frame number.
# To avoid overflow on integration of SEM/STEM image, data type of np.uint32 is selected
# for 16 frames and over. (range of image intensity in each frame is 0-4095 (0-0xfff))
EM_dtype = np.uint16
frame_step = 1
if sum_frames:
frame_step = 0
if sweep >= 16:
EM_dtype = np.uint32
n_frames = 1
else:
n_frames = sweep + 1
if lazy:
hypermap = np.zeros((n_frames), dtype=EM_dtype) # dummy variable, not used
data_list = []
else:
hypermap = np.zeros((n_frames, height, width, channel_number),
dtype=SI_dtype)
em_image = np.zeros((n_frames, width, height), dtype=EM_dtype)
max_value = np.iinfo(SI_dtype).max
frame_shifts = np.asarray(frame_shifts)
frame_list = np.asarray(frame_list)
max_shift = frame_shifts[frame_list].max(axis=0)
min_shift = frame_shifts[frame_list].min(axis=0)
# sxyz = np.array([min_shift[0]-max_shift[0], min_shift[1]-max_shift[1],0])
min_shift[2]=0
max_shift[2]=0
sxyz = min_shift-max_shift
frame_shifts -= max_shift
width += sxyz[1]
height += sxyz[0]
if lazy:
readframe = _readframe_lazy
else:
readframe = _readframe_dense
frame_num = 0
p_start = 0
target_frame_num = 0
eof = rawdata.size
countup = 1
has_em_image = False
for frame_idx in frame_list:
if frame_idx < 0:
continue
elif frame_start_index[frame_idx] >= 0:
# if frame_idx is already indexed
p_start = frame_start_index[frame_idx]
elif frame_num < frame_idx and frame_start_index[frame_num] < 0:
# record start point of frame and skip frame
frame_start_index[frame_num] = p_start
p_start += _readframe_dummy(rawdata[p_start:])
frame_num += 1
continue
else:
frame_start_index[frame_idx] = p_start # = end of last frame
if frame_idx < frame_shifts.size:
fs = frame_shifts[frame_idx]
else:
fs = np.zeros(3, np.uint16)
_logger.info(f"Size of frame_shift array is too small. The frame {frame_idx} is not moved.")
length, frame_data, has_em, valid, max_valid = readframe(
rawdata[p_start:], 1,
hypermap[target_frame_num], em_image[target_frame_num],
width, height, channel_number,
width_norm, height_norm, rebin_energy,
fs[1], fs[0], fs[2], max_value)
has_em_image = has_em_image or has_em
if length == 0: # no data
break
if valid or not only_valid_data:
# accept last frame
if lazy:
data_list.append(frame_data)
frame_num += 1
target_frame_num += frame_step
else:
# incomplete data, not accepted
if sum_frames:
# subtract signal counts of last frame
_ = readframe(rawdata[p_start:], -1,
hypermap[target_frame_num], em_image[target_frame_num],
width, height, channel_number,
width_norm, height_norm, rebin_energy,
fs[1], fs[0],fs[2], max_value)
_logger.info("The last frame (sweep) is incomplete because the acquisition stopped during this frame. The partially acquired frame is ignored. Use 'sum_frames=False, only_valid_data=False' to read all frames individually, including the last partially completed frame.")
break
# else:
# pass
p_start += length
if not lazy:
if sum_frames:
# the first frame has integrated intensity
return hypermap[0,:height,:width], em_image[0,:height,:width], has_em_image, frame_num, frame_start_index, valid, max_shift, frame_shifts
else:
return hypermap[:target_frame_num,:height,:width], em_image[:target_frame_num,:height,:width], has_em_image, frame_num, frame_start_index, valid, max_shift, frame_shifts
# for lazy loading
from hyperspy.misc.io.fei_stream_readers import DenseSliceCOO
length = np.sum([len(d) for d in data_list])
# length = number of data points
# v : [[frame_no, y, x, energy_channel, 1], ....]
v = np.zeros(shape=(5, length), dtype=np.uint16)
ptr = 0
frame_count = 0
for d in data_list:
# d : data points in one frame
d = np.asarray(d)
# check if the pixels are in the valid data cube
# (frame_shifts make partially integrated area at the rim)
valid_cube = np.where((0<=d[:,0]) & (d[:,0]<height) & (0<=d[:,1]) & (d[:,1]<width) & (0<=d[:,2]) & (d[:,2]<channel_number))
d = d[valid_cube]
flen = len(d)
pv = v[:,ptr:ptr+flen]
pv[1:4, :] = np.array(d).transpose()
pv[0,:] = frame_count
pv[4,:] = 1
ptr += flen
frame_count += 1
if sum_frames:
data_shape = [height, width, channel_number]
ar_s = DenseSliceCOO(v[1:4], v[4], shape=data_shape)
else:
data_shape = [frame_count, height, width, channel_number]
ar_s = DenseSliceCOO(v[0:4], v[4], shape=data_shape)
if sum_frames:
em_image = em_image[0]
return da.from_array(ar_s, asarray=False), em_image, has_em_image, sweep, frame_start_index, valid, max_shift, frame_shifts
|
5,692 |
def binary_search_for_binom_tst(a, d, lo, hi, asc_order=False):
while lo < hi:
mid = (lo+hi)//2
midval = a(mid)
if midval < d:
if asc_order:
lo = mid+1
else:
hi = mid-1
elif midval > d:
if asc_order:
hi = mid-1
else:
lo = mid+1
else:
return mid
if a(lo)<=d:
return lo
else:
return lo-(asc_order-0.5)*2
|
def _binary_search_for_binom_tst(a, d, lo, hi, asc_order=False):
while lo < hi:
mid = (lo+hi)//2
midval = a(mid)
if midval < d:
if asc_order:
lo = mid+1
else:
hi = mid-1
elif midval > d:
if asc_order:
hi = mid-1
else:
lo = mid+1
else:
return mid
if a(lo)<=d:
return lo
else:
return lo-(asc_order-0.5)*2
|
42,618 |
def asset_from_gemini(symbol: str) -> Asset:
"""May raise:
- DeserializationError
- UnsupportedAsset
- UnknownAsset
"""
if not isinstance(symbol, str):
raise DeserializationError(f'Got non-string type {type(symbol)} for gemini asset')
if symbol in UNSUPPORTED_GEMENI_ASSETS:
raise UnsupportedAsset(symbol)
name = GEMENI_TO_WORLD.get(symbol, symbol)
return symbol_to_asset_or_token(name)
|
def asset_from_gemini(symbol: str) -> Asset:
"""May raise:
- DeserializationError
- UnsupportedAsset
- UnknownAsset
"""
if not isinstance(symbol, str):
raise DeserializationError(f'Got non-string type {type(symbol)} for gemini asset')
if symbol in UNSUPPORTED_GEMINI_ASSETS:
raise UnsupportedAsset(symbol)
name = GEMENI_TO_WORLD.get(symbol, symbol)
return symbol_to_asset_or_token(name)
|
52,840 |
def lint_inputs(tool_xml, lint_ctx):
"""Lint parameters in a tool's inputs block."""
datasource = is_datasource(tool_xml)
inputs = tool_xml.findall("./inputs//param")
num_inputs = 0
for param in inputs:
num_inputs += 1
param_attrib = param.attrib
has_errors = False
if "type" not in param_attrib:
lint_ctx.error("Found param input with no type specified.")
has_errors = True
if "name" not in param_attrib and "argument" not in param_attrib:
lint_ctx.error("Found param input with no name specified.")
has_errors = True
if has_errors:
continue
param_type = param_attrib["type"]
param_name = _parse_name(param_attrib.get("name"), param_attrib.get("argument"))
if not is_valid_cheetah_placeholder(param_name):
lint_ctx.warn("Param input [%s] is not a valid Cheetah placeholder.", param_name)
if param_type == "data":
if "format" not in param_attrib:
lint_ctx.warn("Param input [%s] with no format specified - 'data' format will be assumed.", param_name)
elif param_type == "select":
dynamic_options = param.get("dynamic_options", None)
if dynamic_options is None:
dynamic_options = param.find("options")
select_options = param.findall('./option')
if any(['value' not in option.attrib for option in select_options]):
lint_ctx.error("Select [%s] has option without value", param_name)
if dynamic_options is None and len(select_options) == 0:
message = "No options defined for select [%s]" % param_name
lint_ctx.warn(message)
if param_attrib["display"] == "radio":
if param_attrib["optional"] == "true":
lint_ctx.error('Select [%s] display="radio" is incompatible with optional="true"', param_name)
if param_attrib["multiple"] == "true":
lint_ctx.error('Select [%s] display="radio" is incompatible with multiple="true"', param_name)
# TODO: Validate type, much more...
conditional_selects = tool_xml.findall("./inputs//conditional")
for conditional in conditional_selects:
conditional_name = conditional.get('name')
if not conditional_name:
lint_ctx.error("Conditional without a name")
if conditional.get("value_from"):
# Probably only the upload tool use this, no children elements
continue
first_param = conditional.find("param")
if first_param is None:
lint_ctx.error("Conditional '%s' has no child <param>" % conditional_name)
continue
first_param_type = first_param.get('type')
if first_param_type not in ['select', 'boolean']:
lint_ctx.warn("Conditional '%s' first param should have type=\"select\" /> or type=\"boolean\"" % conditional_name)
continue
if first_param_type == 'select':
select_options = _find_with_attribute(first_param, 'option', 'value')
option_ids = [option.get('value') for option in select_options]
else: # boolean
option_ids = [
first_param.get('truevalue', 'true'),
first_param.get('falsevalue', 'false')
]
if string_as_bool(first_param.get('optional', False)):
lint_ctx.warn("Conditional test parameter cannot be optional")
whens = conditional.findall('./when')
if any('value' not in when.attrib for when in whens):
lint_ctx.error("When without value")
when_ids = [w.get('value') for w in whens]
for option_id in option_ids:
if option_id not in when_ids:
lint_ctx.warn(f"No <when /> block found for {first_param_type} option '{option_id}' inside conditional '{conditional_name}'")
for when_id in when_ids:
if when_id not in option_ids:
if first_param_type == 'select':
lint_ctx.warn(f"No <option /> found for when block '{when_id}' inside conditional '{conditional_name}'")
else:
lint_ctx.warn(f"No truevalue/falsevalue found for when block '{when_id}' inside conditional '{conditional_name}'")
if datasource:
for datasource_tag in ('display', 'uihints'):
if not any([param.tag == datasource_tag for param in inputs]):
lint_ctx.info("%s tag usually present in data sources" % datasource_tag)
if num_inputs:
lint_ctx.info("Found %d input parameters.", num_inputs)
else:
if datasource:
lint_ctx.info("No input parameters, OK for data sources")
else:
lint_ctx.warn("Found no input parameters.")
|
def lint_inputs(tool_xml, lint_ctx):
"""Lint parameters in a tool's inputs block."""
datasource = is_datasource(tool_xml)
inputs = tool_xml.findall("./inputs//param")
num_inputs = 0
for param in inputs:
num_inputs += 1
param_attrib = param.attrib
has_errors = False
if "type" not in param_attrib:
lint_ctx.error("Found param input with no type specified.")
has_errors = True
if "name" not in param_attrib and "argument" not in param_attrib:
lint_ctx.error("Found param input with no name specified.")
has_errors = True
if has_errors:
continue
param_type = param_attrib["type"]
param_name = _parse_name(param_attrib.get("name"), param_attrib.get("argument"))
if not is_valid_cheetah_placeholder(param_name):
lint_ctx.warn("Param input [%s] is not a valid Cheetah placeholder.", param_name)
if param_type == "data":
if "format" not in param_attrib:
lint_ctx.warn("Param input [%s] with no format specified - 'data' format will be assumed.", param_name)
elif param_type == "select":
dynamic_options = param.get("dynamic_options", None)
if dynamic_options is None:
dynamic_options = param.find("options")
select_options = param.findall('./option')
if any(['value' not in option.attrib for option in select_options]):
lint_ctx.error("Select [%s] has option without value", param_name)
if dynamic_options is None and len(select_options) == 0:
message = "No options defined for select [%s]" % param_name
lint_ctx.warn(message)
if param_attrib["display"] == "radio":
multiple = string_as_bool(param_attrib.get("multiple", False))
if multiple:
lint_ctx.error('Select [%s] display="radio" is incompatible with multiple="true"', param_name)
if string_as_bool(param_attrib.get("optional", multiple)):
lint_ctx.error('Select [%s] display="radio" is incompatible with optional="true"', param_name)
# TODO: Validate type, much more...
conditional_selects = tool_xml.findall("./inputs//conditional")
for conditional in conditional_selects:
conditional_name = conditional.get('name')
if not conditional_name:
lint_ctx.error("Conditional without a name")
if conditional.get("value_from"):
# Probably only the upload tool use this, no children elements
continue
first_param = conditional.find("param")
if first_param is None:
lint_ctx.error("Conditional '%s' has no child <param>" % conditional_name)
continue
first_param_type = first_param.get('type')
if first_param_type not in ['select', 'boolean']:
lint_ctx.warn("Conditional '%s' first param should have type=\"select\" /> or type=\"boolean\"" % conditional_name)
continue
if first_param_type == 'select':
select_options = _find_with_attribute(first_param, 'option', 'value')
option_ids = [option.get('value') for option in select_options]
else: # boolean
option_ids = [
first_param.get('truevalue', 'true'),
first_param.get('falsevalue', 'false')
]
if string_as_bool(first_param.get('optional', False)):
lint_ctx.warn("Conditional test parameter cannot be optional")
whens = conditional.findall('./when')
if any('value' not in when.attrib for when in whens):
lint_ctx.error("When without value")
when_ids = [w.get('value') for w in whens]
for option_id in option_ids:
if option_id not in when_ids:
lint_ctx.warn(f"No <when /> block found for {first_param_type} option '{option_id}' inside conditional '{conditional_name}'")
for when_id in when_ids:
if when_id not in option_ids:
if first_param_type == 'select':
lint_ctx.warn(f"No <option /> found for when block '{when_id}' inside conditional '{conditional_name}'")
else:
lint_ctx.warn(f"No truevalue/falsevalue found for when block '{when_id}' inside conditional '{conditional_name}'")
if datasource:
for datasource_tag in ('display', 'uihints'):
if not any([param.tag == datasource_tag for param in inputs]):
lint_ctx.info("%s tag usually present in data sources" % datasource_tag)
if num_inputs:
lint_ctx.info("Found %d input parameters.", num_inputs)
else:
if datasource:
lint_ctx.info("No input parameters, OK for data sources")
else:
lint_ctx.warn("Found no input parameters.")
|
27,999 |
def perform_analysis(args, skip_handler, context, actions, metadata_tool,
compile_cmd_count):
"""
Perform static analysis via the given (or if not, all) analyzers,
in the given analysis context for the supplied build actions.
Additionally, insert statistical information into the metadata dict.
"""
ctu_reanalyze_on_failure = 'ctu_reanalyze_on_failure' in args and \
args.ctu_reanalyze_on_failure
if ctu_reanalyze_on_failure:
LOG.warning("Usage of a DEPRECATED FLAG!\n"
"The --ctu-reanalyze-on-failure flag will be removed "
"in the upcoming releases!")
analyzers = args.analyzers if 'analyzers' in args \
else analyzer_types.supported_analyzers
analyzers, _ = analyzer_types.check_supported_analyzers(
analyzers, context)
ctu_collect = False
ctu_analyze = False
ctu_dir = ''
if 'ctu_phases' in args:
ctu_dir = os.path.join(args.output_path, 'ctu-dir')
args.ctu_dir = ctu_dir
if ClangSA.ANALYZER_NAME not in analyzers:
LOG.error("CTU can only be used with the clang static analyzer.")
return
ctu_collect = args.ctu_phases[0]
ctu_analyze = args.ctu_phases[1]
if 'stats_enabled' in args and args.stats_enabled:
if ClangSA.ANALYZER_NAME not in analyzers:
LOG.debug("Statistics can only be used with "
"the Clang Static Analyzer.")
return
config_map = analyzer_types.build_config_handlers(args, context, analyzers)
available_checkers = set()
# Add profile names to the checkers list so we will not warn
# if a profile is enabled but there is no checker with that name.
available_checkers.update(context.available_profiles.keys())
# Collect all the available checkers from the enabled analyzers.
for analyzer in config_map.items():
_, analyzer_cfg = analyzer
for analyzer_checker in analyzer_cfg.checks().items():
checker_name, _ = analyzer_checker
available_checkers.add(checker_name)
if 'ordered_checkers' in args:
missing_checkers = checkers.available(args.ordered_checkers,
available_checkers)
if missing_checkers:
LOG.warning("No checker(s) with these names was found:\n%s",
'\n'.join(missing_checkers))
LOG.warning("Please review the checker names.\n"
"In the next release the analysis will not start "
"with invalid checker names.")
if 'stats_enabled' in args:
config_map[ClangSA.ANALYZER_NAME].set_checker_enabled(
SpecialReturnValueCollector.checker_analyze)
config_map[ClangSA.ANALYZER_NAME].set_checker_enabled(
ReturnValueCollector.checker_analyze)
# Statistics collector checkers must be explicitly disabled
# as they trash the output.
if "clangsa" in analyzers:
config_map[ClangSA.ANALYZER_NAME].set_checker_enabled(
SpecialReturnValueCollector.checker_collect, False)
config_map[ClangSA.ANALYZER_NAME].set_checker_enabled(
ReturnValueCollector.checker_collect, False)
check_env = env.extend(context.path_env_extra,
context.ld_lib_path_extra)
# CHECK if any checkers are enabled and only execute that analyzer
# where at least one checker is enabled.
for analyzer in config_map.items():
analyzer_name, analyzer_cfg = analyzer
if not analyzer_cfg.any_checker_enabled():
LOG.warning(f"Disabling {analyzer_name} no checkers were enabled.")
analyzers = [a for a in analyzers if a != analyzer_name]
actions = prepare_actions(actions, analyzers)
# Save some metadata information.
for analyzer in analyzers:
metadata_info = {
'checkers': {},
'analyzer_statistics': {
"failed": 0,
"failed_sources": [],
"successful": 0,
"version": None}}
for check, data in config_map[analyzer].checks().items():
state, _ = data
metadata_info['checkers'].update({
check: state == CheckerState.enabled})
version = config_map[analyzer].get_version(check_env)
metadata_info['analyzer_statistics']['version'] = version
metadata_tool['analyzers'][analyzer] = metadata_info
if 'makefile' in args and args.makefile:
statistics_data = __get_statistics_data(args)
ctu_data = None
if ctu_collect or statistics_data:
ctu_data = __get_ctu_data(config_map, ctu_dir)
makefile_creator = MakeFileCreator(analyzers, args.output_path,
config_map, context, skip_handler,
ctu_collect, statistics_data,
ctu_data)
makefile_creator.create(actions)
return
if ctu_collect:
shutil.rmtree(ctu_dir, ignore_errors=True)
elif ctu_analyze and not os.path.exists(ctu_dir):
LOG.error("CTU directory: '%s' does not exist.", ctu_dir)
return
start_time = time.time()
# Use Manager to create data objects which can be
# safely shared between processes.
manager = SyncManager()
manager.start(__mgr_init)
actions_map = create_actions_map(actions, manager)
# Setting to not None value will enable statistical analysis features.
statistics_data = __get_statistics_data(args)
if statistics_data:
statistics_data = manager.dict(statistics_data)
if ctu_collect or statistics_data:
ctu_data = None
if ctu_collect or ctu_analyze:
ctu_data = manager.dict(__get_ctu_data(config_map, ctu_dir))
pre_analyze = [a for a in actions
if a.analyzer_type == ClangSA.ANALYZER_NAME]
pre_anal_skip_handler = None
# Skip list is applied only in pre-analysis
# if --ctu-collect or --stats-collect was called explicitly
if ((ctu_collect and not ctu_analyze)
or ("stats_output" in args and args.stats_output)):
pre_anal_skip_handler = skip_handler
clangsa_config = config_map.get(ClangSA.ANALYZER_NAME)
if clangsa_config is not None:
pre_analysis_manager.run_pre_analysis(pre_analyze,
context,
clangsa_config,
args.jobs,
pre_anal_skip_handler,
ctu_data,
statistics_data,
manager)
else:
LOG.error("Can not run pre analysis without clang "
"static analyzer configuration.")
if 'stats_output' in args and args.stats_output:
return
if 'stats_dir' in args and args.stats_dir:
statistics_data = manager.dict({'stats_out_dir': args.stats_dir})
if ctu_analyze or statistics_data or (not ctu_analyze and not ctu_collect):
LOG.info("Starting static analysis ...")
analysis_manager.start_workers(actions_map, actions, context,
config_map, args.jobs,
args.output_path,
skip_handler,
metadata_tool,
'quiet' in args,
'capture_analysis_output' in args,
args.timeout if 'timeout' in args
else None,
ctu_reanalyze_on_failure,
statistics_data,
manager,
compile_cmd_count)
LOG.info("Analysis finished.")
LOG.info("To view results in the terminal use the "
"\"CodeChecker parse\" command.")
LOG.info("To store results use the \"CodeChecker store\" command.")
LOG.info("See --help and the user guide for further options about"
" parsing and storing the reports.")
LOG.info("----=================----")
end_time = time.time()
LOG.info("Analysis length: %s sec.", end_time - start_time)
metadata_tool['timestamps'] = {'begin': start_time,
'end': end_time}
if ctu_collect and ctu_analyze:
shutil.rmtree(ctu_dir, ignore_errors=True)
manager.shutdown()
|
def perform_analysis(args, skip_handler, context, actions, metadata_tool,
compile_cmd_count):
"""
Perform static analysis via the given (or if not, all) analyzers,
in the given analysis context for the supplied build actions.
Additionally, insert statistical information into the metadata dict.
"""
ctu_reanalyze_on_failure = 'ctu_reanalyze_on_failure' in args and \
args.ctu_reanalyze_on_failure
if ctu_reanalyze_on_failure:
LOG.warning("Usage of a DEPRECATED FLAG!\n"
"The --ctu-reanalyze-on-failure flag will be removed "
"in the upcoming releases!")
analyzers = args.analyzers if 'analyzers' in args \
else analyzer_types.supported_analyzers
analyzers, _ = analyzer_types.check_supported_analyzers(
analyzers, context)
ctu_collect = False
ctu_analyze = False
ctu_dir = ''
if 'ctu_phases' in args:
ctu_dir = os.path.join(args.output_path, 'ctu-dir')
args.ctu_dir = ctu_dir
if ClangSA.ANALYZER_NAME not in analyzers:
LOG.error("CTU can only be used with the clang static analyzer.")
return
ctu_collect = args.ctu_phases[0]
ctu_analyze = args.ctu_phases[1]
if 'stats_enabled' in args and args.stats_enabled:
if ClangSA.ANALYZER_NAME not in analyzers:
LOG.debug("Statistics can only be used with "
"the Clang Static Analyzer.")
return
config_map = analyzer_types.build_config_handlers(args, context, analyzers)
available_checkers = set()
# Add profile names to the checkers list so we will not warn
# if a profile is enabled but there is no checker with that name.
available_checkers.update(context.available_profiles.keys())
# Collect all the available checkers from the enabled analyzers.
for analyzer in config_map.items():
_, analyzer_cfg = analyzer
for analyzer_checker in analyzer_cfg.checks().items():
checker_name, _ = analyzer_checker
available_checkers.add(checker_name)
if 'ordered_checkers' in args:
missing_checkers = checkers.available(args.ordered_checkers,
available_checkers)
if missing_checkers:
LOG.warning("No checker(s) with these names was found:\n%s",
'\n'.join(missing_checkers))
LOG.warning("Please review the checker names.\n"
"In the next release the analysis will not start "
"with invalid checker names.")
if 'stats_enabled' in args:
config_map[ClangSA.ANALYZER_NAME].set_checker_enabled(
SpecialReturnValueCollector.checker_analyze)
config_map[ClangSA.ANALYZER_NAME].set_checker_enabled(
ReturnValueCollector.checker_analyze)
# Statistics collector checkers must be explicitly disabled
# as they trash the output.
if "clangsa" in analyzers:
config_map[ClangSA.ANALYZER_NAME].set_checker_enabled(
SpecialReturnValueCollector.checker_collect, False)
config_map[ClangSA.ANALYZER_NAME].set_checker_enabled(
ReturnValueCollector.checker_collect, False)
check_env = env.extend(context.path_env_extra,
context.ld_lib_path_extra)
# CHECK if any checkers are enabled and only execute that analyzer
# where at least one checker is enabled.
for analyzer in config_map.items():
analyzer_name, analyzer_cfg = analyzer
if not analyzer_cfg.any_checker_enabled():
LOG.warning(f"Disabling {analyzer_name} no checkers were enabled.")
analyzers.remove(analyzer_name)
actions = prepare_actions(actions, analyzers)
# Save some metadata information.
for analyzer in analyzers:
metadata_info = {
'checkers': {},
'analyzer_statistics': {
"failed": 0,
"failed_sources": [],
"successful": 0,
"version": None}}
for check, data in config_map[analyzer].checks().items():
state, _ = data
metadata_info['checkers'].update({
check: state == CheckerState.enabled})
version = config_map[analyzer].get_version(check_env)
metadata_info['analyzer_statistics']['version'] = version
metadata_tool['analyzers'][analyzer] = metadata_info
if 'makefile' in args and args.makefile:
statistics_data = __get_statistics_data(args)
ctu_data = None
if ctu_collect or statistics_data:
ctu_data = __get_ctu_data(config_map, ctu_dir)
makefile_creator = MakeFileCreator(analyzers, args.output_path,
config_map, context, skip_handler,
ctu_collect, statistics_data,
ctu_data)
makefile_creator.create(actions)
return
if ctu_collect:
shutil.rmtree(ctu_dir, ignore_errors=True)
elif ctu_analyze and not os.path.exists(ctu_dir):
LOG.error("CTU directory: '%s' does not exist.", ctu_dir)
return
start_time = time.time()
# Use Manager to create data objects which can be
# safely shared between processes.
manager = SyncManager()
manager.start(__mgr_init)
actions_map = create_actions_map(actions, manager)
# Setting to not None value will enable statistical analysis features.
statistics_data = __get_statistics_data(args)
if statistics_data:
statistics_data = manager.dict(statistics_data)
if ctu_collect or statistics_data:
ctu_data = None
if ctu_collect or ctu_analyze:
ctu_data = manager.dict(__get_ctu_data(config_map, ctu_dir))
pre_analyze = [a for a in actions
if a.analyzer_type == ClangSA.ANALYZER_NAME]
pre_anal_skip_handler = None
# Skip list is applied only in pre-analysis
# if --ctu-collect or --stats-collect was called explicitly
if ((ctu_collect and not ctu_analyze)
or ("stats_output" in args and args.stats_output)):
pre_anal_skip_handler = skip_handler
clangsa_config = config_map.get(ClangSA.ANALYZER_NAME)
if clangsa_config is not None:
pre_analysis_manager.run_pre_analysis(pre_analyze,
context,
clangsa_config,
args.jobs,
pre_anal_skip_handler,
ctu_data,
statistics_data,
manager)
else:
LOG.error("Can not run pre analysis without clang "
"static analyzer configuration.")
if 'stats_output' in args and args.stats_output:
return
if 'stats_dir' in args and args.stats_dir:
statistics_data = manager.dict({'stats_out_dir': args.stats_dir})
if ctu_analyze or statistics_data or (not ctu_analyze and not ctu_collect):
LOG.info("Starting static analysis ...")
analysis_manager.start_workers(actions_map, actions, context,
config_map, args.jobs,
args.output_path,
skip_handler,
metadata_tool,
'quiet' in args,
'capture_analysis_output' in args,
args.timeout if 'timeout' in args
else None,
ctu_reanalyze_on_failure,
statistics_data,
manager,
compile_cmd_count)
LOG.info("Analysis finished.")
LOG.info("To view results in the terminal use the "
"\"CodeChecker parse\" command.")
LOG.info("To store results use the \"CodeChecker store\" command.")
LOG.info("See --help and the user guide for further options about"
" parsing and storing the reports.")
LOG.info("----=================----")
end_time = time.time()
LOG.info("Analysis length: %s sec.", end_time - start_time)
metadata_tool['timestamps'] = {'begin': start_time,
'end': end_time}
if ctu_collect and ctu_analyze:
shutil.rmtree(ctu_dir, ignore_errors=True)
manager.shutdown()
|
281 |
def sample_prior_predictive(samples=500,
model: Optional[Model]=None,
vars: Optional[TIterable[str]] = None,
var_names: Optional[TIterable[str]] = None,
random_seed=None) -> Dict[str, np.ndarray]:
"""Generate samples from the prior predictive distribution.
Parameters
----------
samples : int
Number of samples from the prior predictive to generate. Defaults to 500.
model : Model (optional if in `with` context)
vars : Iterable[str]
A list of names of variables for which to compute the posterior predictive
samples. *DEPRECATED* - Use `var_names` argument instead.
var_names : Iterable[str]
A list of names of variables for which to compute the posterior predictive
samples. Defaults to `model.named_vars`.
random_seed : int
Seed for the random number generator.
Returns
-------
dict
Dictionary with variable names as keys. The values are numpy arrays of prior
samples.
"""
model = modelcontext(model)
if vars is None and var_names is None:
vars = set(model.named_vars.keys())
vars_ = model.named_vars
elif vars is None:
vars = var_names
vars_ = vars
elif vars is not None:
warnings.warn("vars argument is deprecated in favor of var_names.",
DeprecationWarning)
vars_ = vars
else:
raise ValueError("Cannot supply both vars and var_names arguments.")
vars = cast(TIterable[str], vars) # tell mypy that vars cannot be None here.
if random_seed is not None:
np.random.seed(random_seed)
names = get_default_varnames(vars_, include_transformed=False)
# draw_values fails with auto-transformed variables. transform them later!
values = draw_values([model[name] for name in names], size=samples)
data = {k: v for k, v in zip(names, values)}
if data is None:
raise AssertionError("No variables sampled: attempting to sample %s"%names)
prior = {} # type: Dict[str, np.ndarray]
for var_name in vars:
if var_name in data:
prior[var_name] = data[var_name]
elif is_transformed_name(var_name):
untransformed = get_untransformed_name(var_name)
if untransformed in data:
prior[var_name] = model[untransformed].transformation.forward_val(
data[untransformed])
return prior
|
def sample_prior_predictive(samples=500,
model: Optional[Model]=None,
vars: Optional[TIterable[str]] = None,
var_names: Optional[TIterable[str]] = None,
random_seed=None) -> Dict[str, np.ndarray]:
"""Generate samples from the prior predictive distribution.
Parameters
----------
samples : int
Number of samples from the prior predictive to generate. Defaults to 500.
model : Model (optional if in `with` context)
vars : Iterable[str]
A list of names of variables for which to compute the posterior predictive
samples. *DEPRECATED* - Use `var_names` argument instead.
var_names : Iterable[str]
A list of names of variables for which to compute the posterior predictive
samples. Defaults to ``model.named_vars``.
random_seed : int
Seed for the random number generator.
Returns
-------
dict
Dictionary with variable names as keys. The values are numpy arrays of prior
samples.
"""
model = modelcontext(model)
if vars is None and var_names is None:
vars = set(model.named_vars.keys())
vars_ = model.named_vars
elif vars is None:
vars = var_names
vars_ = vars
elif vars is not None:
warnings.warn("vars argument is deprecated in favor of var_names.",
DeprecationWarning)
vars_ = vars
else:
raise ValueError("Cannot supply both vars and var_names arguments.")
vars = cast(TIterable[str], vars) # tell mypy that vars cannot be None here.
if random_seed is not None:
np.random.seed(random_seed)
names = get_default_varnames(vars_, include_transformed=False)
# draw_values fails with auto-transformed variables. transform them later!
values = draw_values([model[name] for name in names], size=samples)
data = {k: v for k, v in zip(names, values)}
if data is None:
raise AssertionError("No variables sampled: attempting to sample %s"%names)
prior = {} # type: Dict[str, np.ndarray]
for var_name in vars:
if var_name in data:
prior[var_name] = data[var_name]
elif is_transformed_name(var_name):
untransformed = get_untransformed_name(var_name)
if untransformed in data:
prior[var_name] = model[untransformed].transformation.forward_val(
data[untransformed])
return prior
|
42,923 |
def c_1(clique: list, graph: nx.Graph):
"""Generates the set :math:`C_1` of nodes that are connected to all but one of the nodes in
the input clique subgraph
The set :math:`C_1` is defined in :cite:`pullan2006phased` and is used to determine outside
nodes that can be swapped with clique nodes to create a new clique.
Example usage:
.. code-block::
>>> from strawberryfields.apps.graph import utils
>>> import networkx as nx
>>> graph = nx.wheel_graph(5)
>>> subgraph = [0, 1, 2] # these nodes form a clique
>>> utils.c_1(subgraph, graph)
[(1, 3), (2, 4)]
Args:
clique (list[int]): A subgraph specified by a list of nodes; the subgraph must be a clique
graph (nx.Graph): the input graph
Returns:
list[int]: A list of tuples. The first node in the tuple is the node in the clique and the
second node is the outside node it can be swapped with.
"""
if not is_clique(graph.subgraph(clique)):
raise ValueError("Input subgraph is not a clique")
clique = set(clique)
c_1_nodes = []
non_clique_nodes = set(graph.nodes) - clique
for i in non_clique_nodes:
neighbors_in_subgraph = clique.intersection(graph.neighbors(i))
if len(neighbors_in_subgraph) == len(clique) - 1:
to_swap = clique - neighbors_in_subgraph
(i_clique,) = to_swap
c_1_nodes.append((i_clique, i))
return c_1_nodes
|
def c_1(clique: list, graph: nx.Graph):
"""Generates the set :math:`C_1` of nodes that are connected to all but one of the nodes in
the input clique subgraph
The set :math:`C_1` is defined in :cite:`pullan2006phased` and is used to determine outside
nodes that can be swapped with clique nodes to create a new clique.
Example usage:
.. code-block::
>>> from strawberryfields.apps.graph import utils
>>> import networkx as nx
>>> graph = nx.wheel_graph(5)
>>> subgraph = [0, 1, 2] # these nodes form a clique
>>> utils.c_1(subgraph, graph)
[(1, 3), (2, 4)]
Args:
clique (list[int]): A subgraph specified by a list of nodes; the subgraph must be a clique
graph (nx.Graph): the input graph
Returns:
list[tuple(int)]: A list of tuples. The first node in the tuple is the node in the clique and the
second node is the outside node it can be swapped with.
"""
if not is_clique(graph.subgraph(clique)):
raise ValueError("Input subgraph is not a clique")
clique = set(clique)
c_1_nodes = []
non_clique_nodes = set(graph.nodes) - clique
for i in non_clique_nodes:
neighbors_in_subgraph = clique.intersection(graph.neighbors(i))
if len(neighbors_in_subgraph) == len(clique) - 1:
to_swap = clique - neighbors_in_subgraph
(i_clique,) = to_swap
c_1_nodes.append((i_clique, i))
return c_1_nodes
|
1,139 |
def loadpkl(infile):
"""Load a zipped or plain cPickled file."""
infile = Path(infile)
fmlogger.debug("Loading pkl: %s", infile)
pklopen = gzip.open if infile.suffix == ".pklz" else open
t = time()
timeout = float(config.get("execution", "job_finished_timeout"))
timed_out = True
while (time() - t) < timeout:
try:
glob(str(infile)).pop()
timed_out = False
break
except Exception as e:
fmlogger.debug(e)
sleep(2)
if timed_out:
error_message = (
"Ressult file {0} expected, but "
"does not exist after ({1}) "
"seconds.".format(infile, timeout)
)
raise IOError(error_message)
with pklopen(str(infile), "rb") as pkl_file:
pkl_contents = pkl_file.read()
pkl_metadata = None
# Look if pkl file contains version metadata
idx = pkl_contents.find(b"\n")
if idx >= 0:
try:
pkl_metadata = json.loads(pkl_contents[:idx])
except (UnicodeDecodeError, json.JSONDecodeError):
# Could not get version info
pass
else:
# On success, skip JSON metadata
pkl_contents = pkl_contents[idx + 1 :]
# Pickle files may contain relative paths that must be resolved relative
# to the working directory, so use indirectory while attempting to load
unpkl = None
try:
with indirectory(infile.parent):
unpkl = pickle.loads(pkl_contents)
except UnicodeDecodeError:
# Was this pickle created with Python 2.x?
with indirectory(infile.parent):
unpkl = pickle.loads(pkl_contents, fix_imports=True, encoding="utf-8")
fmlogger.info("Successfully loaded pkl in compatibility mode.")
# Unpickling problems
except Exception as e:
if pkl_metadata and "version" in pkl_metadata:
from nipype import __version__ as version
if pkl_metadata["version"] != version:
fmlogger.error(
"""\
Attempted to open a results file generated by Nipype version %s, \
with an incompatible Nipype version (%s)""",
pkl_metadata["version"],
version,
)
raise e
fmlogger.warning(
"""\
No metadata was found in the pkl file. Make sure you are currently using \
the same Nipype version from the generated pkl."""
)
raise e
if unpkl is None:
raise ValueError("Loading %s resulted in None." % infile)
return unpkl
|
def loadpkl(infile):
"""Load a zipped or plain cPickled file."""
infile = Path(infile)
fmlogger.debug("Loading pkl: %s", infile)
pklopen = gzip.open if infile.suffix == ".pklz" else open
t = time()
timeout = float(config.get("execution", "job_finished_timeout"))
timed_out = True
while (time() - t) < timeout:
try:
glob(str(infile)).pop()
timed_out = False
break
except Exception as e:
fmlogger.debug(e)
sleep(2)
if timed_out:
error_message = (
"Result file {0} expected, but "
"does not exist after ({1}) "
"seconds.".format(infile, timeout)
)
raise IOError(error_message)
with pklopen(str(infile), "rb") as pkl_file:
pkl_contents = pkl_file.read()
pkl_metadata = None
# Look if pkl file contains version metadata
idx = pkl_contents.find(b"\n")
if idx >= 0:
try:
pkl_metadata = json.loads(pkl_contents[:idx])
except (UnicodeDecodeError, json.JSONDecodeError):
# Could not get version info
pass
else:
# On success, skip JSON metadata
pkl_contents = pkl_contents[idx + 1 :]
# Pickle files may contain relative paths that must be resolved relative
# to the working directory, so use indirectory while attempting to load
unpkl = None
try:
with indirectory(infile.parent):
unpkl = pickle.loads(pkl_contents)
except UnicodeDecodeError:
# Was this pickle created with Python 2.x?
with indirectory(infile.parent):
unpkl = pickle.loads(pkl_contents, fix_imports=True, encoding="utf-8")
fmlogger.info("Successfully loaded pkl in compatibility mode.")
# Unpickling problems
except Exception as e:
if pkl_metadata and "version" in pkl_metadata:
from nipype import __version__ as version
if pkl_metadata["version"] != version:
fmlogger.error(
"""\
Attempted to open a results file generated by Nipype version %s, \
with an incompatible Nipype version (%s)""",
pkl_metadata["version"],
version,
)
raise e
fmlogger.warning(
"""\
No metadata was found in the pkl file. Make sure you are currently using \
the same Nipype version from the generated pkl."""
)
raise e
if unpkl is None:
raise ValueError("Loading %s resulted in None." % infile)
return unpkl
|
14,584 |
def load_pldata_file(directory, topic):
ts_file = os.path.join(directory, topic + "_timestamps.npy")
msgpack_file = os.path.join(directory, topic + ".pldata")
try:
data = collections.deque()
topics = collections.deque()
data_ts = np.load(ts_file)
with open(msgpack_file, "rb") as fh:
for topic, payload in msgpack.Unpacker(
fh, raw=False, use_list=False, strict_map_key=False
):
data.append(Serialized_Dict(msgpack_bytes=payload))
topics.append(topic)
except FileNotFoundError:
data = []
data_ts = []
topics = []
return PLData(data, data_ts, topics)
|
def load_pldata_file(directory, topic):
ts_file = os.path.join(directory, topic + "_timestamps.npy")
msgpack_file = os.path.join(directory, topic + ".pldata")
try:
data = collections.deque()
topics = collections.deque()
data_ts = np.load(ts_file)
with open(msgpack_file, "rb") as fh:
for topic, payload in msgpack.Unpacker(
fh, use_list=False, strict_map_key=False
):
data.append(Serialized_Dict(msgpack_bytes=payload))
topics.append(topic)
except FileNotFoundError:
data = []
data_ts = []
topics = []
return PLData(data, data_ts, topics)
|
42,635 |
def add_starting_nfts(datahandler):
"""Adds a time serie for an account owning a NFT"""
datahandler.db.add_asset_identifiers(['_nft_pickle'])
balances = [
DBAssetBalance(
category=BalanceType.ASSET,
time=Timestamp(1488326400),
asset=Asset('_nft_pickle'),
amount='1',
usd_value='1000',
), DBAssetBalance(
category=BalanceType.ASSET,
time=Timestamp(1488426400),
asset=Asset('_nft_pickle'),
amount='1',
usd_value='1000',
), DBAssetBalance(
category=BalanceType.ASSET,
time=Timestamp(1488526400),
asset=Asset('_nft_pickle'),
amount='2',
usd_value='2000',
), DBAssetBalance(
category=BalanceType.ASSET,
time=Timestamp(1488626400),
asset=Asset('_nft_pickle'),
amount='1',
usd_value='1000',
),
]
datahandler.db.add_multiple_balances(balances)
datahandler.db.conn.commit()
location_data = [
LocationData(
time=Timestamp(1488326400),
location=Location.TOTAL.serialize_for_db(), # pylint: disable=no-member
usd_value='3000',
),
LocationData(
time=Timestamp(1488426400),
location=Location.TOTAL.serialize_for_db(), # pylint: disable=no-member
usd_value='4000',
),
LocationData(
time=Timestamp(1488526400),
location=Location.TOTAL.serialize_for_db(), # pylint: disable=no-member
usd_value='5000',
),
LocationData(
time=Timestamp(1488626400),
location=Location.TOTAL.serialize_for_db(), # pylint: disable=no-member
usd_value='5500',
),
]
datahandler.db.add_multiple_location_data(location_data)
|
def add_starting_nfts(datahandler):
"""Adds a time series for an account owning a NFT"""
datahandler.db.add_asset_identifiers(['_nft_pickle'])
balances = [
DBAssetBalance(
category=BalanceType.ASSET,
time=Timestamp(1488326400),
asset=Asset('_nft_pickle'),
amount='1',
usd_value='1000',
), DBAssetBalance(
category=BalanceType.ASSET,
time=Timestamp(1488426400),
asset=Asset('_nft_pickle'),
amount='1',
usd_value='1000',
), DBAssetBalance(
category=BalanceType.ASSET,
time=Timestamp(1488526400),
asset=Asset('_nft_pickle'),
amount='2',
usd_value='2000',
), DBAssetBalance(
category=BalanceType.ASSET,
time=Timestamp(1488626400),
asset=Asset('_nft_pickle'),
amount='1',
usd_value='1000',
),
]
datahandler.db.add_multiple_balances(balances)
datahandler.db.conn.commit()
location_data = [
LocationData(
time=Timestamp(1488326400),
location=Location.TOTAL.serialize_for_db(), # pylint: disable=no-member
usd_value='3000',
),
LocationData(
time=Timestamp(1488426400),
location=Location.TOTAL.serialize_for_db(), # pylint: disable=no-member
usd_value='4000',
),
LocationData(
time=Timestamp(1488526400),
location=Location.TOTAL.serialize_for_db(), # pylint: disable=no-member
usd_value='5000',
),
LocationData(
time=Timestamp(1488626400),
location=Location.TOTAL.serialize_for_db(), # pylint: disable=no-member
usd_value='5500',
),
]
datahandler.db.add_multiple_location_data(location_data)
|
36,438 |
def _create_tree(fullmodule, path, fname, source, tree, inpackage):
"""Return the tree for a particular module.
fullmodule (full module name), inpackage+module, becomes o.module.
path is passed to recursive calls of _readmodule.
fname becomes o.file.
source is tokenized. Imports cause recursive calls to _readmodule.
tree is {} or {'__path__': <submodule search locations>}.
inpackage, None or string, is passed to recursive calls of _readmodule.
The effect of recursive calls is mutation of global _modules.
"""
f = io.StringIO(source)
# stack = [] # Initialize stack of (class, indent) pairs.
stack = Stack() # changing the source code so as to get the ending line
g = tokenize.generate_tokens(f.readline)
try:
for tokentype, token, start, _end, _line in g:
if tokentype == DEDENT:
lineno, thisindent = start
# Close previous nested classes and defs.
while stack and stack[-1][1] >= thisindent:
del stack[-1]
elif token == 'def':
lineno, thisindent = start
# Close previous nested classes and defs.
while stack and stack[-1][1] >= thisindent:
del stack[-1]
tokentype, func_name, start = next(g)[0:3]
if tokentype != NAME:
continue # Skip def with syntax error.
cur_func = None
if stack:
cur_obj = stack[-1][0]
cur_func = _nest_function(cur_obj, func_name, lineno)
else:
# It is just a function.
cur_func = Function(fullmodule, func_name, fname, lineno)
tree[func_name] = cur_func
stack.append((cur_func, thisindent))
elif token == 'class':
lineno, thisindent = start
# Close previous nested classes and defs.
while stack and stack[-1][1] >= thisindent:
del stack[-1]
tokentype, class_name, start = next(g)[0:3]
if tokentype != NAME:
continue # Skip class with syntax error.
# Parse what follows the class name.
tokentype, token, start = next(g)[0:3]
inherit = None
if token == '(':
names = [] # Initialize list of superclasses.
level = 1
super = [] # Tokens making up current superclass.
while True:
tokentype, token, start = next(g)[0:3]
if token in (')', ',') and level == 1:
n = "".join(super)
if n in tree:
# We know this super class.
n = tree[n]
else:
c = n.split('.')
if len(c) > 1:
# Super class form is module.class:
# look in module for class.
m = c[-2]
c = c[-1]
if m in _modules:
d = _modules[m]
if c in d:
n = d[c]
names.append(n)
super = []
if token == '(':
level += 1
elif token == ')':
level -= 1
if level == 0:
break
elif token == ',' and level == 1:
pass
# Only use NAME and OP (== dot) tokens for type name.
elif tokentype in (NAME, OP) and level == 1:
super.append(token)
# Expressions in the base list are not supported.
inherit = names
if stack:
cur_obj = stack[-1][0]
cur_class = _nest_class(
cur_obj, class_name, lineno, inherit)
else:
cur_class = Class(fullmodule, class_name, inherit,
fname, lineno)
tree[class_name] = cur_class
stack.append((cur_class, thisindent))
elif token == 'import' and start[1] == 0:
modules = _getnamelist(g)
for mod, _mod2 in modules:
try:
# Recursively read the imported module.
if inpackage is None:
_readmodule(mod, path)
else:
try:
_readmodule(mod, path, inpackage)
except ImportError:
_readmodule(mod, [])
except:
# If we can't find or parse the imported module,
# too bad -- don't die here.
pass
elif token == 'from' and start[1] == 0:
mod, token = _getname(g)
if not mod or token != "import":
continue
names = _getnamelist(g)
try:
# Recursively read the imported module.
d = _readmodule(mod, path, inpackage)
except:
# If we can't find or parse the imported module,
# too bad -- don't die here.
continue
# Add any classes that were defined in the imported module
# to our name space if they were mentioned in the list.
for n, n2 in names:
if n in d:
tree[n2 or n] = d[n]
elif n == '*':
# Don't add names that start with _.
for n in d:
if n[0] != '_':
tree[n] = d[n]
except StopIteration:
pass
f.close()
return tree
|
def _create_tree(fullmodule, path, fname, source, tree, inpackage):
"""Return the tree for a particular module.
fullmodule (full module name), inpackage+module, becomes o.module.
path is passed to recursive calls of _readmodule.
fname becomes o.file.
source is tokenized. Imports cause recursive calls to _readmodule.
tree is {} or {'__path__': <submodule search locations>}.
inpackage, None or string, is passed to recursive calls of _readmodule.
The effect of recursive calls is mutation of global _modules.
"""
f = io.StringIO(source)
# stack = [] # Initialize stack of (class, indent) pairs.
stack = Stack() # Initialize stack of (class, indent) pairs.
g = tokenize.generate_tokens(f.readline)
try:
for tokentype, token, start, _end, _line in g:
if tokentype == DEDENT:
lineno, thisindent = start
# Close previous nested classes and defs.
while stack and stack[-1][1] >= thisindent:
del stack[-1]
elif token == 'def':
lineno, thisindent = start
# Close previous nested classes and defs.
while stack and stack[-1][1] >= thisindent:
del stack[-1]
tokentype, func_name, start = next(g)[0:3]
if tokentype != NAME:
continue # Skip def with syntax error.
cur_func = None
if stack:
cur_obj = stack[-1][0]
cur_func = _nest_function(cur_obj, func_name, lineno)
else:
# It is just a function.
cur_func = Function(fullmodule, func_name, fname, lineno)
tree[func_name] = cur_func
stack.append((cur_func, thisindent))
elif token == 'class':
lineno, thisindent = start
# Close previous nested classes and defs.
while stack and stack[-1][1] >= thisindent:
del stack[-1]
tokentype, class_name, start = next(g)[0:3]
if tokentype != NAME:
continue # Skip class with syntax error.
# Parse what follows the class name.
tokentype, token, start = next(g)[0:3]
inherit = None
if token == '(':
names = [] # Initialize list of superclasses.
level = 1
super = [] # Tokens making up current superclass.
while True:
tokentype, token, start = next(g)[0:3]
if token in (')', ',') and level == 1:
n = "".join(super)
if n in tree:
# We know this super class.
n = tree[n]
else:
c = n.split('.')
if len(c) > 1:
# Super class form is module.class:
# look in module for class.
m = c[-2]
c = c[-1]
if m in _modules:
d = _modules[m]
if c in d:
n = d[c]
names.append(n)
super = []
if token == '(':
level += 1
elif token == ')':
level -= 1
if level == 0:
break
elif token == ',' and level == 1:
pass
# Only use NAME and OP (== dot) tokens for type name.
elif tokentype in (NAME, OP) and level == 1:
super.append(token)
# Expressions in the base list are not supported.
inherit = names
if stack:
cur_obj = stack[-1][0]
cur_class = _nest_class(
cur_obj, class_name, lineno, inherit)
else:
cur_class = Class(fullmodule, class_name, inherit,
fname, lineno)
tree[class_name] = cur_class
stack.append((cur_class, thisindent))
elif token == 'import' and start[1] == 0:
modules = _getnamelist(g)
for mod, _mod2 in modules:
try:
# Recursively read the imported module.
if inpackage is None:
_readmodule(mod, path)
else:
try:
_readmodule(mod, path, inpackage)
except ImportError:
_readmodule(mod, [])
except:
# If we can't find or parse the imported module,
# too bad -- don't die here.
pass
elif token == 'from' and start[1] == 0:
mod, token = _getname(g)
if not mod or token != "import":
continue
names = _getnamelist(g)
try:
# Recursively read the imported module.
d = _readmodule(mod, path, inpackage)
except:
# If we can't find or parse the imported module,
# too bad -- don't die here.
continue
# Add any classes that were defined in the imported module
# to our name space if they were mentioned in the list.
for n, n2 in names:
if n in d:
tree[n2 or n] = d[n]
elif n == '*':
# Don't add names that start with _.
for n in d:
if n[0] != '_':
tree[n] = d[n]
except StopIteration:
pass
f.close()
return tree
|
17,690 |
def diff_dataset(
dataset,
fr,
to,
constant_refs,
path=None,
annex=None,
untracked='normal',
recursive=False,
recursion_limit=None,
eval_file_type=True,
reporting_order='depth-first',
datasets_only=False,
):
"""Internal helper to diff a dataset
Parameters
----------
dataset : Dataset
Dataset to perform the diff on. `fr` and `to` parameters are interpreted
in the context of this dataset.
fr : str
Commit-ish to compare from.
to : str
Commit-ish to compare to.
constant_refs : bool
If True, `fr` and `to` will be passed on unmodified to diff operations
on subdatasets. This can be useful with symbolic references like tags
to report subdataset changes independent of superdataset changes.
If False, `fr` and `to` will be translated to the subdataset commit-ish
that match the given commit-ish in the superdataset.
path : Path-like, optional
Paths to constrain the diff to (see main diff() command).
annex : str, optional
Reporting mode for annex properties (see main diff() command).
untracked : str, optional
Reporting mode for untracked content (see main diff() command).
recursive : bool, optional
Flag to enable recursive operation (see main diff() command).
recursion_limit : int, optional
Recursion limit (see main diff() command).
eval_file_type : bool, optional
Whether to perform file type discrimination between real symlinks
and symlinks representing annex'ed files. This can be expensive
in datasets with many files.
reporting_order : {'depth-first', 'breadth-first', 'bottom-up'}, optional
By default, subdataset content records are reported after the record
on the subdataset's submodule in a superdataset (depth-first).
Alternatively, report all superdataset records first, before reporting
any subdataset content records (breadth-first). Both 'depth-first'
and 'breadth-first' both report dataset content before considering
subdatasets. Alternative 'bottom-up' mode is similar to 'depth-first'
but dataset content is reported after reporting on subdatasets.
Yields
------
dict
DataLad result records.
"""
if reporting_order not in ('depth-first', 'breadth-first', 'bottom-up'):
raise ValueError('Unknown reporting order: {}'.format(reporting_order))
ds = require_dataset(
dataset, check_installed=True, purpose='report difference')
# we cannot really perform any sorting of paths into subdatasets
# or rejecting paths based on the state of the filesystem, as
# we need to be able to compare with states that are not represented
# in the worktree (anymore)
if path:
if datasets_only:
raise NotImplementedError("Analysis of provided paths in datasets_only mode is not implemnented")
ps = []
# sort any path argument into the respective subdatasets
for p in sorted(ensure_list(path)):
# it is important to capture the exact form of the
# given path argument, before any normalization happens
# distinguish rsync-link syntax to identify
# a dataset as whole (e.g. 'ds') vs its
# content (e.g. 'ds/')
# special case is the root dataset, always report its content
# changes
orig_path = str(p)
resolved_path = resolve_path(p, dataset)
p = \
resolved_path, \
orig_path.endswith(op.sep) or resolved_path == ds.pathobj
str_path = str(p[0])
root = get_dataset_root(str_path)
if root is None:
# no root, not possibly underneath the refds
yield dict(
action='status',
path=str_path,
refds=ds.path,
status='error',
message='path not underneath this dataset',
logger=lgr)
continue
if path_under_rev_dataset(ds, str_path) is None:
# nothing we support handling any further
# there is only a single refds
yield dict(
path=str_path,
refds=ds.path,
action='diff',
status='error',
message=(
"dataset containing given paths is not underneath "
"the reference dataset %s: %s",
ds, str_path),
logger=lgr,
)
continue
ps.append(p)
path = ps
# TODO we might want to move away from the single-pass+immediate-yield
# paradigm for this command. If we gather all information first, we
# could do post-processing and detect when a file (same gitsha, or same
# key) was copied/moved from another dataset. Another command (e.g.
# save) could act on this information and also move/copy
# availability information or at least enhance the respective commit
# message with cross-dataset provenance info
# cache to help avoid duplicate status queries
content_info_cache = {}
for res in _diff_ds(
ds,
fr,
to,
constant_refs,
recursion_limit
if recursion_limit is not None and recursive
else -1 if recursive else 0,
# TODO recode paths to repo path reference
origpaths=None if not path else OrderedDict(path),
untracked=untracked,
annexinfo=annex,
eval_file_type=eval_file_type,
cache=content_info_cache,
order=reporting_order,
datasets_only=datasets_only,
):
res.update(
refds=ds.path,
logger=lgr,
action='diff',
)
yield res
|
def diff_dataset(
dataset,
fr,
to,
constant_refs,
path=None,
annex=None,
untracked='normal',
recursive=False,
recursion_limit=None,
eval_file_type=True,
reporting_order='depth-first',
datasets_only=False,
):
"""Internal helper to diff a dataset
Parameters
----------
dataset : Dataset
Dataset to perform the diff on. `fr` and `to` parameters are interpreted
in the context of this dataset.
fr : str
Commit-ish to compare from.
to : str
Commit-ish to compare to.
constant_refs : bool
If True, `fr` and `to` will be passed on unmodified to diff operations
on subdatasets. This can be useful with symbolic references like tags
to report subdataset changes independent of superdataset changes.
If False, `fr` and `to` will be translated to the subdataset commit-ish
that match the given commit-ish in the superdataset.
path : Path-like, optional
Paths to constrain the diff to (see main diff() command).
annex : str, optional
Reporting mode for annex properties (see main diff() command).
untracked : str, optional
Reporting mode for untracked content (see main diff() command).
recursive : bool, optional
Flag to enable recursive operation (see main diff() command).
recursion_limit : int, optional
Recursion limit (see main diff() command).
eval_file_type : bool, optional
Whether to perform file type discrimination between real symlinks
and symlinks representing annex'ed files. This can be expensive
in datasets with many files.
reporting_order : {'depth-first', 'breadth-first', 'bottom-up'}, optional
By default, subdataset content records are reported after the record
on the subdataset's submodule in a superdataset (depth-first).
Alternatively, report all superdataset records first, before reporting
any subdataset content records (breadth-first). Both 'depth-first'
and 'breadth-first' both report dataset content before considering
subdatasets. Alternative 'bottom-up' mode is similar to 'depth-first'
but dataset content is reported after reporting on subdatasets.
Yields
------
dict
DataLad result records.
"""
if reporting_order not in ('depth-first', 'breadth-first', 'bottom-up'):
raise ValueError('Unknown reporting order: {}'.format(reporting_order))
ds = require_dataset(
dataset, check_installed=True, purpose='report difference')
# we cannot really perform any sorting of paths into subdatasets
# or rejecting paths based on the state of the filesystem, as
# we need to be able to compare with states that are not represented
# in the worktree (anymore)
if path:
if datasets_only:
raise NotImplementedError("Analysis of provided paths in datasets_only mode is not implemented")
ps = []
# sort any path argument into the respective subdatasets
for p in sorted(ensure_list(path)):
# it is important to capture the exact form of the
# given path argument, before any normalization happens
# distinguish rsync-link syntax to identify
# a dataset as whole (e.g. 'ds') vs its
# content (e.g. 'ds/')
# special case is the root dataset, always report its content
# changes
orig_path = str(p)
resolved_path = resolve_path(p, dataset)
p = \
resolved_path, \
orig_path.endswith(op.sep) or resolved_path == ds.pathobj
str_path = str(p[0])
root = get_dataset_root(str_path)
if root is None:
# no root, not possibly underneath the refds
yield dict(
action='status',
path=str_path,
refds=ds.path,
status='error',
message='path not underneath this dataset',
logger=lgr)
continue
if path_under_rev_dataset(ds, str_path) is None:
# nothing we support handling any further
# there is only a single refds
yield dict(
path=str_path,
refds=ds.path,
action='diff',
status='error',
message=(
"dataset containing given paths is not underneath "
"the reference dataset %s: %s",
ds, str_path),
logger=lgr,
)
continue
ps.append(p)
path = ps
# TODO we might want to move away from the single-pass+immediate-yield
# paradigm for this command. If we gather all information first, we
# could do post-processing and detect when a file (same gitsha, or same
# key) was copied/moved from another dataset. Another command (e.g.
# save) could act on this information and also move/copy
# availability information or at least enhance the respective commit
# message with cross-dataset provenance info
# cache to help avoid duplicate status queries
content_info_cache = {}
for res in _diff_ds(
ds,
fr,
to,
constant_refs,
recursion_limit
if recursion_limit is not None and recursive
else -1 if recursive else 0,
# TODO recode paths to repo path reference
origpaths=None if not path else OrderedDict(path),
untracked=untracked,
annexinfo=annex,
eval_file_type=eval_file_type,
cache=content_info_cache,
order=reporting_order,
datasets_only=datasets_only,
):
res.update(
refds=ds.path,
logger=lgr,
action='diff',
)
yield res
|
50,493 |
def updateChangelog(filename: str, lines: List[str]):
newLines = []
nextLine = None
for line in lines:
if line == "Next Release":
line = f"{VERSION} ({time.strftime('%d %B %Y')})"
nextLine = "-" * len(line)
elif nextLine:
line = nextLine
nextLine = None
newLines.append(line)
return newLines
|
def updateChangelog(filename: str, lines: List[str]):
newLines = []
for line in lines:
if line == "Next Release":
newLines.append(f"{VERSION} ({time.strftime('%d %B %Y')})")
else:
newLines.append(line)
return newLines
|
53,297 |
def synthetic_radiograph(
obj, size=None, bins=None, ignore_grid=False, optical_density=False
):
r"""
Calculate a "synthetic radiograph" (particle count histogram in the
image plane).
Parameters
----------
obj: `dict` or `~plasmapy.diagnostics.charged_particle_radiography.Tracker`
Either a `~plasmapy.diagnostics.charged_particle_radiography.Tracker`
object that has been run, or an output dictionary created by
running `~plasmapy.diagnostics.charged_particle_radiography.Tracker`.
size : `~astropy.units.Quantity`, shape ``(2, 2)``, optional
The size of the detector array, specified as the minimum
and maximum values included in both the horizontal and vertical
directions in the detector plane coordinates. Shape is
``((hmin, hmax), (vmin, vmax))``. If not specified, the size will be
set to include all particles on the detector. Units must be convertable
to meters.
bins : array of integers, shape ``(2)``
The number of bins in each direction in the format
``(hbins, vbins)``. The default is ``(200, 200)``.
ignore_grid: `bool`
If `True`, returns the intensity in the image plane in the absence
of simulated fields.
optical_density: `bool`
If `True`, return the optical density rather than the intensity
.. math::
OD = -log_{10}(Intensity/I_0)
where :math:`Intensity` is the simulation intensity on the
detector plane and :math:`I_0` is the intensity on the detector
plane in the absence of simulated fields. Default is `False`.
If the `Intensity` histogram contains zeros, `OD` will contain
-`~numpy.inf` values. These can be easily replaced with zeros
if desired for plotting using `~numpy.nan_to_num`
``OD = np.nan_to_num(OD, neginf=0)``
Returns
-------
hax : `~astropy.units.Quantity` array shape ``(hbins,)``
The horizontal axis of the synthetic radiograph in meters.
vax : `~astropy.units.Quantity` array shape ``(vbins, )``
The vertical axis of the synthetic radiograph in meters.
intensity : `~numpy.ndarray`, shape ``(hbins, vbins)``
The number of particles counted in each bin of the histogram.
"""
# results_dict will raise an error if the simulation has not been run.
if isinstance(obj, Tracker):
d = obj.results_dict
# Check if dictionary-like (dict or npz file)
elif isinstance(obj, dict):
d = obj
else:
raise ValueError(
"The first argument of synthetic_radiograph must be "
"either a cpr.Tracker or an "
"output dictionary from cpr.Tracker"
)
if bins is None:
bins = [200, 200]
# Note that, at the end of the simulation, all particles were moved
# into the image plane.
# If ignore_grid is True, use the predicted positions in the absence of
# simulated fields
if ignore_grid:
xloc = d["x0"]
yloc = d["y0"]
else:
xloc = d["x"]
yloc = d["y"]
if size is None:
# If a detector size is not given, choose a size based on the
# particle positions
w = np.max([np.max(np.abs(xloc)), np.max(np.abs(yloc))])
size = np.array([[-w, w], [-w, w]]) * u.m
elif not isinstance(size, u.Quantity) or not size.unit.is_equivalent(u.m):
raise TypeError(
"``size`` must be an `~astropy.units.Quantity` "
"object with units convertable to meters."
)
elif size.shape != (2, 2):
raise ValueError(
"``size`` must have shape ``(2,2)`` corresponding to "
"``[[xmin, xmax], [ymin, ymax]]``."
)
# Generate the histogram
intensity, h, v = np.histogram2d(xloc, yloc, range=size.to(u.m).value, bins=bins)
# h, v are the bin edges: compute the centers to produce arrays
# of the right length (then trim off the extra point)
h = ((h + np.roll(h, -1)) / 2)[:-1]
v = ((v + np.roll(v, -1)) / 2)[:-1]
# Throw a warning if < 50% of the particles are included on the
# histogram
percentage = np.sum(intensity) / d["nparticles"]
if percentage < 0.5:
warnings.warn(
f"Only {percentage:.2%} of the particles are shown "
"on this synthetic radiograph. Consider increasing "
"the size to include more.",
RuntimeWarning,
)
if optical_density:
# Generate the null radiograph
x, y, I0 = synthetic_radiograph(obj, size=size, bins=bins, ignore_grid=True)
# Calculate I0 as the mean of the non-zero values in the null
# histogram. Zeros are just outside of the illuminate area.
I0 = np.mean(I0[I0 != 0])
# Calculate the optical_density
# ignore any errors resulting from zero values in intensity
with np.errstate(divide="ignore"):
intensity = -np.log10(intensity / I0)
return h * u.m, v * u.m, intensity
|
def synthetic_radiograph(
obj, size=None, bins=None, ignore_grid=False, optical_density=False
):
r"""
Calculate a "synthetic radiograph" (particle count histogram in the
image plane).
Parameters
----------
obj: `dict` or `~plasmapy.diagnostics.charged_particle_radiography.Tracker`
Either a `~plasmapy.diagnostics.charged_particle_radiography.Tracker`
object that has been run, or an output dictionary created by
running `~plasmapy.diagnostics.charged_particle_radiography.Tracker`.
size : `~astropy.units.Quantity`, shape ``(2, 2)``, optional
The size of the detector array, specified as the minimum
and maximum values included in both the horizontal and vertical
directions in the detector plane coordinates. Shape is
``((hmin, hmax), (vmin, vmax))``. If not specified, the size will be
set to include all particles on the detector. Units must be convertable
to meters.
bins : array of integers, shape ``(2)``
The number of bins in each direction in the format
``(hbins, vbins)``. The default is ``(200, 200)``.
ignore_grid: `bool`
If `True`, returns the intensity in the image plane in the absence
of simulated fields.
optical_density: `bool`
If `True`, return the optical density rather than the intensity
.. math::
OD = -log_{10}(Intensity/I_0)
where :math:`Intensity` is the simulation intensity on the
detector plane and :math:`I_0` is the intensity on the detector
plane in the absence of simulated fields. Default is `False`.
If the ``Intensity`` histogram contains zeros, ``OD`` will contain
-`~numpy.inf` values. These can be easily replaced with zeros
if desired for plotting using `~numpy.nan_to_num`
.. code-block::
OD = np.nan_to_num(OD, neginf=0)
Returns
-------
hax : `~astropy.units.Quantity` array shape ``(hbins,)``
The horizontal axis of the synthetic radiograph in meters.
vax : `~astropy.units.Quantity` array shape ``(vbins, )``
The vertical axis of the synthetic radiograph in meters.
intensity : `~numpy.ndarray`, shape ``(hbins, vbins)``
The number of particles counted in each bin of the histogram.
"""
# results_dict will raise an error if the simulation has not been run.
if isinstance(obj, Tracker):
d = obj.results_dict
# Check if dictionary-like (dict or npz file)
elif isinstance(obj, dict):
d = obj
else:
raise ValueError(
"The first argument of synthetic_radiograph must be "
"either a cpr.Tracker or an "
"output dictionary from cpr.Tracker"
)
if bins is None:
bins = [200, 200]
# Note that, at the end of the simulation, all particles were moved
# into the image plane.
# If ignore_grid is True, use the predicted positions in the absence of
# simulated fields
if ignore_grid:
xloc = d["x0"]
yloc = d["y0"]
else:
xloc = d["x"]
yloc = d["y"]
if size is None:
# If a detector size is not given, choose a size based on the
# particle positions
w = np.max([np.max(np.abs(xloc)), np.max(np.abs(yloc))])
size = np.array([[-w, w], [-w, w]]) * u.m
elif not isinstance(size, u.Quantity) or not size.unit.is_equivalent(u.m):
raise TypeError(
"``size`` must be an `~astropy.units.Quantity` "
"object with units convertable to meters."
)
elif size.shape != (2, 2):
raise ValueError(
"``size`` must have shape ``(2,2)`` corresponding to "
"``[[xmin, xmax], [ymin, ymax]]``."
)
# Generate the histogram
intensity, h, v = np.histogram2d(xloc, yloc, range=size.to(u.m).value, bins=bins)
# h, v are the bin edges: compute the centers to produce arrays
# of the right length (then trim off the extra point)
h = ((h + np.roll(h, -1)) / 2)[:-1]
v = ((v + np.roll(v, -1)) / 2)[:-1]
# Throw a warning if < 50% of the particles are included on the
# histogram
percentage = np.sum(intensity) / d["nparticles"]
if percentage < 0.5:
warnings.warn(
f"Only {percentage:.2%} of the particles are shown "
"on this synthetic radiograph. Consider increasing "
"the size to include more.",
RuntimeWarning,
)
if optical_density:
# Generate the null radiograph
x, y, I0 = synthetic_radiograph(obj, size=size, bins=bins, ignore_grid=True)
# Calculate I0 as the mean of the non-zero values in the null
# histogram. Zeros are just outside of the illuminate area.
I0 = np.mean(I0[I0 != 0])
# Calculate the optical_density
# ignore any errors resulting from zero values in intensity
with np.errstate(divide="ignore"):
intensity = -np.log10(intensity / I0)
return h * u.m, v * u.m, intensity
|
32,865 |
def set_tags_from_context(span, context):
# type: (Span, Dict[str, Any]) -> None
"""Helper to extract meta values from a Celery Context"""
for key, tag_name in TAG_KEYS:
value = context.get(key)
# Skip this key if it is not set
if value is None or value == "":
continue
# Skip `timelimit` if it is not set (its default/unset value is a
# tuple or a list of `None` values
if key == "timelimit" and all((_ is None for _ in value)):
continue
# Skip `retries` if its value is `0`
if key == "retries" and value == 0:
continue
span.set_tag(tag_name, value)
|
def set_tags_from_context(span, context):
# type: (Span, Dict[str, Any]) -> None
"""Helper to extract meta values from a Celery Context"""
for key, tag_name in TAG_KEYS:
value = context.get(key)
# Skip this key if it is not set
if value is None or value == "":
continue
# Skip `timelimit` if it is not set (its default/unset value is a
# tuple or a list of `None` values
if key == "timelimit" and all(_ is None for _ in value):
continue
# Skip `retries` if its value is `0`
if key == "retries" and value == 0:
continue
span.set_tag(tag_name, value)
|
46,629 |
def meta_svm(fname_objective: str, fname_cost: str, noise: bool = True) -> Tuple[UserFunctionWrapper, ParameterSpace]:
"""
Interface to the Meta-SVM benchmark which imitates the hyperparameter optimization of a
support vector machine on OpenML like classification datasets.
Offline generated function samples can be download here:
http://www.ml4aad.org/wp-content/uploads/2019/05/profet_data.tar.gz
NOTE: make sure that the index for the objective function and the cost function match,
e.g for sample_objective_i.pkl and sample_cost.pkl the index i should be the same.
For further information about Profet and the generated meta-surrogate benchmarks see:
Meta-Surrogate Benchmarking for Hyperparameter Optimization
A. Klein and Z. Dai and F. Hutter and N. Lawrence and J. Gonzalez
arXiv:1905.12982 [cs.LG] (2019)
:param fname_objective: filename for the objective function
:param fname_cost: filename for the cost function
:param noise: determines whether to add noise on the function value or not
:return: Tuple of user function object and parameter space
"""
parameter_space = ParameterSpace([
ContinuousParameter('log_C', 0, 1), # scaled to [0, 1], original space was in [-10, 10]
ContinuousParameter('log_gamma', 0, 1)]) # scaled to [0, 1] original space was in [-10, 10]
data = pickle.load(open(fname_objective, "rb"))
x_mean_objective = data["x_mean"]
x_std_objective = data["x_std"]
task_feature_objective = data["task_feature"]
objective = get_default_architecture_classification(x_mean_objective.shape[0]).float()
objective.load_state_dict(data["state_dict"])
data = pickle.load(open(fname_cost, "rb"))
x_mean_cost = data["x_mean"]
x_std_cost = data["x_std"]
y_mean_cost = data["y_mean"]
y_std_cost = data["y_std"]
task_feature_cost = data["task_feature"]
cost = get_default_architecture_cost(x_mean_cost.shape[0]).float()
cost.load_state_dict(data["state_dict"])
def objective_function(config, with_noise=True):
Ht = np.repeat(task_feature_objective[None, :], config.shape[0], axis=0)
x = np.concatenate((config, Ht), axis=1)
x_norm = torch.from_numpy((x - x_mean_objective) / x_std_objective).float()
o = objective.forward(x_norm).data.numpy()
m = o[:, 0]
log_v = o[:, 1]
if with_noise:
feval = np.random.randn() * np.sqrt(np.exp(log_v)) + m
else:
feval = m
Ht = np.repeat(task_feature_cost[None, :], config.shape[0], axis=0)
x = np.concatenate((config, Ht), axis=1)
x_norm = torch.from_numpy((x - x_mean_cost) / x_std_cost).float()
o = cost.forward(x_norm).data.numpy()
log_m = o[:, 0] * y_std_cost + y_mean_cost
log_log_v = o[:, 1] * y_std_cost ** 2
if with_noise:
log_c = np.random.randn() * np.sqrt(np.exp(log_log_v)) + log_m
else:
log_c = log_m
return feval[:, None], np.exp(log_c)[:, None]
f = partial(objective_function, with_noise=noise)
return f, parameter_space
|
def meta_svm(fname_objective: str, fname_cost: str, noise: bool = True) -> Tuple[UserFunctionWrapper, ParameterSpace]:
"""
Interface to the Meta-SVM benchmark which imitates the hyperparameter optimization of a
support vector machine on OpenML like classification datasets.
Offline generated function samples can be download here:
http://www.ml4aad.org/wp-content/uploads/2019/05/profet_data.tar.gz
NOTE: make sure that the index for the objective function and the cost function match,
e.g for sample_objective_i.pkl and sample_cost_i.pkl the index i should be the same.
For further information about Profet and the generated meta-surrogate benchmarks see:
Meta-Surrogate Benchmarking for Hyperparameter Optimization
A. Klein and Z. Dai and F. Hutter and N. Lawrence and J. Gonzalez
arXiv:1905.12982 [cs.LG] (2019)
:param fname_objective: filename for the objective function
:param fname_cost: filename for the cost function
:param noise: determines whether to add noise on the function value or not
:return: Tuple of user function object and parameter space
"""
parameter_space = ParameterSpace([
ContinuousParameter('log_C', 0, 1), # scaled to [0, 1], original space was in [-10, 10]
ContinuousParameter('log_gamma', 0, 1)]) # scaled to [0, 1] original space was in [-10, 10]
data = pickle.load(open(fname_objective, "rb"))
x_mean_objective = data["x_mean"]
x_std_objective = data["x_std"]
task_feature_objective = data["task_feature"]
objective = get_default_architecture_classification(x_mean_objective.shape[0]).float()
objective.load_state_dict(data["state_dict"])
data = pickle.load(open(fname_cost, "rb"))
x_mean_cost = data["x_mean"]
x_std_cost = data["x_std"]
y_mean_cost = data["y_mean"]
y_std_cost = data["y_std"]
task_feature_cost = data["task_feature"]
cost = get_default_architecture_cost(x_mean_cost.shape[0]).float()
cost.load_state_dict(data["state_dict"])
def objective_function(config, with_noise=True):
Ht = np.repeat(task_feature_objective[None, :], config.shape[0], axis=0)
x = np.concatenate((config, Ht), axis=1)
x_norm = torch.from_numpy((x - x_mean_objective) / x_std_objective).float()
o = objective.forward(x_norm).data.numpy()
m = o[:, 0]
log_v = o[:, 1]
if with_noise:
feval = np.random.randn() * np.sqrt(np.exp(log_v)) + m
else:
feval = m
Ht = np.repeat(task_feature_cost[None, :], config.shape[0], axis=0)
x = np.concatenate((config, Ht), axis=1)
x_norm = torch.from_numpy((x - x_mean_cost) / x_std_cost).float()
o = cost.forward(x_norm).data.numpy()
log_m = o[:, 0] * y_std_cost + y_mean_cost
log_log_v = o[:, 1] * y_std_cost ** 2
if with_noise:
log_c = np.random.randn() * np.sqrt(np.exp(log_log_v)) + log_m
else:
log_c = log_m
return feval[:, None], np.exp(log_c)[:, None]
f = partial(objective_function, with_noise=noise)
return f, parameter_space
|
14,265 |
def _initialise_testbench_(argv_):
# The body of this function is split in two because no coverage is collected on
# the function that starts the coverage. By splitting it in two we get coverage
# on most of the function.
global argc, argv
argv = argv_
argc = len(argv)
root_name = os.getenv("TOPLEVEL")
if root_name is not None:
root_name = root_name.strip()
if root_name == "":
root_name = None
elif '.' in root_name:
# Skip any library component of the toplevel
root_name = root_name.split(".", 1)[1]
# sys.path normally includes "" (the current directory), but does not appear to when python is embedded.
# Add it back because users expect to be able to import files in their test directory.
# TODO: move this to gpi_embed.cpp
sys.path.insert(0, "")
_setup_logging()
# From https://www.python.org/dev/peps/pep-0565/#recommended-filter-settings-for-test-runners
# If the user doesn't want to see these, they can always change the global
# warning settings in their test module.
if not sys.warnoptions:
warnings.simplefilter("default")
from cocotb import simulator
global SIM_NAME, SIM_VERSION
SIM_NAME = simulator.get_simulator_product().strip()
SIM_VERSION = simulator.get_simulator_version().strip()
cocotb.log.info(f"Running on {SIM_NAME} version {SIM_VERSION}")
memcheck_port = os.getenv('MEMCHECK')
if memcheck_port is not None:
mem_debug(int(memcheck_port))
log.info("Running tests with cocotb v%s from %s" %
(__version__, os.path.dirname(__file__)))
# Create the base handle type
process_plusargs()
global scheduler
scheduler = Scheduler()
# Seed the Python random number generator to make this repeatable
global RANDOM_SEED
RANDOM_SEED = os.getenv('RANDOM_SEED')
if RANDOM_SEED is None:
if 'ntb_random_seed' in plusargs:
RANDOM_SEED = eval(plusargs['ntb_random_seed'])
elif 'seed' in plusargs:
RANDOM_SEED = eval(plusargs['seed'])
else:
RANDOM_SEED = int(time.time())
log.info("Seeding Python random module with %d" % (RANDOM_SEED))
else:
RANDOM_SEED = int(RANDOM_SEED)
log.info("Seeding Python random module with supplied seed %d" % (RANDOM_SEED))
random.seed(RANDOM_SEED)
# Setup DUT object
from cocotb import simulator
handle = simulator.get_root_handle(root_name)
if not handle:
raise RuntimeError(f"Can not find root handle ({root_name})")
global top
top = cocotb.handle.SimHandle(handle)
try:
import pytest
except ImportError:
log.warning(
"Install pytest to enable better AssertionError messages")
else:
try:
# Install the assertion rewriting hook, which must be done before we
# import the test modules.
from _pytest.config import Config
from _pytest.assertion import install_importhook
pytest_conf = Config.fromdictargs({}, ['--capture=no'])
install_importhook(pytest_conf)
except Exception:
log.exception(
"Configuring the assertion rewrite hook using pytest {} failed. "
"Please file a bug report!".format(pytest.__version__))
# start Regression Manager
global regression_manager
regression_manager = RegressionManager.from_discovery(top)
regression_manager.execute()
|
def _initialise_testbench_(argv_):
# The body of this function is split in two because no coverage is collected on
# the function that starts the coverage. By splitting it in two we get coverage
# on most of the function.
global argc, argv
argv = argv_
argc = len(argv)
root_name = os.getenv("TOPLEVEL")
if root_name is not None:
root_name = root_name.strip()
if root_name == "":
root_name = None
elif '.' in root_name:
# Skip any library component of the toplevel
root_name = root_name.split(".", 1)[1]
# sys.path normally includes "" (the current directory), but does not appear to when python is embedded.
# Add it back because users expect to be able to import files in their test directory.
# TODO: move this to gpi_embed.cpp
sys.path.insert(0, "")
_setup_logging()
# From https://www.python.org/dev/peps/pep-0565/#recommended-filter-settings-for-test-runners
# If the user doesn't want to see these, they can always change the global
# warning settings in their test module.
if not sys.warnoptions:
warnings.simplefilter("default")
from cocotb import simulator
global SIM_NAME, SIM_VERSION
SIM_NAME = simulator.get_simulator_product().strip()
SIM_VERSION = simulator.get_simulator_version().strip()
cocotb.log.info(f"Running on {SIM_NAME} version {SIM_VERSION}")
memcheck_port = os.getenv('MEMCHECK')
if memcheck_port is not None:
mem_debug(int(memcheck_port))
log.info("Running tests with cocotb v%s from %s" %
(__version__, os.path.dirname(__file__)))
# Create the base handle type
process_plusargs()
global scheduler
scheduler = Scheduler()
# Seed the Python random number generator to make this repeatable
global RANDOM_SEED
RANDOM_SEED = os.getenv('RANDOM_SEED')
if RANDOM_SEED is None:
if 'ntb_random_seed' in plusargs:
RANDOM_SEED = eval(plusargs['ntb_random_seed'])
elif 'seed' in plusargs:
RANDOM_SEED = eval(plusargs['seed'])
else:
RANDOM_SEED = int(time.time())
log.info("Seeding Python random module with %d" % (RANDOM_SEED))
else:
RANDOM_SEED = int(RANDOM_SEED)
log.info("Seeding Python random module with supplied seed %d" % (RANDOM_SEED))
random.seed(RANDOM_SEED)
# Setup DUT object
from cocotb import simulator
handle = simulator.get_root_handle(root_name)
if not handle:
raise RuntimeError(f"Can not find root handle ({root_name})")
global top
top = cocotb.handle.SimHandle(handle)
try:
import pytest
except ImportError:
log.info(
"Install pytest to enable better AssertionError messages")
else:
try:
# Install the assertion rewriting hook, which must be done before we
# import the test modules.
from _pytest.config import Config
from _pytest.assertion import install_importhook
pytest_conf = Config.fromdictargs({}, ['--capture=no'])
install_importhook(pytest_conf)
except Exception:
log.exception(
"Configuring the assertion rewrite hook using pytest {} failed. "
"Please file a bug report!".format(pytest.__version__))
# start Regression Manager
global regression_manager
regression_manager = RegressionManager.from_discovery(top)
regression_manager.execute()
|
3,426 |
def get_project_config(project, full_config=True, project_keys=None):
"""
Constructs the ProjectConfig information.
:param project: The project to load configuration for. Ensure that
organization is bound on this object; otherwise it will be loaded from
the database.
:param full_config: True if only the full config is required, False
if only the restricted (for external relays) is required
(default True, i.e. full configuration)
:param project_keys: Pre-fetched project keys for performance. However, if
no project keys are provided it is assumed that the config does not
need to contain auth information (this is the case when used in
python's StoreView)
:return: a ProjectConfig object for the given project
"""
metrics.incr("sentry.relay.config.get_project_config")
with configure_scope() as scope:
scope.set_tag("project", project.id)
if project.status != ObjectStatus.VISIBLE:
return ProjectConfig(project, disabled=True)
public_keys = get_public_key_configs(project, full_config, project_keys=project_keys)
with Hub.current.start_span(op="get_public_config"):
now = datetime.utcnow().replace(tzinfo=utc)
cfg = {
"disabled": False,
"slug": project.slug,
"lastFetch": now,
"lastChange": project.get_option("sentry:relay-rev-lastchange", now),
"rev": project.get_option("sentry:relay-rev", uuid.uuid4().hex),
"publicKeys": public_keys,
"config": {
"allowedDomains": list(get_origins(project)),
"trustedRelays": [
r["public_key"]
for r in project.organization.get_option("sentry:trusted-relays", [])
if r
],
"piiConfig": get_pii_config(project),
"datascrubbingSettings": get_datascrubbing_settings(project),
"features": get_exposed_features(project),
},
"organizationId": project.organization_id,
"projectId": project.id, # XXX: Unused by Relay, required by Python store
}
allow_dynamic_sampling = features.has(
"organizations:filters-and-sampling",
project.organization,
)
if allow_dynamic_sampling:
dynamic_sampling = project.get_option("sentry:dynamic_sampling")
if dynamic_sampling is not None:
cfg["config"]["dynamicSampling"] = dynamic_sampling
if not full_config:
# This is all we need for external Relay processors
return ProjectConfig(project, **cfg)
if features.has("organizations:performance-ops-breakdown", project.organization):
cfg["config"]["breakdownsV2"] = project.get_option("sentry:breakdowns")
if features.has("organizations:transaction-metrics-extraction", project.organization):
cfg["config"]["transactionMetrics"] = get_transaction_metrics_settings(
project, cfg["config"].get("breakdownsV2")
)
# This config key is technically not specific to _transaction_ metrics,
# is however currently both only applied to transaction metrics in
# Relay, and only used to tag transaction metrics in Sentry.
try:
cfg["config"]["metricConditionalTagging"] = get_metric_conditional_tagging_rules(
project
)
except Exception:
capture_exception()
if features.has("organizations:metrics-extraction", project.organization):
cfg["config"]["sessionMetrics"] = {
"version": 1,
"drop": False,
}
if features.has("projects:performance-suspect-spans-ingestion", project=project):
cfg["config"]["spanAttributes"] = project.get_option("sentry:span_attributes")
with Hub.current.start_span(op="get_filter_settings"):
cfg["config"]["filterSettings"] = get_filter_settings(project)
with Hub.current.start_span(op="get_grouping_config_dict_for_project"):
cfg["config"]["groupingConfig"] = get_grouping_config_dict_for_project(project)
with Hub.current.start_span(op="get_event_retention"):
cfg["config"]["eventRetention"] = quotas.get_event_retention(project.organization)
with Hub.current.start_span(op="get_all_quotas"):
cfg["config"]["quotas"] = get_quotas(project, keys=project_keys)
return ProjectConfig(project, **cfg)
|
def get_project_config(project, full_config=True, project_keys=None):
"""
Constructs the ProjectConfig information.
:param project: The project to load configuration for. Ensure that
organization is bound on this object; otherwise it will be loaded from
the database.
:param full_config: True if only the full config is required, False
if only the restricted (for external relays) is required
(default True, i.e. full configuration)
:param project_keys: Pre-fetched project keys for performance. However, if
no project keys are provided it is assumed that the config does not
need to contain auth information (this is the case when used in
python's StoreView)
:return: a ProjectConfig object for the given project
"""
metrics.incr("sentry.relay.config.get_project_config", sample_rate=1.0)
with configure_scope() as scope:
scope.set_tag("project", project.id)
if project.status != ObjectStatus.VISIBLE:
return ProjectConfig(project, disabled=True)
public_keys = get_public_key_configs(project, full_config, project_keys=project_keys)
with Hub.current.start_span(op="get_public_config"):
now = datetime.utcnow().replace(tzinfo=utc)
cfg = {
"disabled": False,
"slug": project.slug,
"lastFetch": now,
"lastChange": project.get_option("sentry:relay-rev-lastchange", now),
"rev": project.get_option("sentry:relay-rev", uuid.uuid4().hex),
"publicKeys": public_keys,
"config": {
"allowedDomains": list(get_origins(project)),
"trustedRelays": [
r["public_key"]
for r in project.organization.get_option("sentry:trusted-relays", [])
if r
],
"piiConfig": get_pii_config(project),
"datascrubbingSettings": get_datascrubbing_settings(project),
"features": get_exposed_features(project),
},
"organizationId": project.organization_id,
"projectId": project.id, # XXX: Unused by Relay, required by Python store
}
allow_dynamic_sampling = features.has(
"organizations:filters-and-sampling",
project.organization,
)
if allow_dynamic_sampling:
dynamic_sampling = project.get_option("sentry:dynamic_sampling")
if dynamic_sampling is not None:
cfg["config"]["dynamicSampling"] = dynamic_sampling
if not full_config:
# This is all we need for external Relay processors
return ProjectConfig(project, **cfg)
if features.has("organizations:performance-ops-breakdown", project.organization):
cfg["config"]["breakdownsV2"] = project.get_option("sentry:breakdowns")
if features.has("organizations:transaction-metrics-extraction", project.organization):
cfg["config"]["transactionMetrics"] = get_transaction_metrics_settings(
project, cfg["config"].get("breakdownsV2")
)
# This config key is technically not specific to _transaction_ metrics,
# is however currently both only applied to transaction metrics in
# Relay, and only used to tag transaction metrics in Sentry.
try:
cfg["config"]["metricConditionalTagging"] = get_metric_conditional_tagging_rules(
project
)
except Exception:
capture_exception()
if features.has("organizations:metrics-extraction", project.organization):
cfg["config"]["sessionMetrics"] = {
"version": 1,
"drop": False,
}
if features.has("projects:performance-suspect-spans-ingestion", project=project):
cfg["config"]["spanAttributes"] = project.get_option("sentry:span_attributes")
with Hub.current.start_span(op="get_filter_settings"):
cfg["config"]["filterSettings"] = get_filter_settings(project)
with Hub.current.start_span(op="get_grouping_config_dict_for_project"):
cfg["config"]["groupingConfig"] = get_grouping_config_dict_for_project(project)
with Hub.current.start_span(op="get_event_retention"):
cfg["config"]["eventRetention"] = quotas.get_event_retention(project.organization)
with Hub.current.start_span(op="get_all_quotas"):
cfg["config"]["quotas"] = get_quotas(project, keys=project_keys)
return ProjectConfig(project, **cfg)
|
34,261 |
def validate_files(args):
"""Validate all files needed for training a model.
Fails with a non-zero exit code if there are any errors in the data."""
from rasa.core.validator import Validator
from rasa.importers.rasa import RasaFileImporter
loop = asyncio.get_event_loop()
file_importer = RasaFileImporter(
domain_path=args.domain, training_data_paths=args.data
)
validator = loop.run_until_complete(Validator.from_importer(file_importer))
domain_is_alright = validator.verify_domain()
if not domain_is_alright:
sys.exit(1)
everything_is_alright = validator.verify_all(not args.fail_on_warnings)
sys.exit(0) if everything_is_alright else sys.exit(1)
|
def validate_files(args):
"""Validate all files needed for training a model.
Fails with a non-zero exit code if there are any errors in the data."""
from rasa.core.validator import Validator
from rasa.importers.rasa import RasaFileImporter
loop = asyncio.get_event_loop()
file_importer = RasaFileImporter(
domain_path=args.domain, training_data_paths=args.data
)
validator = loop.run_until_complete(Validator.from_importer(file_importer))
domain_is_alright = validator.verify_domain()
if not domain_is_valid:
sys.exit(1)
everything_is_alright = validator.verify_all(not args.fail_on_warnings)
sys.exit(0) if everything_is_alright else sys.exit(1)
|
3,701 |
def test_fit(Poly):
def f(x):
return x*(x - 1)*(x - 2)
def g(x):
return -(x-3)*(x-5)*(x-6)
x = np.linspace(0, 3)
y, z = f(x), g(x)
yz = np.column_stack((y,z))
# check default value of domain and window
p = Poly.fit(x, y, 3)
assert_almost_equal(p.domain, [0, 3])
assert_almost_equal(p(x), y)
assert_equal(p.degree(), 3)
[q,r] = Poly.fit(x, yz, 3)
assert_almost_equal(r.domain, [0, 3])
assert_almost_equal(q(x), y)
assert_almost_equal(r(x), z)
assert_equal(r.degree(), 3)
print("testing")
# check with given domains and window
d = Poly.domain + random((2,))*.25
w = Poly.window + random((2,))*.25
p = Poly.fit(x, y, 3, domain=d, window=w)
assert_almost_equal(p(x), y)
assert_almost_equal(p.domain, d)
assert_almost_equal(p.window, w)
p = Poly.fit(x, y, [0, 1, 2, 3], domain=d, window=w)
assert_almost_equal(p(x), y)
assert_almost_equal(p.domain, d)
assert_almost_equal(p.window, w)
[q,r] = Poly.fit(x, yz, 3, domain=d, window=w)
assert_almost_equal(q(x), y)
assert_almost_equal(r(x), z)
assert_almost_equal(q.domain, d)
assert_almost_equal(r.domain, d)
assert_almost_equal(q.window, w)
assert_almost_equal(r.window, w)
[q,r] = Poly.fit(x, yz, [0, 1, 2, 3], domain=d, window=w)
assert_almost_equal(q(x), y)
assert_almost_equal(r(x), z)
assert_almost_equal(q.domain, d)
assert_almost_equal(r.domain, d)
assert_almost_equal(q.window, w)
assert_almost_equal(r.window, w)
# check with class domain default
p = Poly.fit(x, y, 3, [])
assert_equal(p.domain, Poly.domain)
assert_equal(p.window, Poly.window)
p = Poly.fit(x, y, [0, 1, 2, 3], [])
assert_equal(p.domain, Poly.domain)
assert_equal(p.window, Poly.window)
[q,r] = Poly.fit(x, yz, 3, [])
assert_equal(q.domain, Poly.domain)
assert_equal(r.domain, Poly.domain)
assert_equal(q.window, Poly.window)
assert_equal(r.window, Poly.window)
[q,r] = Poly.fit(x, yz, [0, 1, 2, 3], [])
assert_equal(q.domain, Poly.domain)
assert_equal(r.domain, Poly.domain)
assert_equal(q.window, Poly.window)
assert_equal(r.window, Poly.window)
# check that fit accepts weights.
w = np.zeros_like(x)
z = y + random(y.shape)*.25
w[::2] = 1
p1 = Poly.fit(x[::2], z[::2], 3)
p2 = Poly.fit(x, z, 3, w=w)
p3 = Poly.fit(x, z, [0, 1, 2, 3], w=w)
assert_almost_equal(p1(x), p2(x))
assert_almost_equal(p2(x), p3(x))
yzz = yz + random(yz.shape)*.25
[q1, r1] = Poly.fit(x[::2], yzz[::2,:], 3)
[q2, r2] = Poly.fit(x, yzz, 3, w=w)
[q3, r3] = Poly.fit(x, yzz, [0, 1, 2, 3], w=w)
assert_almost_equal(q1(x), q2(x))
assert_almost_equal(q2(x), q3(x))
assert_almost_equal(r1(x), r2(x))
assert_almost_equal(r2(x), r3(x))
|
def test_fit(Poly):
def f(x):
return x*(x - 1)*(x - 2)
def g(x):
return -(x-3)*(x-5)*(x-6)
x = np.linspace(0, 3)
y, z = f(x), g(x)
yz = np.column_stack((y,z))
# check default value of domain and window
p = Poly.fit(x, y, 3)
assert_almost_equal(p.domain, [0, 3])
assert_almost_equal(p(x), y)
assert_equal(p.degree(), 3)
q, r = Poly.fit(x, yz, 3)
assert_almost_equal(r.domain, [0, 3])
assert_almost_equal(q(x), y)
assert_almost_equal(r(x), z)
assert_equal(r.degree(), 3)
print("testing")
# check with given domains and window
d = Poly.domain + random((2,))*.25
w = Poly.window + random((2,))*.25
p = Poly.fit(x, y, 3, domain=d, window=w)
assert_almost_equal(p(x), y)
assert_almost_equal(p.domain, d)
assert_almost_equal(p.window, w)
p = Poly.fit(x, y, [0, 1, 2, 3], domain=d, window=w)
assert_almost_equal(p(x), y)
assert_almost_equal(p.domain, d)
assert_almost_equal(p.window, w)
[q,r] = Poly.fit(x, yz, 3, domain=d, window=w)
assert_almost_equal(q(x), y)
assert_almost_equal(r(x), z)
assert_almost_equal(q.domain, d)
assert_almost_equal(r.domain, d)
assert_almost_equal(q.window, w)
assert_almost_equal(r.window, w)
[q,r] = Poly.fit(x, yz, [0, 1, 2, 3], domain=d, window=w)
assert_almost_equal(q(x), y)
assert_almost_equal(r(x), z)
assert_almost_equal(q.domain, d)
assert_almost_equal(r.domain, d)
assert_almost_equal(q.window, w)
assert_almost_equal(r.window, w)
# check with class domain default
p = Poly.fit(x, y, 3, [])
assert_equal(p.domain, Poly.domain)
assert_equal(p.window, Poly.window)
p = Poly.fit(x, y, [0, 1, 2, 3], [])
assert_equal(p.domain, Poly.domain)
assert_equal(p.window, Poly.window)
[q,r] = Poly.fit(x, yz, 3, [])
assert_equal(q.domain, Poly.domain)
assert_equal(r.domain, Poly.domain)
assert_equal(q.window, Poly.window)
assert_equal(r.window, Poly.window)
[q,r] = Poly.fit(x, yz, [0, 1, 2, 3], [])
assert_equal(q.domain, Poly.domain)
assert_equal(r.domain, Poly.domain)
assert_equal(q.window, Poly.window)
assert_equal(r.window, Poly.window)
# check that fit accepts weights.
w = np.zeros_like(x)
z = y + random(y.shape)*.25
w[::2] = 1
p1 = Poly.fit(x[::2], z[::2], 3)
p2 = Poly.fit(x, z, 3, w=w)
p3 = Poly.fit(x, z, [0, 1, 2, 3], w=w)
assert_almost_equal(p1(x), p2(x))
assert_almost_equal(p2(x), p3(x))
yzz = yz + random(yz.shape)*.25
[q1, r1] = Poly.fit(x[::2], yzz[::2,:], 3)
[q2, r2] = Poly.fit(x, yzz, 3, w=w)
[q3, r3] = Poly.fit(x, yzz, [0, 1, 2, 3], w=w)
assert_almost_equal(q1(x), q2(x))
assert_almost_equal(q2(x), q3(x))
assert_almost_equal(r1(x), r2(x))
assert_almost_equal(r2(x), r3(x))
|
9,690 |
def main():
"""Validate BOTMETA"""
path = '.github/BOTMETA.yml'
try:
with open(path, 'r') as f_path:
botmeta = yaml.safe_load(f_path)
except yaml.error.MarkedYAMLError as ex:
print('%s:%d:%d: YAML load failed: %s' % (path, ex.context_mark.line + 1, ex.context_mark.column + 1, re.sub(r'\s+', ' ', str(ex))))
sys.exit()
except Exception as ex:
print('%s:%d:%d: YAML load failed: %s' % (path, 0, 0, re.sub(r'\s+', ' ', str(ex))))
sys.exit()
files_schema = Any(
Schema(*string_types),
Schema({
'ignored': Any(list_string_types, *string_types),
'keywords': Any(list_string_types, *string_types),
'labels': Any(list_string_types, *string_types),
'maintainers': Any(list_string_types, *string_types),
'notified': Any(list_string_types, *string_types),
'supershipit': Any(list_string_types, *string_types),
'support': Any("core", "network", "community"),
})
)
list_dict_file_schema = [{str_type: files_schema}
for str_type in string_types]
schema = Schema({
Required('automerge'): bool,
Required('files'): Any(None, *list_dict_file_schema),
Required('macros'): dict, # Any(*list_macros_schema),
})
# Ensure schema is valid
try:
schema(botmeta)
except MultipleInvalid as ex:
for error in ex.errors:
# No way to get line numbers
print('%s:%d:%d: %s' % (path, 0, 0, humanize_error(botmeta, error)))
# Ensure botmeta is always support:core
botmeta_support = botmeta.get('files', {}).get('.github/BOTMETA.yml', {}).get('support', '')
if botmeta_support != 'core':
print('%s:%d:%d: .github/BOTMETA.yml MUST be support: core' % (path, 0, 0))
# Find all path (none-team) macros so we can substitute them
macros = botmeta.get('macros', {})
path_macros = []
team_macros = []
for macro in macros:
if macro.startswith('team_'):
team_macros.append('$'+macro)
else:
path_macros.append(macro)
# Validate files
for file in botmeta['files']:
# maintainers can be:
# implicit: $modules/command/shell.py $team_foo
# maintainer (string): maintainers: $team_foo fred steve
# maintainer (list): maintainers:
# - $team_foo
# - fred
if isinstance(botmeta.get('files', {}).get(file, ''), str):
maintainers = botmeta.get('files', {}).get(file, '').split(' ')
validate_maintainers(maintainers, team_macros, path, file)
elif botmeta.get('files', {}).get(file, '').get('maintainers', ''):
if isinstance(botmeta.get('files', {}).get(file, '').get('maintainers', ''), str):
maintainers = botmeta.get('files', {}).get(file, '').get('maintainers', '').split(' ')
if isinstance(botmeta.get('files', {}).get(file, '').get('maintainers', ''), list):
maintainers = botmeta.get('files', {}).get(file, '').get('maintainers', '')
validate_maintainers(maintainers, team_macros, path, file)
for macro in path_macros:
file = file.replace('$' + macro, botmeta.get('macros', {}).get(macro, ''))
if not os.path.exists(file):
# Not a file or directory, though maybe the prefix to one?
# https://github.com/ansible/ansibullbot/pull/1023
if not glob.glob('%s*' % file):
print("%s:%d:%d: Can't find '%s.*' in this branch" % (path, 0, 0, file))
|
def main():
"""Validate BOTMETA"""
path = '.github/BOTMETA.yml'
try:
with open(path, 'r') as f_path:
botmeta = yaml.safe_load(f_path)
except yaml.error.MarkedYAMLError as ex:
print('%s:%d:%d: YAML load failed: %s' % (path, ex.context_mark.line + 1, ex.context_mark.column + 1, re.sub(r'\s+', ' ', str(ex))))
sys.exit()
except Exception as ex:
print('%s:%d:%d: YAML load failed: %s' % (path, 0, 0, re.sub(r'\s+', ' ', str(ex))))
sys.exit()
files_schema = Any(
Schema(*string_types),
Schema({
'ignored': Any(list_string_types, *string_types),
'keywords': Any(list_string_types, *string_types),
'labels': Any(list_string_types, *string_types),
'maintainers': Any(list_string_types, *string_types),
'notified': Any(list_string_types, *string_types),
'supershipit': Any(list_string_types, *string_types),
'support': Any("core", "network", "community"),
})
)
list_dict_file_schema = [{str_type: files_schema}
for str_type in string_types]
schema = Schema({
Required('automerge'): bool,
Required('files'): Any(None, *list_dict_file_schema),
Required('macros'): dict, # Any(*list_macros_schema),
})
# Ensure schema is valid
try:
schema(botmeta)
except MultipleInvalid as ex:
for error in ex.errors:
# No way to get line numbers
print('%s:%d:%d: %s' % (path, 0, 0, humanize_error(botmeta, error)))
# Ensure botmeta is always support:core
botmeta_support = botmeta.get('files', {}).get('.github/BOTMETA.yml', {}).get('support', '')
if botmeta_support != 'core':
print('%s:%d:%d: .github/BOTMETA.yml MUST be support: core' % (path, 0, 0))
# Find all path (none-team) macros so we can substitute them
macros = botmeta.get('macros', {})
path_macros = []
team_macros = []
for macro in macros:
if macro.startswith('team_'):
team_macros.append('$'+macro)
else:
path_macros.append(macro)
# Validate files
for file in botmeta['files']:
# maintainers can be:
# implicit: $modules/command/shell.py $team_foo
# maintainer (string): maintainers: $team_foo fred steve
# maintainer (list): maintainers:
# - $team_foo
# - fred
if isinstance(botmeta.get('files', {}).get(file, ''), str):
maintainers = botmeta.get('files', {}).get(file, '').split(' ')
validate_maintainers(maintainers, team_macros, path, file)
elif botmeta.get('files', {}).get(file, '').get('maintainers', ''):
if isinstance(botmeta.get('files', {}).get(file, '').get('maintainers', ''), str):
maintainers = botmeta.get('files', {}).get(file, '').get('maintainers', '').split(' ')
if isinstance(botmeta.get('files', {}).get(file, '').get('maintainers', ''), list):
maintainers = botmeta.get('files', {}).get(file, '').get('maintainers', '')
validate_maintainers(maintainers, team_macros, path, file)
for macro in path_macros:
file = file.replace('$' + macro, botmeta.get('macros', {}).get(macro, ''))
if not os.path.exists(file):
# Not a file or directory, though maybe the prefix to one?
# https://github.com/ansible/ansibullbot/pull/1023
if not glob.glob('%s*' % file):
print("%s:%d:%d: Can't find '%s.*' in this branch" % (path, 0, 0, file))
|
14,131 |
def clip(gdf, mask, keep_geom_type=False):
"""Clip points, lines, or polygon geometries to the mask extent.
Both layers must be in the same Coordinate Reference System (CRS).
The `gdf` will be clipped to the full extent of the clip object.
If there are multiple polygons in mask, data from `gdf` will be
clipped to the total boundary of all polygons in mask.
If the `mask` is a tuple of `(minx, miny, maxx, maxy)`, a faster rectangle
clipping algorithm will be used. Note that this can lead to slightly different
results in edge cases, e.g. if a line would be reduced to a point, this point might
not be returned.
Parameters
----------
gdf : GeoDataFrame or GeoSeries
Vector layer (point, line, polygon) to be clipped to mask.
mask : GeoDataFrame, GeoSeries, (Multi)Polygon, tuple
Polygon vector layer used to clip `gdf`.
The mask's geometry is dissolved into one geometric feature
and intersected with `gdf`.
If the mask is a tuple of `(minx, miny, maxx, maxy)`, `clip` will use a faster
rectangle clipping (`.clip_by_rect()`), possibly leading to slightly different
results.
keep_geom_type : boolean, default False
If True, return only geometries of original type in case of intersection
resulting in multiple geometry types or GeometryCollections.
If False, return all resulting geometries (potentially mixed-types).
Returns
-------
GeoDataFrame or GeoSeries
Vector data (points, lines, polygons) from `gdf` clipped to
polygon boundary from mask.
See also
--------
GeoDataFrame.clip : equivalent GeoDataFrame method
GeoSeries.clip : equivalent GeoSeries method
Examples
--------
Clip points (global cities) with a polygon (the South American continent):
>>> world = geopandas.read_file(
... geopandas.datasets.get_path('naturalearth_lowres'))
>>> south_america = world[world['continent'] == "South America"]
>>> capitals = geopandas.read_file(
... geopandas.datasets.get_path('naturalearth_cities'))
>>> capitals.shape
(202, 2)
>>> sa_capitals = geopandas.clip(capitals, south_america)
>>> sa_capitals.shape
(12, 2)
"""
if not isinstance(gdf, (GeoDataFrame, GeoSeries)):
raise TypeError(
"'gdf' should be GeoDataFrame or GeoSeries, got {}".format(type(gdf))
)
if not isinstance(mask, (GeoDataFrame, GeoSeries, Polygon, MultiPolygon, tuple)):
raise TypeError(
"'mask' should be GeoDataFrame, GeoSeries,"
f"(Multi)Polygon or 4 element tuple, got {type(mask)}"
)
if isinstance(mask, tuple) and len(mask) != 4:
raise TypeError(
"If 'mask' is a tuple, it must have four values (minx, miny, maxx, maxy)"
)
if isinstance(mask, (GeoDataFrame, GeoSeries)):
if not _check_crs(gdf, mask):
_crs_mismatch_warn(gdf, mask, stacklevel=3)
if isinstance(mask, (GeoDataFrame, GeoSeries)):
box_mask = mask.total_bounds
elif isinstance(mask, tuple):
box_mask = mask
else:
box_mask = mask.bounds
box_gdf = gdf.total_bounds
if not (
((box_mask[0] <= box_gdf[2]) and (box_gdf[0] <= box_mask[2]))
and ((box_mask[1] <= box_gdf[3]) and (box_gdf[1] <= box_mask[3]))
):
return gdf.iloc[:0]
if isinstance(mask, (GeoDataFrame, GeoSeries)):
combined_mask = mask.geometry.unary_union
else:
combined_mask = mask
clipped = _clip_gdf_with_mask(gdf, combined_mask)
if keep_geom_type:
geomcoll_concat = (clipped.geom_type == "GeometryCollection").any()
geomcoll_orig = (gdf.geom_type == "GeometryCollection").any()
new_collection = geomcoll_concat and not geomcoll_orig
if geomcoll_orig:
warnings.warn(
"keep_geom_type can not be called on a "
"GeoDataFrame with GeometryCollection."
)
else:
polys = ["Polygon", "MultiPolygon"]
lines = ["LineString", "MultiLineString", "LinearRing"]
points = ["Point", "MultiPoint"]
# Check that the gdf for multiple geom types (points, lines and/or polys)
orig_types_total = sum(
[
gdf.geom_type.isin(polys).any(),
gdf.geom_type.isin(lines).any(),
gdf.geom_type.isin(points).any(),
]
)
# Check how many geometry types are in the clipped GeoDataFrame
clip_types_total = sum(
[
clipped.geom_type.isin(polys).any(),
clipped.geom_type.isin(lines).any(),
clipped.geom_type.isin(points).any(),
]
)
# Check there aren't any new geom types in the clipped GeoDataFrame
more_types = orig_types_total < clip_types_total
if orig_types_total > 1:
warnings.warn(
"keep_geom_type can not be called on a mixed type GeoDataFrame."
)
elif new_collection or more_types:
orig_type = gdf.geom_type.iloc[0]
if new_collection:
clipped = clipped.explode(index_parts=False)
if orig_type in polys:
clipped = clipped.loc[clipped.geom_type.isin(polys)]
elif orig_type in lines:
clipped = clipped.loc[clipped.geom_type.isin(lines)]
return clipped
|
def clip(gdf, mask, keep_geom_type=False):
"""Clip points, lines, or polygon geometries to the mask extent.
Both layers must be in the same Coordinate Reference System (CRS).
The `gdf` will be clipped to the full extent of the clip object.
If there are multiple polygons in mask, data from `gdf` will be
clipped to the total boundary of all polygons in mask.
If the ``mask`` is a tuple of ``(minx, miny, maxx, maxy)``, a faster rectangle
clipping algorithm will be used. Note that this can lead to slightly different
results in edge cases, e.g. if a line would be reduced to a point, this point might
not be returned.
Parameters
----------
gdf : GeoDataFrame or GeoSeries
Vector layer (point, line, polygon) to be clipped to mask.
mask : GeoDataFrame, GeoSeries, (Multi)Polygon, tuple
Polygon vector layer used to clip `gdf`.
The mask's geometry is dissolved into one geometric feature
and intersected with `gdf`.
If the mask is a tuple of `(minx, miny, maxx, maxy)`, `clip` will use a faster
rectangle clipping (`.clip_by_rect()`), possibly leading to slightly different
results.
keep_geom_type : boolean, default False
If True, return only geometries of original type in case of intersection
resulting in multiple geometry types or GeometryCollections.
If False, return all resulting geometries (potentially mixed-types).
Returns
-------
GeoDataFrame or GeoSeries
Vector data (points, lines, polygons) from `gdf` clipped to
polygon boundary from mask.
See also
--------
GeoDataFrame.clip : equivalent GeoDataFrame method
GeoSeries.clip : equivalent GeoSeries method
Examples
--------
Clip points (global cities) with a polygon (the South American continent):
>>> world = geopandas.read_file(
... geopandas.datasets.get_path('naturalearth_lowres'))
>>> south_america = world[world['continent'] == "South America"]
>>> capitals = geopandas.read_file(
... geopandas.datasets.get_path('naturalearth_cities'))
>>> capitals.shape
(202, 2)
>>> sa_capitals = geopandas.clip(capitals, south_america)
>>> sa_capitals.shape
(12, 2)
"""
if not isinstance(gdf, (GeoDataFrame, GeoSeries)):
raise TypeError(
"'gdf' should be GeoDataFrame or GeoSeries, got {}".format(type(gdf))
)
if not isinstance(mask, (GeoDataFrame, GeoSeries, Polygon, MultiPolygon, tuple)):
raise TypeError(
"'mask' should be GeoDataFrame, GeoSeries,"
f"(Multi)Polygon or 4 element tuple, got {type(mask)}"
)
if isinstance(mask, tuple) and len(mask) != 4:
raise TypeError(
"If 'mask' is a tuple, it must have four values (minx, miny, maxx, maxy)"
)
if isinstance(mask, (GeoDataFrame, GeoSeries)):
if not _check_crs(gdf, mask):
_crs_mismatch_warn(gdf, mask, stacklevel=3)
if isinstance(mask, (GeoDataFrame, GeoSeries)):
box_mask = mask.total_bounds
elif isinstance(mask, tuple):
box_mask = mask
else:
box_mask = mask.bounds
box_gdf = gdf.total_bounds
if not (
((box_mask[0] <= box_gdf[2]) and (box_gdf[0] <= box_mask[2]))
and ((box_mask[1] <= box_gdf[3]) and (box_gdf[1] <= box_mask[3]))
):
return gdf.iloc[:0]
if isinstance(mask, (GeoDataFrame, GeoSeries)):
combined_mask = mask.geometry.unary_union
else:
combined_mask = mask
clipped = _clip_gdf_with_mask(gdf, combined_mask)
if keep_geom_type:
geomcoll_concat = (clipped.geom_type == "GeometryCollection").any()
geomcoll_orig = (gdf.geom_type == "GeometryCollection").any()
new_collection = geomcoll_concat and not geomcoll_orig
if geomcoll_orig:
warnings.warn(
"keep_geom_type can not be called on a "
"GeoDataFrame with GeometryCollection."
)
else:
polys = ["Polygon", "MultiPolygon"]
lines = ["LineString", "MultiLineString", "LinearRing"]
points = ["Point", "MultiPoint"]
# Check that the gdf for multiple geom types (points, lines and/or polys)
orig_types_total = sum(
[
gdf.geom_type.isin(polys).any(),
gdf.geom_type.isin(lines).any(),
gdf.geom_type.isin(points).any(),
]
)
# Check how many geometry types are in the clipped GeoDataFrame
clip_types_total = sum(
[
clipped.geom_type.isin(polys).any(),
clipped.geom_type.isin(lines).any(),
clipped.geom_type.isin(points).any(),
]
)
# Check there aren't any new geom types in the clipped GeoDataFrame
more_types = orig_types_total < clip_types_total
if orig_types_total > 1:
warnings.warn(
"keep_geom_type can not be called on a mixed type GeoDataFrame."
)
elif new_collection or more_types:
orig_type = gdf.geom_type.iloc[0]
if new_collection:
clipped = clipped.explode(index_parts=False)
if orig_type in polys:
clipped = clipped.loc[clipped.geom_type.isin(polys)]
elif orig_type in lines:
clipped = clipped.loc[clipped.geom_type.isin(lines)]
return clipped
|
21,655 |
def _child_events_comparison_key(child: EventBase) -> Tuple[bool, Optional[str], str]:
"""
Generate a value for comparing two child events for ordering.
The rules for ordering are supposed to be:
1. The 'order' key, if it is valid.
2. The 'origin_server_ts' of the 'm.room.create' event.
3. The 'room_id'.
But we skip step 2 since we may not have any state from the room.
Args:
child: The event for generating a comparison key.
Returns:
The comparison key as a tuple of:
False if the ordering is valid.
The ordering field.
The room ID.
"""
order = child.content.get("order")
# If order is not a string or doesn't meet the requirements, ignore it.
if not isinstance(order, str):
order = None
elif len(order) > 50 or _INVALID_ORDER_CHARS_RE.match(order):
order = None
# Items without an order come last.
return (order is None, order, child.room_id)
|
def _child_events_comparison_key(child: EventBase) -> Tuple[bool, Optional[str], str]:
"""
Generate a value for comparing two child events for ordering.
The rules for ordering are supposed to be:
1. The 'order' key, if it is valid.
2. The 'origin_server_ts' of the 'm.room.create' event.
3. The 'room_id'.
But we skip step 2 since we may not have any state from the room.
Args:
child: The event for generating a comparison key.
Returns:
The comparison key as a tuple of:
False if the ordering is valid.
The ordering field.
The room ID.
"""
order = child.content.get("order")
# If order is not a string or doesn't meet the requirements, ignore it.
if not isinstance(order, str):
order = None
elif len(order) > 50 or _INVALID_ORDER_CHARS_RE.search(order):
order = None
# Items without an order come last.
return (order is None, order, child.room_id)
|
13,764 |
def _log_and_monitor_expected_errors(request, exception, caller):
"""
Adds logging and monitoring for expected errors as needed.
Arguments:
request: The request
exception: The exception
caller: Either 'middleware' or 'drf`
"""
expected_error_settings_dict = _get_expected_error_settings_dict()
if not expected_error_settings_dict:
return
# 'module:class', for example, 'django.core.exceptions:PermissionDenied'
# Note: `Exception` itself doesn't have a module.
exception_module = exception.__module__ if hasattr(exception, '__module__') else ''
module_and_class = f'{exception_module}:{exception.__class__.__name__}'
# Set checked_error_expected_from custom attribute to potentially help find issues where errors are never processed.
set_custom_attribute('checked_error_expected_from', caller)
# check if we already added logging/monitoring from a different caller
request_cache = RequestCache('openedx.core.lib.request_utils')
cached_handled_exception = request_cache.get_cached_response('handled_exception')
if cached_handled_exception.is_found:
cached_module_and_class = cached_handled_exception.value
# exception was already processed by a different caller
if cached_handled_exception.value == module_and_class:
set_custom_attribute('checked_error_expected_from', 'multiple')
return
# Currently, it seems unexpected that middleware and drf will both handle different uncaught exceptions.
# However, since it is possible, we will add an additional attribute and log message and then continue.
set_custom_attribute('unexpected_multiple_exceptions', cached_module_and_class)
log.warning(
"Unexpected scenario where different exceptions are handled by _log_and_monitor_expected_errors. "
"See 'unexpected_multiple_exceptions' custom attribute."
)
request_cache.set('handled_exception', module_and_class)
if module_and_class not in expected_error_settings_dict:
return
module_and_class_with_message = f'{exception_module}:{repr(exception)}'
set_custom_attribute('error_expected', module_and_class_with_message)
expected_error_settings = expected_error_settings_dict[module_and_class]
if expected_error_settings['is_ignored']:
set_custom_attribute('error_ignored', True)
if expected_error_settings['log_error'] or expected_error_settings['log_stack_trace']:
print_stack = expected_error_settings['log_stack_trace']
request_path = request.path if hasattr(request, 'path') else 'request-path-unknown'
log.info('Expected error seen for %s', request_path, exc_info=exception, stack_info=print_stack)
|
def _log_and_monitor_expected_errors(request, exception, caller):
"""
Adds logging and monitoring for expected errors as needed.
Arguments:
request: The request
exception: The exception
caller: Either 'middleware' or 'drf`
"""
expected_error_settings_dict = _get_expected_error_settings_dict()
if not expected_error_settings_dict:
return
# 'module:class', for example, 'django.core.exceptions:PermissionDenied'
# Note: `Exception` itself doesn't have a module.
exception_module = exception.__module__ if hasattr(exception, '__module__') else ''
module_and_class = f'{exception_module}:{exception.__class__.__name__}'
# Set checked_error_expected_from custom attribute to potentially help find issues where errors are never processed.
set_custom_attribute('checked_error_expected_from', caller)
# check if we already added logging/monitoring from a different caller
request_cache = RequestCache('openedx.core.lib.request_utils')
cached_handled_exception = request_cache.get_cached_response('handled_exception')
if cached_handled_exception.is_found:
cached_module_and_class = cached_handled_exception.value
# exception was already processed by a different caller
if cached_handled_exception.value == module_and_class:
set_custom_attribute('checked_error_expected_from', 'multiple')
return
# Currently, it seems unexpected that middleware and drf will both handle different uncaught exceptions.
# However, since it is possible, we will add an additional attribute and log message and then continue.
set_custom_attribute('unexpected_multiple_exceptions', cached_module_and_class)
log.warning(
"Unexpected scenario where different exceptions are handled by _log_and_monitor_expected_errors. "
"See 'unexpected_multiple_exceptions' custom attribute."
)
request_cache.set('handled_exception', module_and_class)
if module_and_class not in expected_error_settings_dict:
return
module_and_class_with_message = f'{exception_module}:{repr(exception)}'
set_custom_attribute('error_expected', module_and_class_with_message)
expected_error_settings = expected_error_settings_dict[module_and_class]
if expected_error_settings['is_ignored']:
set_custom_attribute('error_ignored', True)
if expected_error_settings['log_error'] or expected_error_settings['log_stack_trace']:
print_stack = expected_error_settings['log_stack_trace']
request_path = getattr(request, 'path', default='request-path-unknown')
log.info('Expected error seen for %s', request_path, exc_info=exception, stack_info=print_stack)
|
23,075 |
def meta_from_array(x, ndim=None, dtype=None):
"""Normalize an array to appropriate meta object
Parameters
----------
x: array-like, callable
Either an object that looks sufficiently like a Numpy array,
or a callable that accepts shape and dtype keywords
ndim: int
Number of dimensions of the array
dtype: Numpy dtype
A valid input for ``np.dtype``
Returns
-------
array-like with zero elements of the correct dtype
"""
# If using x._meta, x must be a Dask Array, some libraries (e.g. zarr)
# implement a _meta attribute that are incompatible with Dask Array._meta
if hasattr(x, "_meta") and isinstance(x, Array):
x = x._meta
if dtype is None and x is None:
raise ValueError("You must specify the meta or dtype of the array")
if np.isscalar(x):
x = np.array(x)
if x is None:
x = np.ndarray
if isinstance(x, type):
x = x(shape=(0,) * (ndim or 0), dtype=dtype)
if (
not hasattr(x, "shape")
or not hasattr(x, "dtype")
or not isinstance(x.shape, tuple)
):
return x
if isinstance(x, list) or isinstance(x, tuple):
ndims = [
0
if isinstance(a, numbers.Number)
else a.ndim
if hasattr(a, "ndim")
else len(a)
for a in x
]
a = [a if nd == 0 else meta_from_array(a, nd) for a, nd in zip(x, ndims)]
return a if isinstance(x, list) else tuple(x)
if ndim is None:
ndim = x.ndim
try:
meta = x[tuple(slice(0, 0, None) for _ in range(x.ndim))]
if meta.ndim != ndim:
if ndim > x.ndim:
meta = meta[(Ellipsis,) + tuple(None for _ in range(ndim - meta.ndim))]
meta = meta[tuple(slice(0, 0, None) for _ in range(meta.ndim))]
elif ndim == 0:
meta = meta.sum()
else:
meta = meta.reshape((0,) * ndim)
except Exception:
meta = np.empty((0,) * ndim, dtype=dtype or x.dtype)
if np.isscalar(meta):
meta = np.array(meta)
if dtype and meta.dtype != dtype:
try:
meta = meta.astype(dtype)
except ValueError as e:
if (
any(
[
s in str(e)
for s in [
"invalid literal",
"could not convert string to float",
]
]
)
and meta.dtype.kind == "U"
):
meta = np.array([]).astype(dtype)
else:
raise e
return meta
|
def meta_from_array(x, ndim=None, dtype=None):
"""Normalize an array to appropriate meta object
Parameters
----------
x: array-like, callable
Either an object that looks sufficiently like a Numpy array,
or a callable that accepts shape and dtype keywords
ndim: int
Number of dimensions of the array
dtype: Numpy dtype
A valid input for ``np.dtype``
Returns
-------
array-like with zero elements of the correct dtype
"""
# If using x._meta, x must be a Dask Array, some libraries (e.g. zarr)
# implement a _meta attribute that are incompatible with Dask Array._meta
if hasattr(x, "_meta") and isinstance(x, Array):
x = x._meta
if dtype is None and x is None:
raise ValueError("You must specify the meta or dtype of the array")
if np.isscalar(x):
x = np.array(x)
if x is None:
x = np.ndarray
if isinstance(x, type):
x = x(shape=(0,) * (ndim or 0), dtype=dtype)
if (
not hasattr(x, "shape")
or not hasattr(x, "dtype")
or not isinstance(x.shape, tuple)
):
return x
if isinstance(x, list) or isinstance(x, tuple):
ndims = [
0
if isinstance(a, numbers.Number)
else a.ndim
if hasattr(a, "ndim")
else len(a)
for a in x
]
a = [a if nd == 0 else meta_from_array(a, nd) for a, nd in zip(x, ndims)]
return a if isinstance(x, list) else tuple(x)
if ndim is None:
ndim = x.ndim
try:
meta = x[tuple(slice(0, 0, None) for _ in range(x.ndim))]
if meta.ndim != ndim:
if ndim > x.ndim:
meta = meta[(Ellipsis,) + tuple(None for _ in range(ndim - meta.ndim))]
meta = meta[tuple(slice(0, 0, None) for _ in range(meta.ndim))]
elif ndim == 0:
meta = meta.sum()
else:
meta = meta.reshape((0,) * ndim)
except Exception:
meta = np.empty((0,) * ndim, dtype=dtype or x.dtype)
if np.isscalar(meta):
meta = np.array(meta)
if dtype and meta.dtype != dtype:
try:
meta = meta.astype(dtype)
except ValueError as e:
if (
any(
[
s in str(e)
for s in [
"invalid literal",
"could not convert string to float",
]
]
)
and meta.dtype.kind in "SU"
):
meta = np.array([]).astype(dtype)
else:
raise e
return meta
|
31,797 |
def test_module(client: Client, feature_mapping_field: str) -> str:
result = parse_mappings(feature_mapping_field)
if len(result) < 2:
return "Mapping Error. Please check feature_mapping field"
try:
response = client.test_arcanna()
demisto.info(f'test_module response={response}')
if not response["connected"]:
return "Authentication Error. Please check the API Key you provided."
else:
return "ok"
except DemistoException as e:
raise e
|
def test_module(client: Client, feature_mapping_field: str) -> str:
result = parse_mappings(feature_mapping_field)
if len(result) < 2:
return "Arcanna Mapping Error. Please check your feature_mapping field."
try:
response = client.test_arcanna()
demisto.info(f'test_module response={response}')
if not response["connected"]:
return "Authentication Error. Please check the API Key you provided."
else:
return "ok"
except DemistoException as e:
raise e
|
23,604 |
def singlediode(photocurrent, saturation_current, resistance_series,
resistance_shunt, nNsVth, ivcurve_pnts=None,
method='lambertw'):
r"""
Solve the single-diode model to obtain a photovoltaic IV curve.
Singlediode solves the single diode equation [1]_
.. math::
I = I_L -
I_0 \left[
\exp \left(\frac{V+I R_s}{n N_s V_{th}} \right)-1
\right] -
\frac{V + I R_s}{R_{sh}}
for :math:`I` and :math:`V` when given :math:`I_L, I_0, R_s, R_{sh},` and
:math:`n N_s V_{th}` which are described later. Returns a DataFrame
which contains the 5 points on the I-V curve specified in
SAND2004-3535 [3]_. If all :math:`I_L, I_0, R_s, R_{sh},` and
:math:`n N_s V_{th}` are scalar, a single curve will be returned, if any
are Series (of the same length), multiple IV curves will be calculated.
The input parameters can be calculated using
:py:func:`~pvlib.pvsystem.calcparams_desoto` from meteorological data.
Parameters
----------
photocurrent : numeric
Light-generated current :math:`I_L` (photocurrent) under desired
IV curve conditions. ``0 <= photocurrent``. [A]
saturation_current : numeric
Diode saturation :math:`I_0` current under desired IV curve
conditions. ``0 < saturation_current``. [A]
resistance_series : numeric
Series resistance :math:`R_s` under desired IV curve conditions.
``0 <= resistance_series < numpy.inf``. [ohms]
resistance_shunt : numeric
Shunt resistance :math:`R_{sh}` under desired IV curve conditions.
``0 < resistance_shunt <= numpy.inf``. [ohms]
nNsVth : numeric
The product of three components. 1) The usual diode ideal factor
:math:`n`, 2) the number of cells in series :math:`N_s`, and 3)
the cell thermal voltage under the desired IV curve conditions
:math:`V_{th}`. The thermal voltage of the cell (in volts) may be
calculated as :math:`k_B T_c / q`, where :math:`k_B` is
Boltzmann's constant (J/K), :math:`T_c` is the temperature of the p-n
junction in Kelvin, and :math:`q` is the charge of an electron
(coulombs). ``0 < nNsVth``. [V]
ivcurve_pnts : None or int, default None
Number of points in the desired IV curve. If None or 0, no
IV curves will be produced.
method : str, default 'lambertw'
Determines the method used to calculate points on the IV curve. The
options are ``'lambertw'``, ``'newton'``, or ``'brentq'``.
Returns
-------
OrderedDict or DataFrame
The returned dict-like object always contains the keys/columns:
* i_sc - short circuit current in amperes.
* v_oc - open circuit voltage in volts.
* i_mp - current at maximum power point in amperes.
* v_mp - voltage at maximum power point in volts.
* p_mp - power at maximum power point in watts.
* i_x - current, in amperes, at ``v = 0.5*v_oc``.
* i_xx - current, in amperes, at ``V = 0.5*(v_oc+v_mp)``.
If ivcurve_pnts is greater than 0, the output dictionary will also
include the keys:
* i - IV curve current in amperes.
* v - IV curve voltage in volts.
The output will be an OrderedDict if photocurrent is a scalar,
array, or ivcurve_pnts is not None.
The output will be a DataFrame if photocurrent is a Series and
ivcurve_pnts is None.
Notes
-----
If the method is ``'lambertw'`` then the solution employed to solve the
implicit diode equation utilizes the Lambert W function to obtain an
explicit function of :math:`V=f(I)` and :math:`I=f(V)` as shown in [2]_.
If the method is ``'newton'`` then the root-finding Newton-Raphson method
is used. It should be safe for well behaved IV-curves, but the ``'brentq'``
method is recommended for reliability.
If the method is ``'brentq'`` then Brent's bisection search method is used
that guarantees convergence by bounding the voltage between zero and
open-circuit.
If the method is either ``'newton'`` or ``'brentq'`` and ``ivcurve_pnts``
are indicated, then :func:`pvlib.singlediode.bishop88` [4]_ is used to
calculate the points on the IV curve points at diode voltages from zero to
open-circuit voltage with a log spacing that gets closer as voltage
increases. If the method is ``'lambertw'`` then the calculated points on
the IV curve are linearly spaced.
References
----------
.. [1] S.R. Wenham, M.A. Green, M.E. Watt, "Applied Photovoltaics" ISBN
0 86758 909 4
.. [2] A. Jain, A. Kapoor, "Exact analytical solutions of the
parameters of real solar cells using Lambert W-function", Solar
Energy Materials and Solar Cells, 81 (2004) 269-277.
.. [3] D. King et al, "Sandia Photovoltaic Array Performance Model",
SAND2004-3535, Sandia National Laboratories, Albuquerque, NM
.. [4] "Computer simulation of the effects of electrical mismatches in
photovoltaic cell interconnection circuits" JW Bishop, Solar Cell (1988)
https://doi.org/10.1016/0379-6787(88)90059-2
See also
--------
sapm
calcparams_desoto
pvlib.singlediode.bishop88
"""
# Calculate points on the IV curve using the LambertW solution to the
# single diode equation
if method.lower() == 'lambertw':
out = _singlediode._lambertw(
photocurrent, saturation_current, resistance_series,
resistance_shunt, nNsVth, ivcurve_pnts
)
i_sc, v_oc, i_mp, v_mp, p_mp, i_x, i_xx = out[:7]
if ivcurve_pnts:
ivcurve_i, ivcurve_v = out[7:]
else:
# Calculate points on the IV curve using either 'newton' or 'brentq'
# methods. Voltages are determined by first solving the single diode
# equation for the diode voltage V_d then backing out voltage
args = (photocurrent, saturation_current, resistance_series,
resistance_shunt, nNsVth) # collect args
v_oc = _singlediode.bishop88_v_from_i(
0.0, *args, method=method.lower()
)
i_mp, v_mp, p_mp = _singlediode.bishop88_mpp(
*args, method=method.lower()
)
i_sc = _singlediode.bishop88_i_from_v(
0.0, *args, method=method.lower()
)
i_x = _singlediode.bishop88_i_from_v(
v_oc / 2.0, *args, method=method.lower()
)
i_xx = _singlediode.bishop88_i_from_v(
(v_oc + v_mp) / 2.0, *args, method=method.lower()
)
# calculate the IV curve if requested using bishop88
if ivcurve_pnts:
vd = v_oc * (
(11.0 - np.logspace(np.log10(11.0), 0.0,
ivcurve_pnts)) / 10.0
)
ivcurve_i, ivcurve_v, _ = _singlediode.bishop88(vd, *args)
out = OrderedDict()
out['i_sc'] = i_sc
out['v_oc'] = v_oc
out['i_mp'] = i_mp
out['v_mp'] = v_mp
out['p_mp'] = p_mp
out['i_x'] = i_x
out['i_xx'] = i_xx
if ivcurve_pnts:
out['v'] = ivcurve_v
out['i'] = ivcurve_i
if isinstance(photocurrent, pd.Series) and not ivcurve_pnts:
out = pd.DataFrame(out, index=photocurrent.index)
return out
|
def singlediode(photocurrent, saturation_current, resistance_series,
resistance_shunt, nNsVth, ivcurve_pnts=None,
method='lambertw'):
r"""
Solve the single-diode model to obtain a photovoltaic IV curve.
Solves the single diode equation [1]_
.. math::
I = I_L -
I_0 \left[
\exp \left(\frac{V+I R_s}{n N_s V_{th}} \right)-1
\right] -
\frac{V + I R_s}{R_{sh}}
for :math:`I` and :math:`V` when given :math:`I_L, I_0, R_s, R_{sh},` and
:math:`n N_s V_{th}` which are described later. Returns a DataFrame
which contains the 5 points on the I-V curve specified in
SAND2004-3535 [3]_. If all :math:`I_L, I_0, R_s, R_{sh},` and
:math:`n N_s V_{th}` are scalar, a single curve will be returned, if any
are Series (of the same length), multiple IV curves will be calculated.
The input parameters can be calculated using
:py:func:`~pvlib.pvsystem.calcparams_desoto` from meteorological data.
Parameters
----------
photocurrent : numeric
Light-generated current :math:`I_L` (photocurrent) under desired
IV curve conditions. ``0 <= photocurrent``. [A]
saturation_current : numeric
Diode saturation :math:`I_0` current under desired IV curve
conditions. ``0 < saturation_current``. [A]
resistance_series : numeric
Series resistance :math:`R_s` under desired IV curve conditions.
``0 <= resistance_series < numpy.inf``. [ohms]
resistance_shunt : numeric
Shunt resistance :math:`R_{sh}` under desired IV curve conditions.
``0 < resistance_shunt <= numpy.inf``. [ohms]
nNsVth : numeric
The product of three components. 1) The usual diode ideal factor
:math:`n`, 2) the number of cells in series :math:`N_s`, and 3)
the cell thermal voltage under the desired IV curve conditions
:math:`V_{th}`. The thermal voltage of the cell (in volts) may be
calculated as :math:`k_B T_c / q`, where :math:`k_B` is
Boltzmann's constant (J/K), :math:`T_c` is the temperature of the p-n
junction in Kelvin, and :math:`q` is the charge of an electron
(coulombs). ``0 < nNsVth``. [V]
ivcurve_pnts : None or int, default None
Number of points in the desired IV curve. If None or 0, no
IV curves will be produced.
method : str, default 'lambertw'
Determines the method used to calculate points on the IV curve. The
options are ``'lambertw'``, ``'newton'``, or ``'brentq'``.
Returns
-------
OrderedDict or DataFrame
The returned dict-like object always contains the keys/columns:
* i_sc - short circuit current in amperes.
* v_oc - open circuit voltage in volts.
* i_mp - current at maximum power point in amperes.
* v_mp - voltage at maximum power point in volts.
* p_mp - power at maximum power point in watts.
* i_x - current, in amperes, at ``v = 0.5*v_oc``.
* i_xx - current, in amperes, at ``V = 0.5*(v_oc+v_mp)``.
If ivcurve_pnts is greater than 0, the output dictionary will also
include the keys:
* i - IV curve current in amperes.
* v - IV curve voltage in volts.
The output will be an OrderedDict if photocurrent is a scalar,
array, or ivcurve_pnts is not None.
The output will be a DataFrame if photocurrent is a Series and
ivcurve_pnts is None.
Notes
-----
If the method is ``'lambertw'`` then the solution employed to solve the
implicit diode equation utilizes the Lambert W function to obtain an
explicit function of :math:`V=f(I)` and :math:`I=f(V)` as shown in [2]_.
If the method is ``'newton'`` then the root-finding Newton-Raphson method
is used. It should be safe for well behaved IV-curves, but the ``'brentq'``
method is recommended for reliability.
If the method is ``'brentq'`` then Brent's bisection search method is used
that guarantees convergence by bounding the voltage between zero and
open-circuit.
If the method is either ``'newton'`` or ``'brentq'`` and ``ivcurve_pnts``
are indicated, then :func:`pvlib.singlediode.bishop88` [4]_ is used to
calculate the points on the IV curve points at diode voltages from zero to
open-circuit voltage with a log spacing that gets closer as voltage
increases. If the method is ``'lambertw'`` then the calculated points on
the IV curve are linearly spaced.
References
----------
.. [1] S.R. Wenham, M.A. Green, M.E. Watt, "Applied Photovoltaics" ISBN
0 86758 909 4
.. [2] A. Jain, A. Kapoor, "Exact analytical solutions of the
parameters of real solar cells using Lambert W-function", Solar
Energy Materials and Solar Cells, 81 (2004) 269-277.
.. [3] D. King et al, "Sandia Photovoltaic Array Performance Model",
SAND2004-3535, Sandia National Laboratories, Albuquerque, NM
.. [4] "Computer simulation of the effects of electrical mismatches in
photovoltaic cell interconnection circuits" JW Bishop, Solar Cell (1988)
https://doi.org/10.1016/0379-6787(88)90059-2
See also
--------
sapm
calcparams_desoto
pvlib.singlediode.bishop88
"""
# Calculate points on the IV curve using the LambertW solution to the
# single diode equation
if method.lower() == 'lambertw':
out = _singlediode._lambertw(
photocurrent, saturation_current, resistance_series,
resistance_shunt, nNsVth, ivcurve_pnts
)
i_sc, v_oc, i_mp, v_mp, p_mp, i_x, i_xx = out[:7]
if ivcurve_pnts:
ivcurve_i, ivcurve_v = out[7:]
else:
# Calculate points on the IV curve using either 'newton' or 'brentq'
# methods. Voltages are determined by first solving the single diode
# equation for the diode voltage V_d then backing out voltage
args = (photocurrent, saturation_current, resistance_series,
resistance_shunt, nNsVth) # collect args
v_oc = _singlediode.bishop88_v_from_i(
0.0, *args, method=method.lower()
)
i_mp, v_mp, p_mp = _singlediode.bishop88_mpp(
*args, method=method.lower()
)
i_sc = _singlediode.bishop88_i_from_v(
0.0, *args, method=method.lower()
)
i_x = _singlediode.bishop88_i_from_v(
v_oc / 2.0, *args, method=method.lower()
)
i_xx = _singlediode.bishop88_i_from_v(
(v_oc + v_mp) / 2.0, *args, method=method.lower()
)
# calculate the IV curve if requested using bishop88
if ivcurve_pnts:
vd = v_oc * (
(11.0 - np.logspace(np.log10(11.0), 0.0,
ivcurve_pnts)) / 10.0
)
ivcurve_i, ivcurve_v, _ = _singlediode.bishop88(vd, *args)
out = OrderedDict()
out['i_sc'] = i_sc
out['v_oc'] = v_oc
out['i_mp'] = i_mp
out['v_mp'] = v_mp
out['p_mp'] = p_mp
out['i_x'] = i_x
out['i_xx'] = i_xx
if ivcurve_pnts:
out['v'] = ivcurve_v
out['i'] = ivcurve_i
if isinstance(photocurrent, pd.Series) and not ivcurve_pnts:
out = pd.DataFrame(out, index=photocurrent.index)
return out
|
48,648 |
def _parse_jsonc(json_text: str) -> str:
parsed_json_text = re.sub(r"^\s*?//.*?\n", "", json_text, flags=re.RegexFlag.MULTILINE)
parsed_json_text = re.sub(r",([\n\s]*?[\}\]])", r"\1", parsed_json_text)
return parsed_json_text
|
def _parse_jsonc(json_text: str) -> str:
# strip comments from the file
lines = [line for line in text.split('\n') if not line.strip().startswith('//')]
# strip trailing commas from the file
valid_json = re.sub(r",(\s*?[\}\]])", r"\1", "\n".join(lines))
return valid_json
|
36,724 |
def _parsedate_tz(data):
"""Convert date to extended time tuple.
The last (additional) element is the time zone offset in seconds, except if
the timezone was specified as -0000. In that case the last element is
None. This indicates a UTC timestamp that explicitly declaims knowledge of
the source timezone, as opposed to a +0000 timestamp that indicates the
source timezone really was UTC.
"""
if not data:
return None
data = data.split()
if not data: # This happens for whitespace-only input.
return None
# The FWS after the comma after the day-of-week is optional, so search and
# adjust for this.
if data[0].endswith(',') or data[0].lower() in _daynames:
# There's a dayname here. Skip it
del data[0]
else:
i = data[0].rfind(',')
if i >= 0:
data[0] = data[0][i+1:]
if len(data) == 3: # RFC 850 date, deprecated
stuff = data[0].split('-')
if len(stuff) == 3:
data = stuff + data[1:]
if len(data) == 4:
s = data[3]
i = s.find('+')
if i == -1:
i = s.find('-')
if i > 0:
data[3:] = [s[:i], s[i:]]
else:
data.append('') # Dummy tz
if len(data) < 5:
return None
data = data[:5]
[dd, mm, yy, tm, tz] = data
if not (dd and mm and yy):
return None
mm = mm.lower()
if mm not in _monthnames:
dd, mm = mm, dd.lower()
if mm not in _monthnames:
return None
mm = _monthnames.index(mm) + 1
if mm > 12:
mm -= 12
if dd[-1] == ',':
dd = dd[:-1]
i = yy.find(':')
if i > 0:
yy, tm = tm, yy
if yy[-1] == ',':
yy = yy[:-1]
if not yy:
return None
if not yy[0].isdigit():
yy, tz = tz, yy
if tm[-1] == ',':
tm = tm[:-1]
tm = tm.split(':')
if len(tm) == 2:
[thh, tmm] = tm
tss = '0'
elif len(tm) == 3:
[thh, tmm, tss] = tm
elif len(tm) == 1 and '.' in tm[0]:
# Some non-compliant MUAs use '.' to separate time elements.
tm = tm[0].split('.')
if len(tm) == 2:
[thh, tmm] = tm
tss = 0
elif len(tm) == 3:
[thh, tmm, tss] = tm
else:
return None
else:
return None
try:
yy = int(yy)
dd = int(dd)
thh = int(thh)
tmm = int(tmm)
tss = int(tss)
except ValueError:
return None
# Check for a yy specified in two-digit format, then convert it to the
# appropriate four-digit format, according to the POSIX standard. RFC 822
# calls for a two-digit yy, but RFC 2822 (which obsoletes RFC 822)
# mandates a 4-digit yy. For more information, see the documentation for
# the time module.
if yy < 100:
# The year is between 1969 and 1999 (inclusive).
if yy > 68:
yy += 1900
# The year is between 2000 and 2068 (inclusive).
else:
yy += 2000
tzoffset = None
tz = tz.upper()
if tz in _timezones:
tzoffset = _timezones[tz]
else:
try:
tzoffset = int(tz)
except ValueError:
pass
if tzoffset==0 and tz.startswith('-'):
tzoffset = None
# Convert a timezone offset into seconds ; -0500 -> -18000
if tzoffset:
if tzoffset < 0:
tzsign = -1
tzoffset = -tzoffset
else:
tzsign = 1
tzoffset = tzsign * ( (tzoffset//100)*3600 + (tzoffset % 100)*60)
# Daylight Saving Time flag is set to -1, since DST is unknown.
return [yy, mm, dd, thh, tmm, tss, 0, 1, -1, tzoffset]
|
def _parsedate_tz(data):
"""Convert date to extended time tuple.
The last (additional) element is the time zone offset in seconds, except if
the timezone was specified as -0000. In that case the last element is
None. This indicates a UTC timestamp that explicitly declaims knowledge of
the source timezone, as opposed to a +0000 timestamp that indicates the
source timezone really was UTC.
"""
if not data:
return None
data = data.split()
if not data: # This happens for whitespace-only input.
return None
# The FWS after the comma after the day-of-week is optional, so search and
# adjust for this.
if data[0].endswith(',') or data[0].lower() in _daynames:
# There's a dayname here. Skip it
del data[0]
else:
i = data[0].rfind(',')
if i >= 0:
data[0] = data[0][i+1:]
if len(data) == 3: # RFC 850 date, deprecated
stuff = data[0].split('-')
if len(stuff) == 3:
data = stuff + data[1:]
if len(data) == 4:
s = data[3]
i = s.find('+')
if i == -1:
i = s.find('-')
if i > 0:
data[3:] = [s[:i], s[i:]]
else:
data.append('') # Dummy tz
if len(data) < 5:
return None
data = data[:5]
[dd, mm, yy, tm, tz] = data
if not all((dd, mm, yy)):
return None
mm = mm.lower()
if mm not in _monthnames:
dd, mm = mm, dd.lower()
if mm not in _monthnames:
return None
mm = _monthnames.index(mm) + 1
if mm > 12:
mm -= 12
if dd[-1] == ',':
dd = dd[:-1]
i = yy.find(':')
if i > 0:
yy, tm = tm, yy
if yy[-1] == ',':
yy = yy[:-1]
if not yy:
return None
if not yy[0].isdigit():
yy, tz = tz, yy
if tm[-1] == ',':
tm = tm[:-1]
tm = tm.split(':')
if len(tm) == 2:
[thh, tmm] = tm
tss = '0'
elif len(tm) == 3:
[thh, tmm, tss] = tm
elif len(tm) == 1 and '.' in tm[0]:
# Some non-compliant MUAs use '.' to separate time elements.
tm = tm[0].split('.')
if len(tm) == 2:
[thh, tmm] = tm
tss = 0
elif len(tm) == 3:
[thh, tmm, tss] = tm
else:
return None
else:
return None
try:
yy = int(yy)
dd = int(dd)
thh = int(thh)
tmm = int(tmm)
tss = int(tss)
except ValueError:
return None
# Check for a yy specified in two-digit format, then convert it to the
# appropriate four-digit format, according to the POSIX standard. RFC 822
# calls for a two-digit yy, but RFC 2822 (which obsoletes RFC 822)
# mandates a 4-digit yy. For more information, see the documentation for
# the time module.
if yy < 100:
# The year is between 1969 and 1999 (inclusive).
if yy > 68:
yy += 1900
# The year is between 2000 and 2068 (inclusive).
else:
yy += 2000
tzoffset = None
tz = tz.upper()
if tz in _timezones:
tzoffset = _timezones[tz]
else:
try:
tzoffset = int(tz)
except ValueError:
pass
if tzoffset==0 and tz.startswith('-'):
tzoffset = None
# Convert a timezone offset into seconds ; -0500 -> -18000
if tzoffset:
if tzoffset < 0:
tzsign = -1
tzoffset = -tzoffset
else:
tzsign = 1
tzoffset = tzsign * ( (tzoffset//100)*3600 + (tzoffset % 100)*60)
# Daylight Saving Time flag is set to -1, since DST is unknown.
return [yy, mm, dd, thh, tmm, tss, 0, 1, -1, tzoffset]
|
32,093 |
def get_fetch_run_time_with_look_back(last_run, first_fetch, look_back=0, timezone=0, date_format='%Y-%m-%dT%H:%M:%S'):
"""
Gets the time range for fetch
:type last_run: ``dict``
:param last_run: The LastRun object
:type first_fetch: ``str``
:param first_fetch: The first time to fetch, used in the first fetch
:type look_back: ``int``
:param look_back: The time to look back in fetch in minutes
:type timezone: ``int``
:param timezone: The time zone offset in hours
:type date_format: ``str``
:param date_format: The date format
:return: The time range o fetch in
:rtype: ``Tuple``
"""
last_run_time = last_run and 'time' in last_run and last_run['time']
now = datetime.utcnow() + timedelta(hours=timezone)
if not last_run_time:
last_run_time, _ = parse_date_range(first_fetch)
else:
last_run_time = datetime.strptime(last_run_time, date_format)
if look_back > 0:
if now - last_run_time < timedelta(minutes=look_back):
last_run_time = now - timedelta(minutes=look_back)
return last_run_time.strftime(date_format), now.strftime(date_format)
|
def get_fetch_run_time_range(last_run, first_fetch, look_back=0, timezone=0, date_format='%Y-%m-%dT%H:%M:%S'):
"""
Gets the time range for fetch
:type last_run: ``dict``
:param last_run: The LastRun object
:type first_fetch: ``str``
:param first_fetch: The first time to fetch, used in the first fetch
:type look_back: ``int``
:param look_back: The time to look back in fetch in minutes
:type timezone: ``int``
:param timezone: The time zone offset in hours
:type date_format: ``str``
:param date_format: The date format
:return: The time range o fetch in
:rtype: ``Tuple``
"""
last_run_time = last_run and 'time' in last_run and last_run['time']
now = datetime.utcnow() + timedelta(hours=timezone)
if not last_run_time:
last_run_time, _ = parse_date_range(first_fetch)
else:
last_run_time = datetime.strptime(last_run_time, date_format)
if look_back > 0:
if now - last_run_time < timedelta(minutes=look_back):
last_run_time = now - timedelta(minutes=look_back)
return last_run_time.strftime(date_format), now.strftime(date_format)
|
8,767 |
def test_say_long_fit(bot):
"""Test a long message that fit into the 512 bytes limit."""
text = 'a' * (512 - len('PRIVMSG #sopel :\r\n'))
bot.say(text, '#sopel')
assert bot.backend.message_sent == rawlist(
'PRIVMSG #sopel :%s' % text,
)
|
def test_say_long_fit(bot):
"""Test a long message that fits into the 512 bytes limit."""
text = 'a' * (512 - len('PRIVMSG #sopel :\r\n'))
bot.say(text, '#sopel')
assert bot.backend.message_sent == rawlist(
'PRIVMSG #sopel :%s' % text,
)
|
4,561 |
def clean(signals, sessions=None, detrend=True, standardize='zscore',
confounds=None, standardize_confounds=True, filter="butterworth",
low_pass=None, high_pass=None, t_r=2.5, ensure_finite=False):
"""Improve SNR on masked fMRI signals.
This function can do several things on the input signals, in
the following order:
- detrend
- low- and high-pass filter
- remove confounds
- standardize
Low-pass filtering improves specificity.
High-pass filtering should be kept small, to keep some
sensitivity.
Filtering is only meaningful on evenly-sampled signals.
According to Lindquist et al. (2018), removal of confounds will be done
orthogonally to temporal filters (low- and/or high-pass filters), if both
are specified.
Parameters
----------
signals: numpy.ndarray
Timeseries. Must have shape (instant number, features number).
This array is not modified.
sessions : numpy array, optional
Add a session level to the cleaning process. Each session will be
cleaned independently. Must be a 1D array of n_samples elements.
confounds: numpy.ndarray, str, DataFrame or list of
Confounds timeseries. Shape must be
(instant number, confound number), or just (instant number,)
The number of time instants in signals and confounds must be
identical (i.e. signals.shape[0] == confounds.shape[0]).
If a string is provided, it is assumed to be the name of a csv file
containing signals as columns, with an optional one-line header.
If a list is provided, all confounds are removed from the input
signal, as if all were in the same array.
t_r: float
Repetition time, in second (sampling period). Set to None if not.
filter: {'butterworth', False}
Filtering methods.
'butterworth': perform butterworth filtering.
False : Do not perform filtering.
low_pass, high_pass: float
Respectively high and low cutoff frequencies, in Hertz.
detrend: bool
If detrending should be applied on timeseries (before
confound removal)
standardize: {'zscore', 'psc', False}, default is 'zscore'
Strategy to standardize the signal.
'zscore': the signal is z-scored. Timeseries are shifted
to zero mean and scaled to unit variance.
'psc': Timeseries are shifted to zero mean value and scaled
to percent signal change (as compared to original mean signal).
False : Do not standardize the data.
standardize_confounds: boolean, optional, default is True
If standardize_confounds is True, the confounds are z-scored:
their mean is put to 0 and their variance to 1 in the time dimension.
ensure_finite: bool
If True, the non-finite values (NANs and infs) found in the data
will be replaced by zeros.
Returns
-------
cleaned_signals: numpy.ndarray
Input signals, cleaned. Same shape as `signals`.
Notes
-----
Confounds removal is based on a projection on the orthogonal
of the signal space. See `Friston, K. J., A. P. Holmes,
K. J. Worsley, J.-P. Poline, C. D. Frith, et R. S. J. Frackowiak.
"Statistical Parametric Maps in Functional Imaging: A General
Linear Approach". Human Brain Mapping 2, no 4 (1994): 189-210.
<http://dx.doi.org/10.1002/hbm.460020402>`_
Orthogonalization between temporal filters and confound removal is based on
suggestions in `Lindquist, M., Geuter, S., Wager, T., & Caffo, B. (2018).
Modular preprocessing pipelines can reintroduce artifacts into fMRI data.
bioRxiv, 407676. <http://dx.doi.org/10.1101/407676>`_
See Also
--------
nilearn.image.clean_img
"""
# Read confounds and signals
signals, confounds = _sanitize_inputs(signals, confounds, ensure_finite)
# check if filter paramters are satidfied
_ = _check_filter_parameters(filter, low_pass, high_pass, t_r)
# Restrict the signal to the orthogonal of the confounds
if sessions is not None:
signals = _process_session(signals, sessions, detrend, standardize,
confounds, low_pass, high_pass, t_r)
# Detrend
# Detrend and filtering should apply to confounds, if confound presents
# keep filters orthogonal (according to Lindquist et al. (2018))
if detrend:
mean_signals = signals.mean(axis=0)
signals = _standardize(signals, standardize=False, detrend=detrend)
if confounds is not None:
confounds = _standardize(confounds, standardize=False,
detrend=detrend)
# Apply low- and high-pass filters
if filter == "butterworth" and t_r is not None: # this change enticipate extra fltering methods
signals = butterworth(signals, sampling_rate=1. / t_r,
low_pass=low_pass, high_pass=high_pass)
if confounds is not None:
# Apply low- and high-pass filters to keep filters orthogonal
# (according to Lindquist et al. (2018))
confounds = butterworth(confounds, sampling_rate=1. / t_r,
low_pass=low_pass, high_pass=high_pass)
# if filter == "cosine":
# ...
# Remove confounds
if confounds is not None:
confounds = _standardize(confounds, standardize=standardize_confounds,
detrend=False)
if not standardize_confounds:
# Improve numerical stability by controlling the range of
# confounds. We don't rely on _standardize as it removes any
# constant contribution to confounds.
confound_max = np.max(np.abs(confounds), axis=0)
confound_max[confound_max == 0] = 1
confounds /= confound_max
# Pivoting in qr decomposition was added in scipy 0.10
Q, R, _ = linalg.qr(confounds, mode='economic', pivoting=True)
Q = Q[:, np.abs(np.diag(R)) > np.finfo(np.float64).eps * 100.]
signals -= Q.dot(Q.T).dot(signals)
# Standardize
if detrend and (standardize == 'psc'):
# If the signal is detrended, we have to know the original mean
# signal to calculate the psc.
signals = _standardize(signals + mean_signals, standardize=standardize,
detrend=False)
else:
signals = _standardize(signals, standardize=standardize,
detrend=False)
return signals
|
def clean(signals, sessions=None, detrend=True, standardize='zscore',
confounds=None, standardize_confounds=True, filter="butterworth",
low_pass=None, high_pass=None, t_r=2.5, ensure_finite=False):
"""Improve SNR on masked fMRI signals.
This function can do several things on the input signals, in
the following order:
- detrend
- low- and high-pass filter
- remove confounds
- standardize
Low-pass filtering improves specificity.
High-pass filtering should be kept small, to keep some
sensitivity.
Filtering is only meaningful on evenly-sampled signals.
According to Lindquist et al. (2018), removal of confounds will be done
orthogonally to temporal filters (low- and/or high-pass filters), if both
are specified.
Parameters
----------
signals: numpy.ndarray
Timeseries. Must have shape (instant number, features number).
This array is not modified.
sessions : numpy array, optional
Add a session level to the cleaning process. Each session will be
cleaned independently. Must be a 1D array of n_samples elements.
confounds: numpy.ndarray, str, DataFrame or list of
Confounds timeseries. Shape must be
(instant number, confound number), or just (instant number,)
The number of time instants in signals and confounds must be
identical (i.e. signals.shape[0] == confounds.shape[0]).
If a string is provided, it is assumed to be the name of a csv file
containing signals as columns, with an optional one-line header.
If a list is provided, all confounds are removed from the input
signal, as if all were in the same array.
t_r: float
Repetition time, in second (sampling period). Set to None if not.
filter: {'butterworth', False}
Filtering methods.
'butterworth': perform butterworth filtering.
False : Do not perform filtering.
low_pass, high_pass: float
Respectively high and low cutoff frequencies, in Hertz.
detrend: bool
If detrending should be applied on timeseries (before
confound removal)
standardize: {'zscore', 'psc', False}, default is 'zscore'
Strategy to standardize the signal.
'zscore': the signal is z-scored. Timeseries are shifted
to zero mean and scaled to unit variance.
'psc': Timeseries are shifted to zero mean value and scaled
to percent signal change (as compared to original mean signal).
False : Do not standardize the data.
standardize_confounds: boolean, optional, default is True
If standardize_confounds is True, the confounds are z-scored:
their mean is put to 0 and their variance to 1 in the time dimension.
ensure_finite: bool
If True, the non-finite values (NANs and infs) found in the data
will be replaced by zeros.
Returns
-------
cleaned_signals: numpy.ndarray
Input signals, cleaned. Same shape as `signals`.
Notes
-----
Confounds removal is based on a projection on the orthogonal
of the signal space. See `Friston, K. J., A. P. Holmes,
K. J. Worsley, J.-P. Poline, C. D. Frith, et R. S. J. Frackowiak.
"Statistical Parametric Maps in Functional Imaging: A General
Linear Approach". Human Brain Mapping 2, no 4 (1994): 189-210.
<http://dx.doi.org/10.1002/hbm.460020402>`_
Orthogonalization between temporal filters and confound removal is based on
suggestions in `Lindquist, M., Geuter, S., Wager, T., & Caffo, B. (2018).
Modular preprocessing pipelines can reintroduce artifacts into fMRI data.
bioRxiv, 407676. <http://dx.doi.org/10.1101/407676>`_
See Also
--------
nilearn.image.clean_img
"""
# Read confounds and signals
signals, confounds = _sanitize_inputs(signals, confounds, ensure_finite)
# check if filter parameters are satisfied
_ = _check_filter_parameters(filter, low_pass, high_pass, t_r)
# Restrict the signal to the orthogonal of the confounds
if sessions is not None:
signals = _process_session(signals, sessions, detrend, standardize,
confounds, low_pass, high_pass, t_r)
# Detrend
# Detrend and filtering should apply to confounds, if confound presents
# keep filters orthogonal (according to Lindquist et al. (2018))
if detrend:
mean_signals = signals.mean(axis=0)
signals = _standardize(signals, standardize=False, detrend=detrend)
if confounds is not None:
confounds = _standardize(confounds, standardize=False,
detrend=detrend)
# Apply low- and high-pass filters
if filter == "butterworth" and t_r is not None: # this change enticipate extra fltering methods
signals = butterworth(signals, sampling_rate=1. / t_r,
low_pass=low_pass, high_pass=high_pass)
if confounds is not None:
# Apply low- and high-pass filters to keep filters orthogonal
# (according to Lindquist et al. (2018))
confounds = butterworth(confounds, sampling_rate=1. / t_r,
low_pass=low_pass, high_pass=high_pass)
# if filter == "cosine":
# ...
# Remove confounds
if confounds is not None:
confounds = _standardize(confounds, standardize=standardize_confounds,
detrend=False)
if not standardize_confounds:
# Improve numerical stability by controlling the range of
# confounds. We don't rely on _standardize as it removes any
# constant contribution to confounds.
confound_max = np.max(np.abs(confounds), axis=0)
confound_max[confound_max == 0] = 1
confounds /= confound_max
# Pivoting in qr decomposition was added in scipy 0.10
Q, R, _ = linalg.qr(confounds, mode='economic', pivoting=True)
Q = Q[:, np.abs(np.diag(R)) > np.finfo(np.float64).eps * 100.]
signals -= Q.dot(Q.T).dot(signals)
# Standardize
if detrend and (standardize == 'psc'):
# If the signal is detrended, we have to know the original mean
# signal to calculate the psc.
signals = _standardize(signals + mean_signals, standardize=standardize,
detrend=False)
else:
signals = _standardize(signals, standardize=standardize,
detrend=False)
return signals
|
31,648 |
def list_sensors():
target_org = demisto.args().get('target_org', None)
r = send_request('GET', '/rest/v1/sensors', target_org=target_org)
details = demisto.args().get('details', 'false')
keys = ['isAuthorized',
'isNetflowGenerator',
# 'lastActive',
'name']
if details != 'true':
filtered_r = []
#returning only data in keys
for doc in r:
s = {k: doc[k] for k in keys}
filtered_r.append(s)
return filtered_r
else:
for s in r:
del s['lastActive']
return r
|
def list_sensors():
target_org = demisto.args().get('target_org', None)
r = send_request('GET', '/rest/v1/sensors', target_org=target_org)
details = argToBoolean(demisto.args().get('details', 'false'))
keys = ['isAuthorized',
'isNetflowGenerator',
# 'lastActive',
'name']
if details != 'true':
filtered_r = []
#returning only data in keys
for doc in r:
s = {k: doc[k] for k in keys}
filtered_r.append(s)
return filtered_r
else:
for s in r:
del s['lastActive']
return r
|
3,848 |
def geometric_edges(G, radius, p):
"""Returns edge list of node pairs within `radius` of each other
Radius uses Minkowski distance metric `p`.
If scipy available, use scipy KDTree to speed computation.
"""
nodes_pos = G.nodes(data="pos")
try:
import scipy as sp
import scipy.spatial # call as sp.spatial
except ImportError:
# no scipy KDTree so compute by for-loop
radius_p = radius ** p
edges = [
(u, v)
for (u, pu), (v, pv) in combinations(nodes_pos, 2)
if sum(abs(a - b) ** p for a, b in zip(pu, pv)) <= radius_p
]
return edges
# scipy KDTree is available
nodes, coords = list(zip(*nodes_pos))
kdtree = sp.spatial.KDTree(coords) # Cannot provide generator.
edge_indexes = kdtree.query_pairs(radius, p)
edges = [(nodes[u], nodes[v]) for u, v in edge_indexes]
return edges
|
def geometric_edges(G, radius, p):
"""Returns edge list of node pairs within `radius` of each other
Radius uses Minkowski distance metric `p`.
If scipy available, use scipy KDTree to speed computation.
"""
nodes_pos = G.nodes(data="pos")
try:
import scipy as sp
import scipy.spatial # call as sp.spatial
except ImportError:
# no scipy KDTree so compute by for-loop
radius_p = radius ** p
edges = [
(u, v)
for (u, pu), (v, pv) in combinations(nodes_pos, 2)
if sum(abs(a - b) ** p for a, b in zip(pu, pv)) <= radius_p
]
return edges
# scipy KDTree is available
nodes, coords = list(zip(*nodes_pos))
kdtree = sp.spatial.cKDTree(coords) # Cannot provide generator.
edge_indexes = kdtree.query_pairs(radius, p)
edges = [(nodes[u], nodes[v]) for u, v in edge_indexes]
return edges
|
5,385 |
def auth(profile=None, **connection_args):
"""
Set up keystone credentials. Only intended to be used within Keystone-enabled modules.
CLI Example:
.. code-block:: bash
salt '*' keystone.auth
"""
__utils__["versions.warn_until"](
"Phosphorous",
(
"The keystone module has been deprecated and will be removed in {version}. "
"Please update to using the keystoneng module"
),
)
kwargs = _get_kwargs(profile=profile, **connection_args)
disc = discover.Discover(auth_url=kwargs["auth_url"])
v2_auth_url = disc.url_for("v2.0")
v3_auth_url = disc.url_for("v3.0")
if v3_auth_url:
global _OS_IDENTITY_API_VERSION
global _TENANTS
_OS_IDENTITY_API_VERSION = 3
_TENANTS = "projects"
kwargs["auth_url"] = v3_auth_url
else:
kwargs["auth_url"] = v2_auth_url
kwargs.pop("user_domain_name")
kwargs.pop("project_domain_name")
auth = generic.Password(**kwargs)
sess = session.Session(auth=auth)
ks_cl = disc.create_client(session=sess)
return ks_cl
|
def auth(profile=None, **connection_args):
"""
Set up keystone credentials. Only intended to be used within Keystone-enabled modules.
CLI Example:
.. code-block:: bash
salt '*' keystone.auth
"""
__utils__["versions.warn_until"](
"Phosphorus",
(
"The keystone module has been deprecated and will be removed in {version}. "
"Please update to using the keystoneng module"
),
)
kwargs = _get_kwargs(profile=profile, **connection_args)
disc = discover.Discover(auth_url=kwargs["auth_url"])
v2_auth_url = disc.url_for("v2.0")
v3_auth_url = disc.url_for("v3.0")
if v3_auth_url:
global _OS_IDENTITY_API_VERSION
global _TENANTS
_OS_IDENTITY_API_VERSION = 3
_TENANTS = "projects"
kwargs["auth_url"] = v3_auth_url
else:
kwargs["auth_url"] = v2_auth_url
kwargs.pop("user_domain_name")
kwargs.pop("project_domain_name")
auth = generic.Password(**kwargs)
sess = session.Session(auth=auth)
ks_cl = disc.create_client(session=sess)
return ks_cl
|
53,751 |
def list_to_flat_grid(xs: List[TensorType]):
"""
:param xs: List with d rank-1 Tensors, with shapes N1, N2, ..., Nd
:return: Tensor with shape [N1*N2*...*Nd, dim] representing the flattened
D-dimensional grid built from the input tensors xs
"""
return tf.reshape(tf.stack(tf.meshgrid(*xs), axis=-1), (-1, len(xs)))
|
def list_to_flat_grid(xs: List[TensorType]):
"""
:param xs: List with d rank-1 Tensors, with shapes N1, N2, ..., Nd
:return: Tensor with shape [N1*N2*...*Nd, d] representing the flattened
d-dimensional grid built from the input tensors xs
"""
return tf.reshape(tf.stack(tf.meshgrid(*xs), axis=-1), (-1, len(xs)))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.