id | original | modified |
---|---|---|
22,088 |
def _ntlm_authenticate_info(request):
"""
Extract host information in an NTLM_AUTH message
"""
if (len(request) < 52):
LOGGER.warning("NTLM message is too short (%d) but should be at least "
"52 char long", len(request))
return None
value = []
offset, ln = struct.unpack('IH', request[32:36] + request[28:30])
if ln > 0:
value.append("domain:" + \
encode_b64(_extract_substr(request, offset, ln)).decode())
has_version = False
# Flags are not present in a NTLM_AUTH message when the data block starts
# before index 64
if offset >= 64 and len(request) > 64:
flags, = struct.unpack('I', request[60:64])
has_version = flags & flag_version
off, ln = struct.unpack('IH', request[40:44] + request[36:38])
if ln > 0:
value.append("user-name:" + \
encode_b64(_extract_substr(request, off, ln)).decode())
off, ln = struct.unpack('IH', request[48:52] + request[44:46])
if ln > 0:
value.append("workstation:" + \
encode_b64(_extract_substr(request, off, ln)).decode())
# Get OS Version if the `Negotiate Version` is set
# (NTLM_AUTH messages with a data block starting before index 72 do not
# contain information on the version)
if offset >= 72 and len(request) > 72 and has_version:
maj, minor, bld, ntlm_ver = struct.unpack('BBHB', request[64:65] +
request[65:66] +
request[66:68] +
request[71:72])
version = "{}.{}.{}".format(maj, minor, bld).encode()
value.append("ntlm-os:{}".format(encode_b64(version).decode()))
value.append("ntlm-version:{}".format(ntlm_ver))
return 'NTLM ' + ','.join(value)
|
def _ntlm_authenticate_info(request):
"""
Extract host information in an NTLM_AUTH message
"""
if (len(request) < 52):
LOGGER.warning("NTLM message is too short (%d) but should be at least "
"52 char long", len(request))
return None
value = []
ln, offset = struct.unpack('H2xI', request[28:36])
if ln > 0:
value.append("domain:" + \
encode_b64(_extract_substr(request, offset, ln)).decode())
has_version = False
# Flags are not present in a NTLM_AUTH message when the data block starts
# before index 64
if offset >= 64 and len(request) > 64:
flags, = struct.unpack('I', request[60:64])
has_version = flags & flag_version
off, ln = struct.unpack('IH', request[40:44] + request[36:38])
if ln > 0:
value.append("user-name:" + \
encode_b64(_extract_substr(request, off, ln)).decode())
off, ln = struct.unpack('IH', request[48:52] + request[44:46])
if ln > 0:
value.append("workstation:" + \
encode_b64(_extract_substr(request, off, ln)).decode())
# Get OS Version if the `Negotiate Version` is set
# (NTLM_AUTH messages with a data block starting before index 72 do not
# contain information on the version)
if offset >= 72 and len(request) > 72 and has_version:
maj, minor, bld, ntlm_ver = struct.unpack('BBHB', request[64:65] +
request[65:66] +
request[66:68] +
request[71:72])
version = "{}.{}.{}".format(maj, minor, bld).encode()
value.append("ntlm-os:{}".format(encode_b64(version).decode()))
value.append("ntlm-version:{}".format(ntlm_ver))
return 'NTLM ' + ','.join(value)
|
17,618 |
def _process_post_history(body: nodes.field_body) -> list[nodes.Text | nodes.reference]:
new_nodes = []
for pair in body.astext().split(","):
pair = pair.strip()
try:
# if Post-History has no links, ``pair.split(maxsplit=1)``
# will raise ValueError
date, uri = pair.split(maxsplit=1)
node = nodes.reference("",
date.strip(),
refuri=uri.strip(" \f\n\r\t><"),
internal=False
)
except ValueError:
node = nodes.Text(pair)
new_nodes += [node, nodes.Text(", ")]
return new_nodes[:-1] # remove final ', '
|
def _process_post_history(body: nodes.field_body) -> list[nodes.Text | nodes.reference]:
new_nodes = []
for pair in body.astext().split(","):
pair = pair.strip()
try:
# if Post-History has no links, ``pair.split(maxsplit=1)``
# will raise ValueError
date, uri = pair.split(maxsplit=1)
node = nodes.reference("",
date.strip(),
refuri=uri.strip(" \f\n\r\t><"),
internal=False,
)
except ValueError:
node = nodes.Text(pair)
new_nodes += [node, nodes.Text(", ")]
return new_nodes[:-1] # remove final ', '
|
53,948 |
def test_wheel_mode():
@contextlib.contextmanager
def build_wheel(extra_file_defs=None, **kwargs):
file_defs = {
'setup.py': (DALS(
'''
# -*- coding: utf-8 -*-
from setuptools import setup
import setuptools
setup(**%r)
'''
) % kwargs).encode('utf-8'),
}
if extra_file_defs:
file_defs.update(extra_file_defs)
with tempdir() as source_dir:
path.build(file_defs, source_dir)
runsh = pathlib.Path(source_dir) / "script.sh"
os.chmod(runsh, 0o777)
subprocess.check_call((sys.executable, 'setup.py',
'-q', 'bdist_wheel'), cwd=source_dir)
yield glob.glob(os.path.join(source_dir, 'dist', '*.whl'))[0]
params = dict(
id='script',
file_defs={
'script.py': DALS(
'''
#/usr/bin/python
print('hello world!')
'''
),
'script.sh': DALS(
'''
#/bin/sh
echo 'hello world!'
'''
),
},
setup_kwargs=dict(
scripts=['script.py', 'script.sh'],
),
install_tree=flatten_tree({
'foo-1.0-py{py_version}.egg': {
'EGG-INFO': [
'PKG-INFO',
'RECORD',
'WHEEL',
'top_level.txt',
{'scripts': [
'script.py',
'script.sh'
]}
]
}
})
)
project_name = params.get('name', 'foo')
version = params.get('version', '1.0')
install_tree = params.get('install_tree')
file_defs = params.get('file_defs', {})
setup_kwargs = params.get('setup_kwargs', {})
with build_wheel(
name=project_name,
version=version,
install_requires=[],
extras_require={},
extra_file_defs=file_defs,
**setup_kwargs
) as filename, tempdir() as install_dir:
_check_wheel_install(filename, install_dir,
install_tree, project_name,
version, None)
w = Wheel(filename)
script_sh = pathlib.Path(install_dir) / w.egg_name() / "EGG-INFO" / "scripts" / "script.sh"
assert script_sh.exists()
assert oct(stat.S_IMODE(script_sh.stat().st_mode)) == "0o777"
|
def test_wheel_mode():
@contextlib.contextmanager
def build_wheel(extra_file_defs=None, **kwargs):
file_defs = {
'setup.py': (DALS(
'''
# -*- coding: utf-8 -*-
from setuptools import setup
import setuptools
setup(**%r)
'''
) % kwargs).encode('utf-8'),
}
if extra_file_defs:
file_defs.update(extra_file_defs)
with tempdir() as source_dir:
path.build(file_defs, source_dir)
runsh = pathlib.Path(source_dir) / "script.sh"
os.chmod(runsh, 0o777)
subprocess.check_call((sys.executable, 'setup.py',
'-q', 'bdist_wheel'), cwd=source_dir)
yield glob.glob(os.path.join(source_dir, 'dist', '*.whl'))[0]
params = dict(
id='script',
file_defs={
'script.py': DALS(
'''
#/usr/bin/python
print('hello world!')
'''
),
'script.sh': DALS(
'''
#/bin/sh
echo 'hello world!'
'''
),
},
setup_kwargs=dict(
scripts=['script.py', 'script.sh'],
),
install_tree=flatten_tree({
'foo-1.0-py{py_version}.egg': {
'EGG-INFO': [
'PKG-INFO',
'RECORD',
'WHEEL',
'top_level.txt',
{'scripts': [
'script.py',
'script.sh'
]}
]
}
})
)
project_name = params.get('name', 'foo')
version = params.get('version', '1.0')
install_tree = params.get('install_tree')
file_defs = params.get('file_defs', {})
setup_kwargs = params.get('setup_kwargs', {})
with build_wheel(
name=project_name,
version=version,
install_requires=[],
extras_require={},
extra_file_defs=file_defs,
**setup_kwargs
) as filename, tempdir() as install_dir:
_check_wheel_install(filename, install_dir,
install_tree, project_name,
version, None)
w = Wheel(filename)
base = script_sh = pathlib.Path(install_dir) / w.egg_name()
script_sh = base / "EGG-INFO" / "scripts" / "script.sh"
assert script_sh.exists()
assert oct(stat.S_IMODE(script_sh.stat().st_mode)) == "0o777"
|
11,808 |
def logical_and(image1, image2):
"""Logical AND between two images. At least one of the images must be "1"
mode.
.. code-block:: python
out = ((image1 and image2) % MAX)
:rtype: :py:class:`~PIL.Image.Image`
"""
image1.load()
image2.load()
return image1._new(image1.im.chop_and(image2.im))
|
def logical_and(image1, image2):
"""Logical AND between two images. At least one of the images must be "1"
mode "1".
.. code-block:: python
out = ((image1 and image2) % MAX)
:rtype: :py:class:`~PIL.Image.Image`
"""
image1.load()
image2.load()
return image1._new(image1.im.chop_and(image2.im))
|
36,569 |
def what(file='', h=None):
f = None
try:
if h is None:
if file == '':
raise ValueError("You need specify a str or PathLike for file, "
"or pass a byte stream as h parameter")
if isinstance(file, (str, PathLike)):
f = open(file, 'rb')
h = f.read(32)
else:
location = file.tell()
h = file.read(32)
file.seek(location)
for tf in tests:
res = tf(h, f)
if res:
return res
finally:
if f: f.close()
return None
|
def what(file='', h=None):
f = None
try:
if h is None:
if file == '':
raise TypeError("You need specify a str or PathLike for file, "
"or pass a byte stream as h parameter")
if isinstance(file, (str, PathLike)):
f = open(file, 'rb')
h = f.read(32)
else:
location = file.tell()
h = file.read(32)
file.seek(location)
for tf in tests:
res = tf(h, f)
if res:
return res
finally:
if f: f.close()
return None
|
928 |
def test_Divergence():
assert Divergence(v1) == Divergence(R.x*R.i + R.z*R.z*R.j)
assert Divergence(v2) == Divergence(R.x*R.i + R.y*R.j + R.z*R.k)
assert Divergence(v1).doit() == 1
assert Divergence(v2).doit() == 3
# issue 22384
Rc = CoordSys3D('R', transformation='cylindrical')
assert divergence(Rc.i) == 1/Rc.r
|
def test_Divergence():
assert Divergence(v1) == Divergence(R.x*R.i + R.z*R.z*R.j)
assert Divergence(v2) == Divergence(R.x*R.i + R.y*R.j + R.z*R.k)
assert Divergence(v1).doit() == 1
assert Divergence(v2).doit() == 3
# issue 22384
Rc = CoordSys3D('R', transformation='cylindrical')
assert Divergence(Rc.i).doit() == 1/Rc.r
|
34,510 |
def _message_clarification(tracker: DialogueStateTracker) -> List[Event]:
clarification = copy.deepcopy(cast(Event, tracker.latest_message))
clarification.parse_data["intent"]["confidence"] = 1.0
clarification.timestamp = time.time()
return [ActionExecuted(ACTION_LISTEN_NAME), clarification]
|
def _message_clarification(tracker: DialogueStateTracker) -> List[Event]:
clarification = copy.deepcopy(cast(UserUttered, tracker.latest_message))
clarification.parse_data["intent"]["confidence"] = 1.0
clarification.timestamp = time.time()
return [ActionExecuted(ACTION_LISTEN_NAME), clarification]
|
34,412 |
def update_existing_keys(
original: Dict[Any, Any], updates: Dict[Any, Any]
) -> Dict[Any, Any]:
"""Iterate through all the updates an update a value in the original dictionary.
If the updates contain a key that is not present in the original dict, it will
be ignored."""
updated = original.copy()
for k, v in updates.items():
if k in updated:
updated[k] = v
return updated
|
def update_existing_keys(
original: Dict[Any, Any], updates: Dict[Any, Any]
) -> Dict[Any, Any]:
"""Iterate through all the updates and update a value in the original dictionary.
If the updates contain a key that is not present in the original dict, it will
be ignored."""
updated = original.copy()
for k, v in updates.items():
if k in updated:
updated[k] = v
return updated
|
56,924 |
def _parse_args_from_docstring(func: Callable[..., Any]) -> Dict[str, str]:
args: Dict[str, str] = {}
if docstring := inspect.cleandoc(inspect.getdoc(func) or "").strip():
# Extract the arguments
# Note: These are loose regexes, but they are good enough for our purposes
# For Google-style, look only at the lines that are indented
section_lines = inspect.cleandoc("\n".join(line for line in docstring.splitlines() if line.startswith(("\t", " "))))
docstring_styles = (
GOOGLE_DOCSTRING_ARG_REGEX.finditer(section_lines),
SPHINX_DOCSTRING_ARG_REGEX.finditer(docstring),
NUMPY_DOCSTRING_ARG_REGEX.finditer(docstring),
)
# Choose the style with the largest number of arguments matched
matched_args = []
actual_args = inspect.signature(func).parameters.keys()
for matches in docstring_styles:
style_matched_args = [match for match in matches if match.group("name") in actual_args]
if len(style_matched_args) > len(matched_args):
matched_args = style_matched_args
# Parse the arguments
for arg in matched_args:
arg_description = re.sub(r"\n\s*", " ", arg.group("description")).strip()
args[arg.group("name")] = arg_description
return args
|
def _parse_args_from_docstring(func: Callable[..., Any]) -> Dict[str, str]:
args: Dict[str, str] = {}
docstring = inspect.getdoc(func).strip()
if docstring:
# Extract the arguments
# Note: These are loose regexes, but they are good enough for our purposes
# For Google-style, look only at the lines that are indented
section_lines = inspect.cleandoc("\n".join(line for line in docstring.splitlines() if line.startswith(("\t", " "))))
docstring_styles = (
GOOGLE_DOCSTRING_ARG_REGEX.finditer(section_lines),
SPHINX_DOCSTRING_ARG_REGEX.finditer(docstring),
NUMPY_DOCSTRING_ARG_REGEX.finditer(docstring),
)
# Choose the style with the largest number of arguments matched
matched_args = []
actual_args = inspect.signature(func).parameters.keys()
for matches in docstring_styles:
style_matched_args = [match for match in matches if match.group("name") in actual_args]
if len(style_matched_args) > len(matched_args):
matched_args = style_matched_args
# Parse the arguments
for arg in matched_args:
arg_description = re.sub(r"\n\s*", " ", arg.group("description")).strip()
args[arg.group("name")] = arg_description
return args
|
35,047 |
def pipeline_executor_enabled():
"""check if pipeline executor enabled.
Return
------
enable: bool
return pipeline executor get enabled or not
"""
pipeline_enabled = False
try:
pipelinecreate = tvm._ffi.get_global_func("tvm.pipeline_executor.create")
assert pipelinecreate
pipeline_enabled = True
except ValueError:
print("pipeline executor not enabled!")
return pipeline_enabled
|
def pipeline_executor_enabled():
"""check if pipeline executor enabled.
Return
------
enable: bool
return pipeline executor get enabled or not
"""
return tvm._ffi.get_global_func("tvm.pipeline_executor.create", allow_missing=True) is not None
|
11,839 |
def grabclipboard():
if sys.platform == "darwin":
fh, filepath = tempfile.mkstemp(".jpg")
os.close(fh)
commands = [
'set theFile to (open for access POSIX file "'
+ filepath
+ '" with write permission)',
"try",
" write (the clipboard as JPEG picture) to theFile",
"end try",
"close access theFile",
]
script = ["osascript"]
for command in commands:
script += ["-e", command]
subprocess.call(script)
im = None
if os.stat(filepath).st_size != 0:
im = Image.open(filepath)
im.load()
os.unlink(filepath)
return im
elif sys.platform == "win32":
data = Image.core.grabclipboard_win32()
if isinstance(data, bytes):
from . import BmpImagePlugin
import io
return BmpImagePlugin.DibImageFile(io.BytesIO(data))
return data
else:
raise IOError("ImageGrab.grabclipboard() is macOS and Windows only")
|
def grabclipboard():
if sys.platform == "darwin":
fh, filepath = tempfile.mkstemp(".jpg")
os.close(fh)
commands = [
'set theFile to (open for access POSIX file "'
+ filepath
+ '" with write permission)',
"try",
" write (the clipboard as JPEG picture) to theFile",
"end try",
"close access theFile",
]
script = ["osascript"]
for command in commands:
script += ["-e", command]
subprocess.call(script)
im = None
if os.stat(filepath).st_size != 0:
im = Image.open(filepath)
im.load()
os.unlink(filepath)
return im
elif sys.platform == "win32":
data = Image.core.grabclipboard_win32()
if isinstance(data, bytes):
from . import BmpImagePlugin
import io
return BmpImagePlugin.DibImageFile(io.BytesIO(data))
return data
else:
raise NotImplementedError("ImageGrab.grabclipboard() is macOS and Windows only")
|
35,930 |
def main(file_path):
exceptions = ["domen", "config", "pool", "audit"]
if file_path not in exceptions:
for path in Path(config_helper.ledger_data_dir).rglob(file_path + "_*"):
shutil.rmtree(str(path))
print('follow directory was deleted: ' + path.name)
else:
print('Can`t delete ledger: ' + file_path)
|
def main(file_path):
exceptions = ["domen", "config", "pool", "audit"]
if file_path not in exceptions:
for path in Path(config_helper.ledger_data_dir).rglob(file_path + "_*"):
shutil.rmtree(str(path))
print('The follow directory was deleted: ' + path.name)
else:
print('Can`t delete ledger: ' + file_path)
|
49,821 |
def get_interpreter_info(path):
"""Return version information of the selected Python interpreter."""
try:
out, __ = run_program(path, ['-V']).communicate()
out = out.decode()
# Needed to prevent showing unexpected output.
# See spyder-ide/spyder#19000
if not re.search(r'^Python \d+\.\d+\.\d+$', out):
out = ''
except Exception:
out = ''
return out.strip()
|
def get_interpreter_info(path):
"""Return version information of the selected Python interpreter."""
try:
out, __ = run_program(path, ['-V']).communicate()
out = out.decode()
# This is necessary to prevent showing unexpected output.
# See spyder-ide/spyder#19000
if not re.search(r'^Python \d+\.\d+\.\d+$', out):
out = ''
except Exception:
out = ''
return out.strip()
|
29,821 |
def configure_and_run_docker_container(
args: argparse.Namespace,
docker_img: str,
instance_config: InstanceConfig,
system_paasta_config: SystemPaastaConfig,
spark_conf: Mapping[str, str],
aws_creds: Tuple[Optional[str], Optional[str], Optional[str]],
cluster_manager: str,
pod_template_path: str,
) -> int:
docker_memory_limit = _calculate_docker_memory_limit(
spark_conf, args.docker_memory_limit
)
docker_cpu_limit = _calculate_docker_cpu_limit(
spark_conf,
args.docker_cpu_limit,
)
if cluster_manager == CLUSTER_MANAGER_MESOS:
volumes = get_volumes_from_spark_mesos_configs(spark_conf)
elif cluster_manager == CLUSTER_MANAGER_K8S:
volumes = get_volumes_from_spark_k8s_configs(spark_conf)
elif cluster_manager == CLUSTER_MANAGER_LOCAL:
# service_configuration_lib puts volumes into the k8s
# configs for local mode
volumes = get_volumes_from_spark_k8s_configs(spark_conf)
else:
raise UnsupportedClusterManagerException(cluster_manager)
volumes.append("%s:rw" % args.work_dir)
volumes.append("/nail/home:/nail/home:rw")
if args.enable_compact_bin_packing:
volumes.append(f"{pod_template_path}:{pod_template_path}:rw")
environment = instance_config.get_env_dictionary() # type: ignore
spark_conf_str = create_spark_config_str(spark_conf, is_mrjob=args.mrjob)
environment.update(
get_spark_env(args, spark_conf_str, aws_creds, spark_conf["spark.ui.port"])
) # type:ignore
webui_url = get_webui_url(spark_conf["spark.ui.port"])
webui_url_msg = f"\nSpark monitoring URL {webui_url}\n"
docker_cmd = get_docker_cmd(args, instance_config, spark_conf_str)
if "history-server" in docker_cmd:
print(f"\nSpark history server URL {webui_url}\n")
elif any(c in docker_cmd for c in ["pyspark", "spark-shell", "spark-submit"]):
signalfx_url = get_signalfx_url(spark_conf)
signalfx_url_msg = f"\nSignalfx dashboard: {signalfx_url}\n"
print(webui_url_msg)
print(signalfx_url_msg)
log.info(webui_url_msg)
log.info(signalfx_url_msg)
history_server_url = get_history_url(spark_conf)
if history_server_url:
history_server_url_msg = (
f"\nAfter the job is finished, you can find the spark UI from {history_server_url}\n"
"Check y/spark-recent-history for faster access to prod logs\n"
)
print(history_server_url_msg)
log.info(history_server_url_msg)
print(f"Selected cluster manager: {cluster_manager}\n")
if clusterman_metrics and _should_get_resource_requirements(docker_cmd, args.mrjob):
try:
if cluster_manager == CLUSTER_MANAGER_MESOS:
print("Sending resource request metrics to Clusterman")
hourly_cost, resources = send_and_calculate_resources_cost(
clusterman_metrics, spark_conf, webui_url, args.pool
)
else:
resources = get_resources_requested(spark_conf)
hourly_cost = get_spark_hourly_cost(
clusterman_metrics,
resources,
spark_conf["spark.executorEnv.PAASTA_CLUSTER"],
args.pool,
)
message = (
f"Resource request ({resources['cpus']} cpus and {resources['mem']} MB memory total)"
f" is estimated to cost ${hourly_cost} per hour"
)
if clusterman_metrics.util.costs.should_warn(hourly_cost):
print(PaastaColors.red(f"WARNING: {message}"))
else:
print(message)
except Boto3Error as e:
print(
PaastaColors.red(
f"Encountered {e} while attempting to send resource requirements to Clusterman."
)
)
if args.suppress_clusterman_metrics_errors:
print(
"Continuing anyway since --suppress-clusterman-metrics-errors was passed"
)
else:
raise
final_spark_submit_cmd_msg = f"Final command: {docker_cmd}"
print(PaastaColors.grey(final_spark_submit_cmd_msg))
log.info(final_spark_submit_cmd_msg)
return run_docker_container(
container_name=spark_conf["spark.app.name"],
volumes=volumes,
environment=environment,
docker_img=docker_img,
docker_cmd=docker_cmd,
dry_run=args.dry_run,
nvidia=args.nvidia,
docker_memory_limit=docker_memory_limit,
docker_cpu_limit=docker_cpu_limit,
)
|
def configure_and_run_docker_container(
args: argparse.Namespace,
docker_img: str,
instance_config: InstanceConfig,
system_paasta_config: SystemPaastaConfig,
spark_conf: Mapping[str, str],
aws_creds: Tuple[Optional[str], Optional[str], Optional[str]],
cluster_manager: str,
pod_template_path: str,
) -> int:
docker_memory_limit = _calculate_docker_memory_limit(
spark_conf, args.docker_memory_limit
)
docker_cpu_limit = _calculate_docker_cpu_limit(
spark_conf,
args.docker_cpu_limit,
)
if cluster_manager == CLUSTER_MANAGER_MESOS:
volumes = get_volumes_from_spark_mesos_configs(spark_conf)
elif cluster_manager in [CLUSTER_MANAGER_K8S, CLUSTER_MANAGER_LOCAL]:
volumes = get_volumes_from_spark_k8s_configs(spark_conf)
elif cluster_manager == CLUSTER_MANAGER_LOCAL:
# service_configuration_lib puts volumes into the k8s
# configs for local mode
volumes = get_volumes_from_spark_k8s_configs(spark_conf)
else:
raise UnsupportedClusterManagerException(cluster_manager)
volumes.append("%s:rw" % args.work_dir)
volumes.append("/nail/home:/nail/home:rw")
if args.enable_compact_bin_packing:
volumes.append(f"{pod_template_path}:{pod_template_path}:rw")
environment = instance_config.get_env_dictionary() # type: ignore
spark_conf_str = create_spark_config_str(spark_conf, is_mrjob=args.mrjob)
environment.update(
get_spark_env(args, spark_conf_str, aws_creds, spark_conf["spark.ui.port"])
) # type:ignore
webui_url = get_webui_url(spark_conf["spark.ui.port"])
webui_url_msg = f"\nSpark monitoring URL {webui_url}\n"
docker_cmd = get_docker_cmd(args, instance_config, spark_conf_str)
if "history-server" in docker_cmd:
print(f"\nSpark history server URL {webui_url}\n")
elif any(c in docker_cmd for c in ["pyspark", "spark-shell", "spark-submit"]):
signalfx_url = get_signalfx_url(spark_conf)
signalfx_url_msg = f"\nSignalfx dashboard: {signalfx_url}\n"
print(webui_url_msg)
print(signalfx_url_msg)
log.info(webui_url_msg)
log.info(signalfx_url_msg)
history_server_url = get_history_url(spark_conf)
if history_server_url:
history_server_url_msg = (
f"\nAfter the job is finished, you can find the spark UI from {history_server_url}\n"
"Check y/spark-recent-history for faster access to prod logs\n"
)
print(history_server_url_msg)
log.info(history_server_url_msg)
print(f"Selected cluster manager: {cluster_manager}\n")
if clusterman_metrics and _should_get_resource_requirements(docker_cmd, args.mrjob):
try:
if cluster_manager == CLUSTER_MANAGER_MESOS:
print("Sending resource request metrics to Clusterman")
hourly_cost, resources = send_and_calculate_resources_cost(
clusterman_metrics, spark_conf, webui_url, args.pool
)
else:
resources = get_resources_requested(spark_conf)
hourly_cost = get_spark_hourly_cost(
clusterman_metrics,
resources,
spark_conf["spark.executorEnv.PAASTA_CLUSTER"],
args.pool,
)
message = (
f"Resource request ({resources['cpus']} cpus and {resources['mem']} MB memory total)"
f" is estimated to cost ${hourly_cost} per hour"
)
if clusterman_metrics.util.costs.should_warn(hourly_cost):
print(PaastaColors.red(f"WARNING: {message}"))
else:
print(message)
except Boto3Error as e:
print(
PaastaColors.red(
f"Encountered {e} while attempting to send resource requirements to Clusterman."
)
)
if args.suppress_clusterman_metrics_errors:
print(
"Continuing anyway since --suppress-clusterman-metrics-errors was passed"
)
else:
raise
final_spark_submit_cmd_msg = f"Final command: {docker_cmd}"
print(PaastaColors.grey(final_spark_submit_cmd_msg))
log.info(final_spark_submit_cmd_msg)
return run_docker_container(
container_name=spark_conf["spark.app.name"],
volumes=volumes,
environment=environment,
docker_img=docker_img,
docker_cmd=docker_cmd,
dry_run=args.dry_run,
nvidia=args.nvidia,
docker_memory_limit=docker_memory_limit,
docker_cpu_limit=docker_cpu_limit,
)
|
46,353 |
def disable(name=None, verbose=True):
if name is not None:
do_unpatch(name)
else:
for key in _get_map_of_algorithms():
do_unpatch(key)
_get_map_of_algorithms.cache_clear()
if verbose and sys.stderr is not None:
sys.stderr.write(
"Intel(R) oneAPI Data Analytics Library solvers for sklearn disabled\n")
logging.warning('Please, do re-import of imported scikit-learn modules '
'after unpatch_sklearn()')
|
def disable(name=None, verbose=True):
if name is not None:
do_unpatch(name)
else:
for key in _get_map_of_algorithms():
do_unpatch(key)
_get_map_of_algorithms.cache_clear()
if verbose and sys.stderr is not None:
sys.stderr.write(
"Intel(R) oneAPI Data Analytics Library solvers for sklearn disabled\n")
logging.warning('Reimport previously imported scikit-learn modules '
'after unpatch_sklearn()')
|
7,674 |
def get_avatar_from_name(first_name):
first_char = first_name[0] if first_name else None
return url_for('assets.avatar', name=first_char)
|
def get_avatar_url_from_name(first_name):
first_char = first_name[0] if first_name else None
return url_for('assets.avatar', name=first_char)
|
23,808 |
def write_toolchain(conanfile, path, output):
if hasattr(conanfile, "toolchain"):
msg = ("\n*****************************************************************\n"
"******************************************************************\n"
"The 'toolchain' attribute or method has been deprecated.\n"
"It will be removed in next Conan release.\n"
"Use 'generators = ClassName' or 'generate()' method instead.\n"
"********************************************************************\n"
"********************************************************************\n")
output.warn(msg)
warnings.warn(msg)
output.highlight("Generating toolchain files")
if callable(conanfile.toolchain):
# This is the toolchain
with chdir(path):
with conanfile_exception_formatter(str(conanfile), "toolchain"):
conanfile.toolchain()
else:
try:
toolchain = {"cmake": CMakeToolchain}[conanfile.toolchain]
except KeyError:
raise ConanException("Unknown toolchain '%s'" % conanfile.toolchain)
tc = toolchain(conanfile)
with chdir(path):
tc.generate()
# TODO: Lets discuss what to do with the environment
if hasattr(conanfile, "generate"):
assert callable(conanfile.generate), "generate should be a method, not an attribute"
output.highlight("Calling generate()")
with chdir(path):
with conanfile_exception_formatter(str(conanfile), "generate"):
conanfile.generate()
|
def write_toolchain(conanfile, path, output):
if hasattr(conanfile, "toolchain"):
msg = ("\n*****************************************************************\n"
"******************************************************************\n"
"The 'toolchain' attribute or method has been deprecated.\n"
"It will be removed in next Conan release.\n"
"Use 'generators = \"ClassName\"' or 'generate()' method instead.\n"
"********************************************************************\n"
"********************************************************************\n")
output.warn(msg)
warnings.warn(msg)
output.highlight("Generating toolchain files")
if callable(conanfile.toolchain):
# This is the toolchain
with chdir(path):
with conanfile_exception_formatter(str(conanfile), "toolchain"):
conanfile.toolchain()
else:
try:
toolchain = {"cmake": CMakeToolchain}[conanfile.toolchain]
except KeyError:
raise ConanException("Unknown toolchain '%s'" % conanfile.toolchain)
tc = toolchain(conanfile)
with chdir(path):
tc.generate()
# TODO: Lets discuss what to do with the environment
if hasattr(conanfile, "generate"):
assert callable(conanfile.generate), "generate should be a method, not an attribute"
output.highlight("Calling generate()")
with chdir(path):
with conanfile_exception_formatter(str(conanfile), "generate"):
conanfile.generate()
|
20,019 |
def spatial_clustering(mask, algorithm="OPTICS", min_cluster_size=5, max_distance=0, njobs=-1):
"""Counts and segments portions of an image based on distance between two pixels.
Masks showing all clusters, plus masks of individual clusters, are returned.
img: Image to segment.
Algorithm: Algorithm to use for segregating different clusters.
Currently supporting OPTICS and DBSCAN. (Default="OPTICS")
min_cluster_size: The minimum size a section of a mask must be (in pixels)
before it can be considered its own cluster. (Default=5)
max_distance: The total distance between two pixels for them to be considered a part
of the same cluster. For the DBSCAN algorithm, value must be between
0 and 1. For OPTICS, the value is in pixels and depends on the size
of your picture. (Default=0)
njobs: The number of processors to use for calculation of the clusters.
Default is all available processors.
"""
al_upper = algorithm.upper()
if "OPTICS" in al_upper:
max_distance = np.inf
elif "DBSCAN" in al_upper:
max_distance = 0.2
else:
raise NameError("Please use only 'OPTICS' or 'DBSCAN' ")
if not max_distance == 0:
max_distance = max_distance
vis = mask
backtorgb = cv2.cvtColor(vis, cv2.COLOR_GRAY2RGB)
x, y = np.where(np.all(backtorgb == [255, 255, 255], axis=2))
zipped = np.column_stack((x, y))
if "OPTICS" in al_upper:
scaled = StandardScaler(with_mean=False, with_std=False).fit_transform(zipped)
db = OPTICS(max_eps=max_distance, min_samples=min_cluster_size, n_jobs=njobs).fit(scaled)
elif "DBSCAN" in al_upper:
scaled = StandardScaler().fit_transform(zipped)
db = DBSCAN(eps=max_distance, min_samples=min_cluster_size, n_jobs=njobs).fit(scaled)
n_clusters_ = len(set(db.labels_)) - (1 if -1 in db.labels_ else 0)
colors = color_palette(n_clusters_ + 1)
dict_of_colors = {}
sub_mask = []
h, w = backtorgb.shape[:2]
image = np.zeros((h, w, 3), np.uint8)
for y in range(-1, n_clusters_ + 1):
dict_of_colors[str(y)] = colors[y]
for y in range(0, n_clusters_):
sub_mask.append(np.zeros((h, w, 3), np.uint8))
dict_of_colors[str(-1)] = (255, 255, 255)
for z in range(0, len(db.labels_)):
if not db.labels_[z] == -1:
sub_mask[db.labels_[z]][zipped[z][0], zipped[z][1]] = (dict_of_colors[str(db.labels_[z])][2],
dict_of_colors[str(db.labels_[z])][1],
dict_of_colors[str(db.labels_[z])][0])
image[zipped[z][0], zipped[z][1]] = (dict_of_colors[str(db.labels_[z])][2],
dict_of_colors[str(db.labels_[z])][1],
dict_of_colors[str(db.labels_[z])][0])
params.device += 1
if params.debug == 'print':
print_image(image, "full_image_mask.png")
for c in range(0, len(sub_mask)):
print_image(sub_mask[c], "subimage_cluster_" + str(c) + ".png")
elif params.debug == 'plot':
plot_image(image)
for c in range(0, len(sub_mask)):
plot_image(sub_mask[c])
return image, sub_mask
|
def spatial_clustering(mask, algorithm="OPTICS", min_cluster_size=5, max_distance=0, njobs=-1):
"""Counts and segments portions of an image based on distance between two pixels.
Masks showing all clusters, plus masks of individual clusters, are returned.
img: Image to segment.
Algorithm: Algorithm to use for segregating different clusters.
Currently supporting OPTICS and DBSCAN. (Default="OPTICS")
min_cluster_size: The minimum size a section of a mask must be (in pixels)
before it can be considered its own cluster. (Default=5)
max_distance: The total distance between two pixels for them to be considered a part
of the same cluster. For the DBSCAN algorithm, value must be between
0 and 1. For OPTICS, the value is in pixels and depends on the size
of your picture. (Default=0)
njobs: The number of processors to use for calculation of the clusters.
Default is all available processors.
"""
al_upper = algorithm.upper()
if "OPTICS" in al_upper:
max_distance = np.inf
elif "DBSCAN" in al_upper:
max_distance = 0.2
else:
raise NameError("Please use only 'OPTICS' or 'DBSCAN' ")
if not max_distance == 0:
max_distance = max_distance
vis = mask
backtorgb = cv2.cvtColor(vis, cv2.COLOR_GRAY2RGB)
x, y = np.where(np.all(backtorgb == [255, 255, 255], axis=2))
zipped = np.column_stack((x, y))
if "OPTICS" in al_upper:
scaled = StandardScaler(with_mean=False, with_std=False).fit_transform(zipped)
db = OPTICS(max_eps=max_distance, min_samples=min_cluster_size, n_jobs=njobs).fit(scaled)
elif "DBSCAN" in al_upper:
scaled = StandardScaler().fit_transform(zipped)
db = DBSCAN(eps=max_distance, min_samples=min_cluster_size, n_jobs=njobs).fit(scaled)
n_clusters_ = len(set(db.labels_)) - (1 if -1 in db.labels_ else 0)
colors = color_palette(n_clusters_ + 1)
dict_of_colors = {}
sub_mask = []
h, w = backtorgb.shape[:2]
image = np.zeros((h, w, 3), np.uint8)
for y in range(-1, n_clusters_ + 1):
dict_of_colors[str(y)] = colors[y]
for y in range(0, n_clusters_):
sub_mask.append(np.zeros((h, w), np.uint8))
dict_of_colors[str(-1)] = (255, 255, 255)
for z in range(0, len(db.labels_)):
if not db.labels_[z] == -1:
sub_mask[db.labels_[z]][zipped[z][0], zipped[z][1]] = (dict_of_colors[str(db.labels_[z])][2],
dict_of_colors[str(db.labels_[z])][1],
dict_of_colors[str(db.labels_[z])][0])
image[zipped[z][0], zipped[z][1]] = (dict_of_colors[str(db.labels_[z])][2],
dict_of_colors[str(db.labels_[z])][1],
dict_of_colors[str(db.labels_[z])][0])
params.device += 1
if params.debug == 'print':
print_image(image, "full_image_mask.png")
for c in range(0, len(sub_mask)):
print_image(sub_mask[c], "subimage_cluster_" + str(c) + ".png")
elif params.debug == 'plot':
plot_image(image)
for c in range(0, len(sub_mask)):
plot_image(sub_mask[c])
return image, sub_mask
|
36,355 |
def _astuple_inner(obj, tuple_factory):
if _is_dataclass_instance(obj):
result = []
for f in fields(obj):
value = _astuple_inner(getattr(obj, f.name), tuple_factory)
result.append(value)
return tuple_factory(result)
elif isinstance(obj, tuple) and hasattr(obj, '_fields'):
# obj is a namedtuple. Recurse into it, but the returned
# object is another namedtuple of the same type. This is
# similar to how other list- or tuple-derived classes are
# treated (see below), but we just need to create them
# differently because a namedtuple's __init__ needs to be
# called differently (see bpo-34363).
return type(obj)(*[_astuple_inner(v, tuple_factory) for v in obj])
elif isinstance(obj, (list, tuple)):
# Assume we can create an object of this type by passing in a
# generator (which is not true for namedtuples, handled
# above).
return type(obj)(_astuple_inner(v, tuple_factory) for v in obj)
elif isinstance(obj, collections.defaultdict):
# defaultdict does not have the same constructor than dict and must be
# hendled separately
return type(obj)(obj.default_factory, ((_asdict_inner(k, dict_factory),
_asdict_inner(v, dict_factory))
for k, v in obj.items()))
elif isinstance(obj, dict):
return type(obj)((_astuple_inner(k, tuple_factory), _astuple_inner(v, tuple_factory))
for k, v in obj.items())
else:
return copy.deepcopy(obj)
|
def _astuple_inner(obj, tuple_factory):
if _is_dataclass_instance(obj):
result = []
for f in fields(obj):
value = _astuple_inner(getattr(obj, f.name), tuple_factory)
result.append(value)
return tuple_factory(result)
elif isinstance(obj, tuple) and hasattr(obj, '_fields'):
# obj is a namedtuple. Recurse into it, but the returned
# object is another namedtuple of the same type. This is
# similar to how other list- or tuple-derived classes are
# treated (see below), but we just need to create them
# differently because a namedtuple's __init__ needs to be
# called differently (see bpo-34363).
return type(obj)(*[_astuple_inner(v, tuple_factory) for v in obj])
elif isinstance(obj, (list, tuple)):
# Assume we can create an object of this type by passing in a
# generator (which is not true for namedtuples, handled
# above).
return type(obj)(_astuple_inner(v, tuple_factory) for v in obj)
elif isinstance(obj, collections.defaultdict):
# defaultdict does not have the same constructor than dict and must be
# handled separately
return type(obj)(obj.default_factory, ((_asdict_inner(k, dict_factory),
_asdict_inner(v, dict_factory))
for k, v in obj.items()))
elif isinstance(obj, dict):
return type(obj)((_astuple_inner(k, tuple_factory), _astuple_inner(v, tuple_factory))
for k, v in obj.items())
else:
return copy.deepcopy(obj)
|
32,228 |
def panorama_query_logs_command(args: dict):
"""
Query logs
"""
log_type = args.get('log-type')
number_of_logs = args.get('number_of_logs')
query = args.get('query')
address_src = args.get('addr-src')
address_dst = args.get('addr-dst')
ip_ = args.get('ip')
zone_src = args.get('zone-src')
zone_dst = args.get('zone-dst')
time_generated = args.get('time-generated')
action = args.get('action')
port_dst = args.get('port-dst')
rule = args.get('rule')
filedigest = args.get('filedigest')
url = args.get('url')
target = args.get('target', None)
if query and (address_src or address_dst or zone_src or zone_dst
or time_generated or action or port_dst or rule or url or filedigest):
raise Exception('Use the free query argument or the fixed search parameters arguments to build your query.')
result = panorama_query_logs(log_type, number_of_logs, query, address_src, address_dst, ip_,
zone_src, zone_dst, time_generated, action,
port_dst, rule, url, filedigest, target)
if result['response']['@status'] == 'error':
if 'msg' in result['response'] and 'line' in result['response']['msg']:
raise Exception(f"Query logs failed. Reason is: {result['response']['msg']['line']}")
else:
raise Exception('Query logs failed.')
if 'response' not in result or 'result' not in result['response'] or 'job' not in result['response']['result']:
raise Exception('Missing JobID in response.')
query_logs_output = {
'JobID': result['response']['result']['job'],
'Status': 'Pending',
'LogType': log_type,
'Message': result['response']['result']['msg']['line']
}
return_results({
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': result,
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown('Query Logs:', query_logs_output, ['JobID', 'Status'], removeNull=True),
'EntryContext': {"Panorama.Monitor(val.JobID == obj.JobID)": query_logs_output}
})
|
def panorama_query_logs_command(args: dict):
"""
Query logs
"""
log_type = args.get('log-type')
number_of_logs = args.get('number_of_logs')
query = args.get('query')
address_src = args.get('addr-src')
address_dst = args.get('addr-dst')
ip_ = args.get('ip')
zone_src = args.get('zone-src')
zone_dst = args.get('zone-dst')
time_generated = args.get('time-generated')
action = args.get('action')
port_dst = args.get('port-dst')
rule = args.get('rule')
filedigest = args.get('filedigest')
url = args.get('url')
target = args.get('target')
if query and (address_src or address_dst or zone_src or zone_dst
or time_generated or action or port_dst or rule or url or filedigest):
raise Exception('Use the free query argument or the fixed search parameters arguments to build your query.')
result = panorama_query_logs(log_type, number_of_logs, query, address_src, address_dst, ip_,
zone_src, zone_dst, time_generated, action,
port_dst, rule, url, filedigest, target)
if result['response']['@status'] == 'error':
if 'msg' in result['response'] and 'line' in result['response']['msg']:
raise Exception(f"Query logs failed. Reason is: {result['response']['msg']['line']}")
else:
raise Exception('Query logs failed.')
if 'response' not in result or 'result' not in result['response'] or 'job' not in result['response']['result']:
raise Exception('Missing JobID in response.')
query_logs_output = {
'JobID': result['response']['result']['job'],
'Status': 'Pending',
'LogType': log_type,
'Message': result['response']['result']['msg']['line']
}
return_results({
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': result,
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown('Query Logs:', query_logs_output, ['JobID', 'Status'], removeNull=True),
'EntryContext': {"Panorama.Monitor(val.JobID == obj.JobID)": query_logs_output}
})
|
31,950 |
def are_filters_match_response_content(all_filter_arguments: list, api_response: dict) -> bool:
"""
Verify whether any filter arguments of a command match the api response content.
Args:
all_filter_arguments (list[tuple]): pairs of filter arguments inputs & a response key.
api_response (dict): api response.
Returns:
bool: True if in any of the filter arguments there was a match, False otherwise.
"""
for arguments in all_filter_arguments:
command_args, resp_key = arguments
for arg in command_args:
if arg == api_response[resp_key]:
return True
return False
|
def are_filters_match_response_content(all_filter_arguments: list, api_response: dict) -> bool:
"""
Verify whether any filter arguments of a command match the api response content.
Args:
all_filter_arguments (list[tuple]): pairs of filter arguments inputs & a response key.
api_response (dict): api response.
Returns:
bool: True if in any of the filter arguments there was a match, False otherwise.
"""
for arguments in all_filter_arguments:
command_args, resp_key = arguments
for arg in command_args:
if arg == api_response.get(resp_key):
return True
return False
|
31,593 |
def test_module(client: Client) -> str:
"""Tests GoogleMaps by geocoding the address of Demisto's (original) HQ"""
client.google_maps_geocode('45 Rothschild, Tel Aviv', True)
return 'ok' # on any failure, an exception is raised
|
def test_module(client: Client) -> str:
"""Tests GoogleMaps by geocoding a specific address"""
client.google_maps_geocode('45 Rothschild, Tel Aviv', True)
return 'ok' # on any failure, an exception is raised
|
7,679 |
def send_default_avatar(user):
"""Send a user's default avatar as an SVG.
:param user: A `User` object or string (external search results, registrations)
"""
if isinstance(user, str):
text = user[0].upper()
color = get_color_for_user_id(user)
elif user.full_name:
text = user.full_name[0].upper()
color = get_color_for_user_id(user.id)
else:
text = ''
color = '#cccccc'
avatar = render_template('users/avatar.svg', bg_color=color, text=text)
return send_file('avatar.svg', BytesIO(avatar.encode()), mimetype='image/svg+xml',
no_cache=False, inline=True, safe=False, cache_timeout=(86400*7))
|
def send_default_avatar(user):
"""Send a user's default avatar as an SVG.
:param user: A `User` object or string (external search results, registrations)
"""
if isinstance(user, str):
text = user[0].upper()
color = get_color_for_user_id(user)
elif user.full_name:
text = user.full_name[0].upper()
color = get_color_for_user_id(user.id)
else:
text = ''
color = '#cccccc'
avatar = render_template('users/avatar.svg', bg_color=color, text=text)
return send_file('avatar.svg', BytesIO(avatar.encode()), mimetype='image/svg+xml',
no_cache=False, inline=True, safe=False, cache_timeout=(86400*7))
|
29,559 |
def html_visit_altair_plot(self, node):
# Execute the code, saving output and namespace
namespace = node["namespace"]
try:
f = io.StringIO()
with contextlib.redirect_stdout(f):
chart = eval_block(node["code"], namespace)
stdout = f.getvalue()
except Exception as e:
message = "altair-plot: {}:{} Code Execution failed:" "{}: {}".format(
node["rst_source"], node["rst_lineno"], e.__class__.__name__, str(e)
)
if node["strict"]:
raise ValueError(message) from e
else:
warnings.warn(message)
raise nodes.SkipNode
chart_name = node["chart-var-name"]
if chart_name is not None:
if chart_name not in namespace:
raise ValueError(
"chart-var-name='{}' not present in namespace" "".format(chart_name)
)
chart = namespace[chart_name]
output = node["output"]
if output == "none":
raise nodes.SkipNode
elif output == "stdout":
if not stdout:
raise nodes.SkipNode
else:
output_literal = nodes.literal_block(stdout, stdout)
output_literal["language"] = "none"
node.extend([output_literal])
elif output == "repr":
if chart is None:
raise nodes.SkipNode
else:
rep = " " + repr(chart).replace("\n", "\n ")
repr_literal = nodes.literal_block(rep, rep)
repr_literal["language"] = "none"
node.extend([repr_literal])
elif output == "plot":
if isinstance(chart, alt.TopLevelMixin):
# Last line should be a chart; convert to spec dict
try:
spec = chart.to_dict()
except alt.utils.schemapi.SchemaValidationError:
raise ValueError("Invalid chart: {0}".format(node["code"]))
actions = node["links"]
# TODO: add an option to save aspects to file & load from there.
# TODO: add renderer option
# Write spec to a *.vl.json file
# dest_dir = os.path.join(self.builder.outdir, node['relpath'])
# if not os.path.exists(dest_dir):
# os.makedirs(dest_dir)
# filename = "{0}.vl.json".format(node['target_id'])
# dest_path = os.path.join(dest_dir, filename)
# with open(dest_path, 'w') as f:
# json.dump(spec, f)
# Pass relevant info into the template and append to the output
html = VGL_TEMPLATE.render(
div_id=node["div_id"],
spec=json.dumps(spec),
mode="vega-lite",
renderer="canvas",
actions=json.dumps(actions),
)
self.body.append(html)
else:
warnings.warn(
"altair-plot: {}:{} Malformed block. Last line of "
"code block should define a valid altair Chart object."
"".format(node["rst_source"], node["rst_lineno"])
)
raise nodes.SkipNode
|
def html_visit_altair_plot(self, node):
# Execute the code, saving output and namespace
namespace = node["namespace"]
try:
f = io.StringIO()
with contextlib.redirect_stdout(f):
chart = eval_block(node["code"], namespace)
stdout = f.getvalue()
except Exception as e:
message = "altair-plot: {}:{} Code Execution failed:" "{}: {}".format(
node["rst_source"], node["rst_lineno"], e.__class__.__name__, str(e)
)
if node["strict"]:
raise ValueError(message) from e
else:
warnings.warn(message)
raise nodes.SkipNode
chart_name = node["chart-var-name"]
if chart_name is not None:
if chart_name not in namespace:
raise ValueError(
"chart-var-name='{}' not present in namespace" "".format(chart_name)
)
chart = namespace[chart_name]
output = node["output"]
if output == "none":
raise nodes.SkipNode
elif output == "stdout":
if not stdout:
raise nodes.SkipNode
else:
output_literal = nodes.literal_block(stdout, stdout)
output_literal["language"] = "none"
node.extend([output_literal])
elif output == "repr":
if chart is None:
raise nodes.SkipNode
else:
rep = " " + repr(chart).replace("\n", "\n ")
repr_literal = nodes.literal_block(rep, rep)
repr_literal["language"] = "none"
node.extend([repr_literal])
elif output == "plot":
if isinstance(chart, alt.TopLevelMixin):
# Last line should be a chart; convert to spec dict
try:
spec = chart.to_dict()
except alt.utils.schemapi.SchemaValidationError:
raise ValueError("Invalid chart: {0}".format(node["code"]))
actions = node["links"]
# TODO: add an option to save chart specs to file & load from there.
# TODO: add renderer option
# Write spec to a *.vl.json file
# dest_dir = os.path.join(self.builder.outdir, node['relpath'])
# if not os.path.exists(dest_dir):
# os.makedirs(dest_dir)
# filename = "{0}.vl.json".format(node['target_id'])
# dest_path = os.path.join(dest_dir, filename)
# with open(dest_path, 'w') as f:
# json.dump(spec, f)
# Pass relevant info into the template and append to the output
html = VGL_TEMPLATE.render(
div_id=node["div_id"],
spec=json.dumps(spec),
mode="vega-lite",
renderer="canvas",
actions=json.dumps(actions),
)
self.body.append(html)
else:
warnings.warn(
"altair-plot: {}:{} Malformed block. Last line of "
"code block should define a valid altair Chart object."
"".format(node["rst_source"], node["rst_lineno"])
)
raise nodes.SkipNode
|
7,148 |
def blob_dog(image, min_sigma=1, max_sigma=50, sigma_ratio=1.6, threshold=2.0,
overlap=.5, exclude_border=False):
r"""Finds blobs in the given grayscale image.
Blobs are found using the Difference of Gaussian (DoG) method [1]_.
For each blob found, the method returns its coordinates and the standard
deviation of the Gaussian kernel that detected the blob.
Parameters
----------
image : 2D or 3D ndarray
Input grayscale image, blobs are assumed to be light on dark
background (white on black).
min_sigma : scalar or sequence of scalars, optional
the minimum standard deviation for Gaussian kernel. Keep this low to
detect smaller blobs. The standard deviations of the Gaussian filter
are given for each axis as a sequence, or as a single number, in
which case it is equal for all axes.
max_sigma : scalar or sequence of scalars, optional
The maximum standard deviation for Gaussian kernel. Keep this high to
detect larger blobs. The standard deviations of the Gaussian filter
are given for each axis as a sequence, or as a single number, in
which case it is equal for all axes.
sigma_ratio : float, optional
The ratio between the standard deviation of Gaussian Kernels used for
computing the Difference of Gaussians
threshold : float, optional.
The absolute lower bound for scale space maxima. Local maxima smaller
than thresh are ignored. Reduce this to detect blobs with less
intensities.
overlap : float, optional
A value between 0 and 1. If the area of two blobs overlaps by a
fraction greater than `threshold`, the smaller blob is eliminated.
exclude_border : int or bool, optional
If nonzero int, `exclude_border` excludes blobs from
within `exclude_border`-pixels of the border of the image.
Returns
-------
A : (n, image.ndim + sigma) ndarray
A 2d array with each row representing 2 coordinate values for a 2D
image, and 3 coordinate values for a 3D image, plus the sigma(s) used.
When a single sigma is passed, outputs are:
``(r, c, sigma)`` or ``(p, r, c, sigma)`` where ``(r, c)`` or
``(p, r, c)`` are coordinates of the blob and ``sigma`` is the standard
deviation of the Gaussian kernel which detected the blob. When an
anisotropic gaussian is used (sigmas per dimension), the detected sigma
is returned for each dimension.
References
----------
.. [1] https://en.wikipedia.org/wiki/Blob_detection#The_difference_of_Gaussians_approach
Examples
--------
>>> from skimage import data, feature
>>> feature.blob_dog(data.coins(), threshold=.5, max_sigma=40)
array([[ 267. , 359. , 16.777216],
[ 267. , 115. , 10.48576 ],
[ 263. , 302. , 16.777216],
[ 263. , 245. , 16.777216],
[ 261. , 173. , 16.777216],
[ 260. , 46. , 16.777216],
[ 198. , 155. , 10.48576 ],
[ 196. , 43. , 10.48576 ],
[ 195. , 102. , 16.777216],
[ 194. , 277. , 16.777216],
[ 193. , 213. , 16.777216],
[ 185. , 347. , 16.777216],
[ 128. , 154. , 10.48576 ],
[ 127. , 102. , 10.48576 ],
[ 125. , 208. , 10.48576 ],
[ 125. , 45. , 16.777216],
[ 124. , 337. , 10.48576 ],
[ 120. , 272. , 16.777216],
[ 58. , 100. , 10.48576 ],
[ 54. , 276. , 10.48576 ],
[ 54. , 42. , 16.777216],
[ 52. , 216. , 16.777216],
[ 52. , 155. , 16.777216],
[ 45. , 336. , 16.777216]])
Notes
-----
The radius of each blob is approximately :math:`\sqrt{2}\sigma` for
a 2-D image and :math:`\sqrt{3}\sigma` for a 3-D image.
"""
image = img_as_float(image)
# Gaussian filter requires that sequence-type sigmas have same
# dimensionality as image. This broadcasts scalar kernels
if isinstance(max_sigma, (int, float)):
max_sigma = np.full(len(image.shape), max_sigma, dtype=np.float)
if isinstance(min_sigma, (int, float)):
min_sigma = np.full(len(image.shape), min_sigma, dtype=np.float)
# Convert sequence types to array
min_sigma = np.asarray(min_sigma, dtype=np.float)
max_sigma = np.asarray(max_sigma, dtype=np.float)
# k such that min_sigma*(sigma_ratio**k) > max_sigma
k = int(np.mean(np.log(max_sigma / min_sigma) / np.log(sigma_ratio) + 1))
# a geometric progression of standard deviations for gaussian kernels
sigma_list = np.array([min_sigma * (sigma_ratio ** i)
for i in range(k + 1)])
gaussian_images = [gaussian_filter(image, s) for s in sigma_list]
# computing difference between two successive Gaussian blurred images
# multiplying with average standard deviation provides scale invariance
dog_images = [(gaussian_images[i] - gaussian_images[i + 1])
* np.mean(sigma_list[i]) for i in range(k)]
image_cube = np.stack(dog_images, axis=-1)
# local_maxima = get_local_maxima(image_cube, threshold)
local_maxima = peak_local_max(image_cube, threshold_abs=threshold,
footprint=np.ones((3,) * (image.ndim + 1)),
threshold_rel=0.0,
exclude_border=exclude_border)
# Catch no peaks
if local_maxima.size == 0:
return np.empty((0, 3))
# Convert local_maxima to float64
lm = local_maxima.astype(np.float64)
# translate final column of lm, which contains the index of the
# sigma that produced the maximum intensity value, into the sigma
sigmas_of_peaks = sigma_list[local_maxima[:, -1]]
# if the gaussian is isotropic, the stdev across dimensions are
# identical, so return only the stdev deviation of the first dimension
if np.unique(min_sigma).shape == (1,) and np.unique(max_sigma).shape == (1,):
sigmas_of_peaks = sigmas_of_peaks[:, 0][:, None]
# Remove sigma index and replace with sigmas
lm = np.hstack([lm[:, :-1], sigmas_of_peaks])
return _prune_blobs(lm, overlap)
|
def blob_dog(image, min_sigma=1, max_sigma=50, sigma_ratio=1.6, threshold=2.0,
overlap=.5, *, exclude_border=False):
r"""Finds blobs in the given grayscale image.
Blobs are found using the Difference of Gaussian (DoG) method [1]_.
For each blob found, the method returns its coordinates and the standard
deviation of the Gaussian kernel that detected the blob.
Parameters
----------
image : 2D or 3D ndarray
Input grayscale image, blobs are assumed to be light on dark
background (white on black).
min_sigma : scalar or sequence of scalars, optional
the minimum standard deviation for Gaussian kernel. Keep this low to
detect smaller blobs. The standard deviations of the Gaussian filter
are given for each axis as a sequence, or as a single number, in
which case it is equal for all axes.
max_sigma : scalar or sequence of scalars, optional
The maximum standard deviation for Gaussian kernel. Keep this high to
detect larger blobs. The standard deviations of the Gaussian filter
are given for each axis as a sequence, or as a single number, in
which case it is equal for all axes.
sigma_ratio : float, optional
The ratio between the standard deviation of Gaussian Kernels used for
computing the Difference of Gaussians
threshold : float, optional.
The absolute lower bound for scale space maxima. Local maxima smaller
than thresh are ignored. Reduce this to detect blobs with less
intensities.
overlap : float, optional
A value between 0 and 1. If the area of two blobs overlaps by a
fraction greater than `threshold`, the smaller blob is eliminated.
exclude_border : int or bool, optional
If nonzero int, `exclude_border` excludes blobs from
within `exclude_border`-pixels of the border of the image.
Returns
-------
A : (n, image.ndim + sigma) ndarray
A 2d array with each row representing 2 coordinate values for a 2D
image, and 3 coordinate values for a 3D image, plus the sigma(s) used.
When a single sigma is passed, outputs are:
``(r, c, sigma)`` or ``(p, r, c, sigma)`` where ``(r, c)`` or
``(p, r, c)`` are coordinates of the blob and ``sigma`` is the standard
deviation of the Gaussian kernel which detected the blob. When an
anisotropic gaussian is used (sigmas per dimension), the detected sigma
is returned for each dimension.
References
----------
.. [1] https://en.wikipedia.org/wiki/Blob_detection#The_difference_of_Gaussians_approach
Examples
--------
>>> from skimage import data, feature
>>> feature.blob_dog(data.coins(), threshold=.5, max_sigma=40)
array([[ 267. , 359. , 16.777216],
[ 267. , 115. , 10.48576 ],
[ 263. , 302. , 16.777216],
[ 263. , 245. , 16.777216],
[ 261. , 173. , 16.777216],
[ 260. , 46. , 16.777216],
[ 198. , 155. , 10.48576 ],
[ 196. , 43. , 10.48576 ],
[ 195. , 102. , 16.777216],
[ 194. , 277. , 16.777216],
[ 193. , 213. , 16.777216],
[ 185. , 347. , 16.777216],
[ 128. , 154. , 10.48576 ],
[ 127. , 102. , 10.48576 ],
[ 125. , 208. , 10.48576 ],
[ 125. , 45. , 16.777216],
[ 124. , 337. , 10.48576 ],
[ 120. , 272. , 16.777216],
[ 58. , 100. , 10.48576 ],
[ 54. , 276. , 10.48576 ],
[ 54. , 42. , 16.777216],
[ 52. , 216. , 16.777216],
[ 52. , 155. , 16.777216],
[ 45. , 336. , 16.777216]])
Notes
-----
The radius of each blob is approximately :math:`\sqrt{2}\sigma` for
a 2-D image and :math:`\sqrt{3}\sigma` for a 3-D image.
"""
image = img_as_float(image)
# Gaussian filter requires that sequence-type sigmas have same
# dimensionality as image. This broadcasts scalar kernels
if isinstance(max_sigma, (int, float)):
max_sigma = np.full(len(image.shape), max_sigma, dtype=np.float)
if isinstance(min_sigma, (int, float)):
min_sigma = np.full(len(image.shape), min_sigma, dtype=np.float)
# Convert sequence types to array
min_sigma = np.asarray(min_sigma, dtype=np.float)
max_sigma = np.asarray(max_sigma, dtype=np.float)
# k such that min_sigma*(sigma_ratio**k) > max_sigma
k = int(np.mean(np.log(max_sigma / min_sigma) / np.log(sigma_ratio) + 1))
# a geometric progression of standard deviations for gaussian kernels
sigma_list = np.array([min_sigma * (sigma_ratio ** i)
for i in range(k + 1)])
gaussian_images = [gaussian_filter(image, s) for s in sigma_list]
# computing difference between two successive Gaussian blurred images
# multiplying with average standard deviation provides scale invariance
dog_images = [(gaussian_images[i] - gaussian_images[i + 1])
* np.mean(sigma_list[i]) for i in range(k)]
image_cube = np.stack(dog_images, axis=-1)
# local_maxima = get_local_maxima(image_cube, threshold)
local_maxima = peak_local_max(image_cube, threshold_abs=threshold,
footprint=np.ones((3,) * (image.ndim + 1)),
threshold_rel=0.0,
exclude_border=exclude_border)
# Catch no peaks
if local_maxima.size == 0:
return np.empty((0, 3))
# Convert local_maxima to float64
lm = local_maxima.astype(np.float64)
# translate final column of lm, which contains the index of the
# sigma that produced the maximum intensity value, into the sigma
sigmas_of_peaks = sigma_list[local_maxima[:, -1]]
# if the gaussian is isotropic, the stdev across dimensions are
# identical, so return only the stdev deviation of the first dimension
if np.unique(min_sigma).shape == (1,) and np.unique(max_sigma).shape == (1,):
sigmas_of_peaks = sigmas_of_peaks[:, 0][:, None]
# Remove sigma index and replace with sigmas
lm = np.hstack([lm[:, :-1], sigmas_of_peaks])
return _prune_blobs(lm, overlap)
|
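For reference, a minimal illustrative sketch of the scale-normalized difference-of-Gaussians stack that the function above builds before peak detection. It assumes only numpy and scipy.ndimage; the helper name dog_stack is made up, and the peak-finding and overlap-pruning steps are deliberately omitted.

import numpy as np
from scipy.ndimage import gaussian_filter

def dog_stack(image, min_sigma=1.0, max_sigma=30.0, sigma_ratio=1.6):
    # geometric progression of sigmas so that min_sigma * ratio**k exceeds max_sigma
    k = int(np.log(max_sigma / min_sigma) / np.log(sigma_ratio)) + 1
    sigmas = np.array([min_sigma * sigma_ratio ** i for i in range(k + 1)])
    blurred = [gaussian_filter(image.astype(float), s) for s in sigmas]
    # differences of successive blurs, scaled by sigma for scale invariance
    dogs = [(blurred[i] - blurred[i + 1]) * sigmas[i] for i in range(k)]
    return np.stack(dogs, axis=-1), sigmas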
4,960 |
def test_BoundaryNorm():
"""
GitHub issue #1258: interpolation was failing with numpy
1.7 pre-release.
"""
boundaries = [0, 1.1, 2.2]
vals = [-1, 0, 1, 2, 2.2, 4]
# Without interpolation
expected = [-1, 0, 0, 1, 2, 2]
ncolors = len(boundaries) - 1
bn = mcolors.BoundaryNorm(boundaries, ncolors)
assert_array_equal(bn(vals), expected)
# ncolors != len(boundaries) - 1 triggers interpolation
expected = [-1, 0, 0, 2, 3, 3]
ncolors = len(boundaries)
bn = mcolors.BoundaryNorm(boundaries, ncolors)
assert_array_equal(bn(vals), expected)
# with a single region and interpolation
expected = [-1, 1, 1, 1, 3, 3]
bn = mcolors.BoundaryNorm([0, 2.2], ncolors)
assert_array_equal(bn(vals), expected)
# more boundaries for a third color
boundaries = [0, 1, 2, 3]
vals = [-1, 0.1, 1.1, 2.2, 4]
ncolors = 5
expected = [-1, 0, 2, 4, 5]
bn = mcolors.BoundaryNorm(boundaries, ncolors)
assert_array_equal(bn(vals), expected)
# a scalar as input should not trigger an error and should return a scalar
boundaries = [0, 1, 2]
vals = [-1, 0.1, 1.1, 2.2]
bn = mcolors.BoundaryNorm(boundaries, 2)
expected = [-1, 0, 1, 2]
for v, ex in zip(vals, expected):
ret = bn(v)
assert isinstance(ret, int)
assert_array_equal(ret, ex)
assert_array_equal(bn([v]), ex)
# same with interp
bn = mcolors.BoundaryNorm(boundaries, 3)
expected = [-1, 0, 2, 3]
for v, ex in zip(vals, expected):
ret = bn(v)
assert isinstance(ret, int)
assert_array_equal(ret, ex)
assert_array_equal(bn([v]), ex)
# Clipping
bn = mcolors.BoundaryNorm(boundaries, 3, clip=True)
expected = [0, 0, 2, 2]
for v, ex in zip(vals, expected):
ret = bn(v)
assert isinstance(ret, int)
assert_array_equal(ret, ex)
assert_array_equal(bn([v]), ex)
# Masked arrays
boundaries = [0, 1.1, 2.2]
vals = np.ma.masked_invalid([-1., np.NaN, 0, 1.4, 9])
# Without interpolation
ncolors = len(boundaries) - 1
bn = mcolors.BoundaryNorm(boundaries, ncolors)
expected = np.ma.masked_array([-1, -99, 0, 1, 2], mask=[0, 1, 0, 0, 0])
assert_array_equal(bn(vals), expected)
# With interpolation
bn = mcolors.BoundaryNorm(boundaries, len(boundaries))
expected = np.ma.masked_array([-1, -99, 0, 2, 3], mask=[0, 1, 0, 0, 0])
assert_array_equal(bn(vals), expected)
# Non-trivial masked arrays
vals = np.ma.masked_invalid([np.Inf, np.NaN])
assert np.all(bn(vals).mask)
vals = np.ma.masked_invalid([np.Inf])
assert np.all(bn(vals).mask)
# Incompatible extend and clip
with pytest.raises(ValueError, match="not compatible"):
mcolors.BoundaryNorm(np.arange(4), 5, extend='both', clip=True)
# Too small ncolors argument
with pytest.raises(ValueError, match="ncolors must equal or exceed"):
mcolors.BoundaryNorm(np.arange(4), 2)
with pytest.raises(ValueError, match="ncolors must equal or exceed"):
mcolors.BoundaryNorm(np.arange(4), 3, extend='min')
with pytest.raises(ValueError, match="ncolors must equal or exceed"):
mcolors.BoundaryNorm(np.arange(4), 4, extend='both')
# Testing extend keyword, with interpolation (large cmap)
bounds = [1, 2, 3]
cmap = cm.get_cmap('viridis')
mynorm = mcolors.BoundaryNorm(bounds, cmap.N, extend='both')
refnorm = mcolors.BoundaryNorm([0] + bounds + [4], cmap.N)
x = np.random.randn(100) * 10 + 2
ref = refnorm(x)
ref[ref == 0] = -1
ref[ref == cmap.N - 1] = cmap.N
assert_array_equal(mynorm(x), ref)
# Without interpolation
cmref = mcolors.ListedColormap(['blue', 'red'])
cmref.set_over('black')
cmref.set_under('white')
cmshould = mcolors.ListedColormap(['white', 'blue', 'red', 'black'])
assert_array_equal(cmref.get_over(), mcolors.to_rgba('black'))
assert_array_equal(cmref.get_under(), mcolors.to_rgba('white'))
refnorm = mcolors.BoundaryNorm(bounds, cmref.N)
mynorm = mcolors.BoundaryNorm(bounds, cmshould.N, extend='both')
assert mynorm.vmin == refnorm.vmin
assert mynorm.vmax == refnorm.vmax
assert mynorm(bounds[0] - 0.1) == -1 # under
assert mynorm(bounds[0] + 0.1) == 1 # first bin -> second color
assert mynorm(bounds[-1] - 0.1) == cmshould.N - 2 # next-to-last color
assert mynorm(bounds[-1] + 0.1) == cmshould.N # over
x = [-1, 1.2, 2.3, 9.6]
assert_array_equal(cmshould(mynorm(x)), cmshould([0, 1, 2, 3]))
x = np.random.randn(100) * 10 + 2
assert_array_equal(cmshould(mynorm(x)), cmref(refnorm(x)))
# Just min
cmref = mcolors.ListedColormap(['blue', 'red'])
cmref.set_under('white')
cmshould = mcolors.ListedColormap(['white', 'blue', 'red'])
assert_array_equal(cmref.get_under(), mcolors.to_rgba('white'))
assert cmref.N == 2
assert cmshould.N == 3
refnorm = mcolors.BoundaryNorm(bounds, cmref.N)
mynorm = mcolors.BoundaryNorm(bounds, cmshould.N, extend='min')
assert mynorm.vmin == refnorm.vmin
assert mynorm.vmax == refnorm.vmax
x = [-1, 1.2, 2.3]
assert_array_equal(cmshould(mynorm(x)), cmshould([0, 1, 2]))
x = np.random.randn(100) * 10 + 2
assert_array_equal(cmshould(mynorm(x)), cmref(refnorm(x)))
# Just max
cmref = mcolors.ListedColormap(['blue', 'red'])
cmref.set_over('black')
cmshould = mcolors.ListedColormap(['blue', 'red', 'black'])
assert_array_equal(cmref.get_over(), mcolors.to_rgba('black'))
assert cmref.N == 2
assert cmshould.N == 3
refnorm = mcolors.BoundaryNorm(bounds, cmref.N)
mynorm = mcolors.BoundaryNorm(bounds, cmshould.N, extend='max')
assert mynorm.vmin == refnorm.vmin
assert mynorm.vmax == refnorm.vmax
x = [1.2, 2.3, 4]
assert_array_equal(cmshould(mynorm(x)), cmshould([0, 1, 2]))
x = np.random.randn(100) * 10 + 2
assert_array_equal(cmshould(mynorm(x)), cmref(refnorm(x)))
|
def test_BoundaryNorm():
"""
GitHub issue #1258: interpolation was failing with numpy
1.7 pre-release.
"""
boundaries = [0, 1.1, 2.2]
vals = [-1, 0, 1, 2, 2.2, 4]
# Without interpolation
expected = [-1, 0, 0, 1, 2, 2]
ncolors = len(boundaries) - 1
bn = mcolors.BoundaryNorm(boundaries, ncolors)
assert_array_equal(bn(vals), expected)
# ncolors != len(boundaries) - 1 triggers interpolation
expected = [-1, 0, 0, 2, 3, 3]
ncolors = len(boundaries)
bn = mcolors.BoundaryNorm(boundaries, ncolors)
assert_array_equal(bn(vals), expected)
# with a single region and interpolation
expected = [-1, 1, 1, 1, 3, 3]
bn = mcolors.BoundaryNorm([0, 2.2], ncolors)
assert_array_equal(bn(vals), expected)
# more boundaries for a third color
boundaries = [0, 1, 2, 3]
vals = [-1, 0.1, 1.1, 2.2, 4]
ncolors = 5
expected = [-1, 0, 2, 4, 5]
bn = mcolors.BoundaryNorm(boundaries, ncolors)
assert_array_equal(bn(vals), expected)
# a scalar as input should not trigger an error and should return a scalar
boundaries = [0, 1, 2]
vals = [-1, 0.1, 1.1, 2.2]
bn = mcolors.BoundaryNorm(boundaries, 2)
expected = [-1, 0, 1, 2]
for v, ex in zip(vals, expected):
ret = bn(v)
assert isinstance(ret, int)
assert_array_equal(ret, ex)
assert_array_equal(bn([v]), ex)
# same with interp
bn = mcolors.BoundaryNorm(boundaries, 3)
expected = [-1, 0, 2, 3]
for v, ex in zip(vals, expected):
ret = bn(v)
assert isinstance(ret, int)
assert_array_equal(ret, ex)
assert_array_equal(bn([v]), ex)
# Clipping
bn = mcolors.BoundaryNorm(boundaries, 3, clip=True)
expected = [0, 0, 2, 2]
for v, ex in zip(vals, expected):
ret = bn(v)
assert isinstance(ret, int)
assert_array_equal(ret, ex)
assert_array_equal(bn([v]), ex)
# Masked arrays
boundaries = [0, 1.1, 2.2]
vals = np.ma.masked_invalid([-1., np.NaN, 0, 1.4, 9])
# Without interpolation
ncolors = len(boundaries) - 1
bn = mcolors.BoundaryNorm(boundaries, ncolors)
expected = np.ma.masked_array([-1, -99, 0, 1, 2], mask=[0, 1, 0, 0, 0])
assert_array_equal(bn(vals), expected)
# With interpolation
bn = mcolors.BoundaryNorm(boundaries, len(boundaries))
expected = np.ma.masked_array([-1, -99, 0, 2, 3], mask=[0, 1, 0, 0, 0])
assert_array_equal(bn(vals), expected)
# Non-trivial masked arrays
vals = np.ma.masked_invalid([np.Inf, np.NaN])
assert np.all(bn(vals).mask)
vals = np.ma.masked_invalid([np.Inf])
assert np.all(bn(vals).mask)
# Incompatible extend and clip
with pytest.raises(ValueError, match="not compatible"):
mcolors.BoundaryNorm(np.arange(4), 5, extend='both', clip=True)
# Too small ncolors argument
with pytest.raises(ValueError, match="ncolors must equal or exceed"):
mcolors.BoundaryNorm(np.arange(4), 2)
with pytest.raises(ValueError, match="ncolors must equal or exceed"):
mcolors.BoundaryNorm(np.arange(4), 3, extend='min')
with pytest.raises(ValueError, match="ncolors must equal or exceed"):
mcolors.BoundaryNorm(np.arange(4), 4, extend='both')
# Testing extend keyword, with interpolation (large cmap)
bounds = [1, 2, 3]
cmap = cm.get_cmap('viridis')
mynorm = mcolors.BoundaryNorm(bounds, cmap.N, extend='both')
refnorm = mcolors.BoundaryNorm([0] + bounds + [4], cmap.N)
x = np.random.randn(100) * 10 + 2
ref = refnorm(x)
ref[ref == 0] = -1
ref[ref == cmap.N - 1] = cmap.N
assert_array_equal(mynorm(x), ref)
# Without interpolation
cmref = mcolors.ListedColormap(['blue', 'red'])
cmref.set_over('black')
cmref.set_under('white')
cmshould = mcolors.ListedColormap(['white', 'blue', 'red', 'black'])
assert mcolors.same_color(cmref.get_over(), 'white')
assert_array_equal(cmref.get_under(), mcolors.to_rgba('white'))
refnorm = mcolors.BoundaryNorm(bounds, cmref.N)
mynorm = mcolors.BoundaryNorm(bounds, cmshould.N, extend='both')
assert mynorm.vmin == refnorm.vmin
assert mynorm.vmax == refnorm.vmax
assert mynorm(bounds[0] - 0.1) == -1 # under
assert mynorm(bounds[0] + 0.1) == 1 # first bin -> second color
assert mynorm(bounds[-1] - 0.1) == cmshould.N - 2 # next-to-last color
assert mynorm(bounds[-1] + 0.1) == cmshould.N # over
x = [-1, 1.2, 2.3, 9.6]
assert_array_equal(cmshould(mynorm(x)), cmshould([0, 1, 2, 3]))
x = np.random.randn(100) * 10 + 2
assert_array_equal(cmshould(mynorm(x)), cmref(refnorm(x)))
# Just min
cmref = mcolors.ListedColormap(['blue', 'red'])
cmref.set_under('white')
cmshould = mcolors.ListedColormap(['white', 'blue', 'red'])
assert_array_equal(cmref.get_under(), mcolors.to_rgba('white'))
assert cmref.N == 2
assert cmshould.N == 3
refnorm = mcolors.BoundaryNorm(bounds, cmref.N)
mynorm = mcolors.BoundaryNorm(bounds, cmshould.N, extend='min')
assert mynorm.vmin == refnorm.vmin
assert mynorm.vmax == refnorm.vmax
x = [-1, 1.2, 2.3]
assert_array_equal(cmshould(mynorm(x)), cmshould([0, 1, 2]))
x = np.random.randn(100) * 10 + 2
assert_array_equal(cmshould(mynorm(x)), cmref(refnorm(x)))
# Just max
cmref = mcolors.ListedColormap(['blue', 'red'])
cmref.set_over('black')
cmshould = mcolors.ListedColormap(['blue', 'red', 'black'])
assert_array_equal(cmref.get_over(), mcolors.to_rgba('black'))
assert cmref.N == 2
assert cmshould.N == 3
refnorm = mcolors.BoundaryNorm(bounds, cmref.N)
mynorm = mcolors.BoundaryNorm(bounds, cmshould.N, extend='max')
assert mynorm.vmin == refnorm.vmin
assert mynorm.vmax == refnorm.vmax
x = [1.2, 2.3, 4]
assert_array_equal(cmshould(mynorm(x)), cmshould([0, 1, 2]))
x = np.random.randn(100) * 10 + 2
assert_array_equal(cmshould(mynorm(x)), cmref(refnorm(x)))
|
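A minimal usage sketch of the extend behaviour exercised above; the expected index values are taken directly from the assertions in the test.

import matplotlib.colors as mcolors

bounds = [1, 2, 3]
# 'both' reserves extra color slots for under/over, so ncolors must be
# len(bounds) - 1 + 2 = 4 here (cf. the "ncolors must equal or exceed" checks).
norm = mcolors.BoundaryNorm(bounds, ncolors=4, extend='both')
print(norm(0.9))   # -1 -> mapped to the under color
print(norm(1.1))   # 1  -> first in-range bin, shifted past the under slot
print(norm(3.1))   # 4  -> mapped to the over color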
7,444 |
def imsave(fname, arr, **kwargs):
"""Load a tiff image to file.
Parameters
----------
fname : str or file
File name or file-like-object.
arr : ndarray
The array to write
kwargs : keyword pairs, optional
Additional keyword arguments to pass through (see ``tifffile``'s
``imwrite`` function).
Notes
-----
Provided by the tifffile library [1]_, and supports many
advanced image types including multi-page and floating point.
    This implementation will set `photometric='RGB'` when writing if the first
or last axis of arr has shape 3 or 4. To override this, explicitly
specify the photometric kwarg.
This implementation will set `planarconfig='SEPARATE'` when writing if the
first axis of arr has shape 3 or 4. To override this, explicitly
specify the planarconfig kwarg.
References
----------
.. [1] https://pypi.org/project/tifffile/
"""
if arr.shape[0] in [3, 4]:
if 'planarconfig' not in kwargs:
kwargs['planarconfig'] = 'SEPARATE'
rgb = True
else:
rgb = arr.shape[-1] in [3, 4]
if rgb and 'photometric' not in kwargs:
kwargs['photometric'] = 'RGB'
return tifffile_imwrite(fname, arr, **kwargs)
|
def imsave(fname, arr, **kwargs):
"""Load a tiff image to file.
Parameters
----------
fname : str or file
File name or file-like-object.
arr : ndarray
The array to write
kwargs : keyword pairs, optional
Additional keyword arguments to pass through (see ``tifffile``'s
``imwrite`` function).
Notes
-----
Provided by the tifffile library [1]_, and supports many
advanced image types including multi-page and floating-point.
    This implementation will set `photometric='RGB'` when writing if the first
or last axis of arr has shape 3 or 4. To override this, explicitly
specify the photometric kwarg.
This implementation will set `planarconfig='SEPARATE'` when writing if the
first axis of arr has shape 3 or 4. To override this, explicitly
specify the planarconfig kwarg.
References
----------
.. [1] https://pypi.org/project/tifffile/
"""
if arr.shape[0] in [3, 4]:
if 'planarconfig' not in kwargs:
kwargs['planarconfig'] = 'SEPARATE'
rgb = True
else:
rgb = arr.shape[-1] in [3, 4]
if rgb and 'photometric' not in kwargs:
kwargs['photometric'] = 'RGB'
return tifffile_imwrite(fname, arr, **kwargs)
|
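The keyword-defaulting logic documented above can be exercised without writing a file; the helper name below is made up for illustration and the actual tifffile call is omitted.

import numpy as np

def _tiff_write_defaults(arr, **kwargs):
    # channel-first RGB(A) data is written as separate planes by default
    if arr.shape[0] in (3, 4):
        kwargs.setdefault('planarconfig', 'SEPARATE')
        rgb = True
    else:
        rgb = arr.shape[-1] in (3, 4)
    # RGB(A) data, channel-first or channel-last, is tagged photometric='RGB'
    if rgb:
        kwargs.setdefault('photometric', 'RGB')
    return kwargs

print(_tiff_write_defaults(np.zeros((3, 64, 64))))   # planarconfig and photometric set
print(_tiff_write_defaults(np.zeros((64, 64, 3))))   # photometric only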
24,205 |
def add_style_checker(config, sections, make_envconfig, reader):
# testenv:style
section = '{}{}'.format(tox.config.testenvprefix, STYLE_CHECK_ENV_NAME)
sections[section] = {
'platform': 'linux|darwin|win32',
# These tools require Python 3.6+
# more info: https://github.com/ambv/black/issues/439#issuecomment-411429907
'basepython': 'python3',
'skip_install': 'true',
'deps': '\n'.join(
[
'flake8',
# bugbear version capped due to https://github.com/PyCQA/flake8-bugbear/issues/108
'flake8-bugbear==20.1.0',
'flake8-logging-format',
'black',
'isort[pyproject]>=4.3.15',
]
),
'commands': '\n'.join(
[
'flake8 --config=../.flake8 .',
'black --check --diff .',
'isort --check-only --diff --recursive .',
'python -c "print(\'\\n[WARNING] Complying with following lint rules is recommended, '
'but not mandatory, yet.\')"',
'- flake8 --config=../.flake8 --enable-extensions=G --select=G .', # lint `flake8-logging-format`
]
),
}
# Always add the environment configurations
config.envconfigs[STYLE_CHECK_ENV_NAME] = make_envconfig(
config, STYLE_CHECK_ENV_NAME, section, reader._subs, config
)
# Intentionally add to envlist when seeing what is available
if config.option.env is None or config.option.env == STYLE_CHECK_ENV_NAME:
config.envlist_default.append(STYLE_CHECK_ENV_NAME)
|
def add_style_checker(config, sections, make_envconfig, reader):
# testenv:style
section = '{}{}'.format(tox.config.testenvprefix, STYLE_CHECK_ENV_NAME)
sections[section] = {
'platform': 'linux|darwin|win32',
# These tools require Python 3.6+
# more info: https://github.com/ambv/black/issues/439#issuecomment-411429907
'basepython': 'python3',
'skip_install': 'true',
'deps': '\n'.join(
[
'flake8',
# bugbear version capped due to https://github.com/PyCQA/flake8-bugbear/issues/108
'flake8-bugbear<=20.1.0',
'flake8-logging-format',
'black',
'isort[pyproject]>=4.3.15',
]
),
'commands': '\n'.join(
[
'flake8 --config=../.flake8 .',
'black --check --diff .',
'isort --check-only --diff --recursive .',
'python -c "print(\'\\n[WARNING] Complying with following lint rules is recommended, '
'but not mandatory, yet.\')"',
'- flake8 --config=../.flake8 --enable-extensions=G --select=G .', # lint `flake8-logging-format`
]
),
}
# Always add the environment configurations
config.envconfigs[STYLE_CHECK_ENV_NAME] = make_envconfig(
config, STYLE_CHECK_ENV_NAME, section, reader._subs, config
)
# Intentionally add to envlist when seeing what is available
if config.option.env is None or config.option.env == STYLE_CHECK_ENV_NAME:
config.envlist_default.append(STYLE_CHECK_ENV_NAME)
|
27,783 |
def _parse_pytest_ini(path: Path) -> PARSE_RESULT:
"""Parse the legacy pytest.ini and return the contents of the pytest section
if the file exists and lacks a pytest section, consider it empty"""
iniconfig = _parse_ini_config(path)
if "pytest" in iniconfig:
return dict(iniconfig["pytest"].items())
else:
# "pytest.ini" files are always the source of configuration, even if empty.
return {}
|
def _parse_pytest_ini(path: Path) -> PARSE_RESULT:
"""Parse the pytest.ini file and return the contents of the pytest section
if the file exists and lacks a pytest section, consider it empty"""
iniconfig = _parse_ini_config(path)
if "pytest" in iniconfig:
return dict(iniconfig["pytest"].items())
else:
# "pytest.ini" files are always the source of configuration, even if empty.
return {}
|
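A rough stdlib-only approximation of the behaviour described in the docstring (the real helper goes through _parse_ini_config, defined elsewhere): a pytest.ini that exists but has no [pytest] section still counts as empty configuration.

from configparser import ConfigParser
from pathlib import Path

def parse_pytest_ini_approx(path: Path) -> dict:
    parser = ConfigParser(interpolation=None)   # pytest option values may contain raw '%'
    parser.read(path)                           # silently yields no sections if the file is absent
    if parser.has_section("pytest"):
        return dict(parser.items("pytest"))
    # the file is still the configuration source even when the section is missing
    return {}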
31,526 |
def is_release_branch():
"""
Checks for the current build's branch
    Returns:
        A match object if the branch name looks like a release version (e.g. ``21.10.1``), otherwise None.
"""
branch_name = os.getenv('CI_COMMIT_BRANCH')
return re.match(r'[0-9]{2}\.[0-9]{1,2}\.[0-9]', branch_name)
|
def is_release_branch():
"""
Checks for the current build's branch
    Returns:
        A match object if the branch name looks like a release version (e.g. ``21.10.1``), otherwise None.
"""
branch_name = os.getenv('CI_COMMIT_BRANCH', '')
return re.match(r'[0-9]{2}\.[0-9]{1,2}\.[0-9]', branch_name)
|
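A quick check of the branch-name pattern, and of why the empty-string default in the modified version matters (re.match raises a TypeError when handed None):

import re

RELEASE_RE = r'[0-9]{2}\.[0-9]{1,2}\.[0-9]'
print(bool(re.match(RELEASE_RE, '21.10.1')))   # True  -> treated as a release branch
print(bool(re.match(RELEASE_RE, 'master')))    # False
print(bool(re.match(RELEASE_RE, '')))          # False, rather than a TypeError on None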
47,281 |
def postprocess_qa_predictions_with_beam_search(
examples,
features,
predictions: Tuple[np.ndarray, np.ndarray],
version_2_with_negative: bool = False,
n_best_size: int = 20,
max_answer_length: int = 30,
start_n_top: int = 5,
end_n_top: int = 5,
output_dir: Optional[str] = None,
prefix: Optional[str] = None,
log_level: Optional[int] = logging.WARNING,
):
"""
Post-processes the predictions of a question-answering model with beam search to convert them to answers that are substrings of the
    original contexts. This is the postprocessing function for models that return start and end logits, indices, as well as
cls token predictions.
Args:
examples: The non-preprocessed dataset (see the main script for more information).
features: The processed dataset (see the main script for more information).
predictions (:obj:`Tuple[np.ndarray, np.ndarray]`):
The predictions of the model: two arrays containing the start logits and the end logits respectively. Its
first dimension must match the number of elements of :obj:`features`.
version_2_with_negative (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not the underlying dataset contains examples with no answers.
n_best_size (:obj:`int`, `optional`, defaults to 20):
The total number of n-best predictions to generate when looking for an answer.
max_answer_length (:obj:`int`, `optional`, defaults to 30):
The maximum length of an answer that can be generated. This is needed because the start and end predictions
are not conditioned on one another.
start_n_top (:obj:`int`, `optional`, defaults to 5):
            The number of top start logits to keep when searching for the :obj:`n_best_size` predictions.
        end_n_top (:obj:`int`, `optional`, defaults to 5):
            The number of top end logits to keep when searching for the :obj:`n_best_size` predictions.
output_dir (:obj:`str`, `optional`):
If provided, the dictionaries of predictions, n_best predictions (with their scores and logits) and, if
:obj:`version_2_with_negative=True`, the dictionary of the scores differences between best and null
answers, are saved in `output_dir`.
prefix (:obj:`str`, `optional`):
If provided, the dictionaries mentioned above are saved with `prefix` added to their names.
log_level (:obj:`int`, `optional`, defaults to ``logging.WARNING``):
``logging`` log level (e.g., ``logging.WARNING``)
"""
if not len(predictions) == 5:
raise ValueError("`predictions` should be a tuple with five elements.")
start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits = predictions
if not len(predictions[0]) == len(features):
raise ValueError(f"Got {len(predictions[0])} predictions and {len(features)} features.")
# Build a map example to its corresponding features.
example_id_to_index = {k: i for i, k in enumerate(examples["id"])}
features_per_example = collections.defaultdict(list)
for i, feature in enumerate(features):
features_per_example[example_id_to_index[feature["example_id"]]].append(i)
# The dictionaries we have to fill.
all_predictions = collections.OrderedDict()
all_nbest_json = collections.OrderedDict()
scores_diff_json = collections.OrderedDict() if version_2_with_negative else None
# Logging.
logger.setLevel(log_level)
logger.info(f"Post-processing {len(examples)} example predictions split into {len(features)} features.")
# Let's loop over all the examples!
for example_index, example in enumerate(tqdm(examples)):
# Those are the indices of the features associated to the current example.
feature_indices = features_per_example[example_index]
min_null_score = None
prelim_predictions = []
# Looping through all the features associated to the current example.
for feature_index in feature_indices:
# We grab the predictions of the model for this feature.
start_log_prob = start_top_log_probs[feature_index]
start_indexes = start_top_index[feature_index]
end_log_prob = end_top_log_probs[feature_index]
end_indexes = end_top_index[feature_index]
feature_null_score = cls_logits[feature_index]
# This is what will allow us to map some the positions in our logits to span of texts in the original
# context.
offset_mapping = features[feature_index]["offset_mapping"]
# Optional `token_is_max_context`, if provided we will remove answers that do not have the maximum context
# available in the current feature.
token_is_max_context = features[feature_index].get("token_is_max_context", None)
# Update minimum null prediction
if min_null_score is None or feature_null_score < min_null_score:
min_null_score = feature_null_score
# Go through all possibilities for the `n_start_top`/`n_end_top` greater start and end logits.
for i in range(start_n_top):
for j in range(end_n_top):
start_index = int(start_indexes[i])
j_index = i * end_n_top + j
end_index = int(end_indexes[j_index])
# Don't consider out-of-scope answers (last part of the test should be unnecessary because of the
# p_mask but let's not take any risk)
if (
start_index >= len(offset_mapping)
or end_index >= len(offset_mapping)
or offset_mapping[start_index] is None
or offset_mapping[end_index] is None
):
continue
# Don't consider answers with a length negative or > max_answer_length.
if end_index < start_index or end_index - start_index + 1 > max_answer_length:
continue
# Don't consider answer that don't have the maximum context available (if such information is
# provided).
if token_is_max_context is not None and not token_is_max_context.get(str(start_index), False):
continue
prelim_predictions.append(
{
"offsets": (offset_mapping[start_index][0], offset_mapping[end_index][1]),
"score": start_log_prob[i] + end_log_prob[j_index],
"start_log_prob": start_log_prob[i],
"end_log_prob": end_log_prob[j_index],
}
)
# Only keep the best `n_best_size` predictions.
predictions = sorted(prelim_predictions, key=lambda x: x["score"], reverse=True)[:n_best_size]
# Use the offsets to gather the answer text in the original context.
context = example["context"]
for pred in predictions:
offsets = pred.pop("offsets")
pred["text"] = context[offsets[0] : offsets[1]]
# In the very rare edge case we have not a single non-null prediction, we create a fake prediction to avoid
# failure.
if len(predictions) == 0:
predictions.insert(0, {"text": "", "start_logit": -1e-6, "end_logit": -1e-6, "score": -2e-6})
# Compute the softmax of all scores (we do it with numpy to stay independent from torch/tf in this file, using
# the LogSumExp trick).
scores = np.array([pred.pop("score") for pred in predictions])
exp_scores = np.exp(scores - np.max(scores))
probs = exp_scores / exp_scores.sum()
# Include the probabilities in our predictions.
for prob, pred in zip(probs, predictions):
pred["probability"] = prob
# Pick the best prediction and set the probability for the null answer.
all_predictions[example["id"]] = predictions[0]["text"]
if version_2_with_negative:
scores_diff_json[example["id"]] = float(min_null_score)
# Make `predictions` JSON-serializable by casting np.float back to float.
all_nbest_json[example["id"]] = [
{k: (float(v) if isinstance(v, (np.float16, np.float32, np.float64)) else v) for k, v in pred.items()}
for pred in predictions
]
# If we have an output_dir, let's save all those dicts.
if output_dir is not None:
if not os.path.isdir(output_dir):
raise EnvironmentError(f"{output_dir} is not a directory.")
prediction_file = os.path.join(
output_dir, "predictions.json" if prefix is None else f"{prefix}_predictions.json"
)
nbest_file = os.path.join(
output_dir, "nbest_predictions.json" if prefix is None else f"{prefix}_nbest_predictions.json"
)
if version_2_with_negative:
null_odds_file = os.path.join(
output_dir, "null_odds.json" if prefix is None else f"{prefix}_null_odds.json"
)
logger.info(f"Saving predictions to {prediction_file}.")
with open(prediction_file, "w") as writer:
writer.write(json.dumps(all_predictions, indent=4) + "\n")
logger.info(f"Saving nbest_preds to {nbest_file}.")
with open(nbest_file, "w") as writer:
writer.write(json.dumps(all_nbest_json, indent=4) + "\n")
if version_2_with_negative:
logger.info(f"Saving null_odds to {null_odds_file}.")
with open(null_odds_file, "w") as writer:
writer.write(json.dumps(scores_diff_json, indent=4) + "\n")
return all_predictions, scores_diff_json
|
def postprocess_qa_predictions_with_beam_search(
examples,
features,
predictions: Tuple[np.ndarray, np.ndarray],
version_2_with_negative: bool = False,
n_best_size: int = 20,
max_answer_length: int = 30,
start_n_top: int = 5,
end_n_top: int = 5,
output_dir: Optional[str] = None,
prefix: Optional[str] = None,
log_level: Optional[int] = logging.WARNING,
):
"""
Post-processes the predictions of a question-answering model with beam search to convert them to answers that are substrings of the
    original contexts. This is the postprocessing function for models that return start and end logits, indices, as well as
cls token predictions.
Args:
examples: The non-preprocessed dataset (see the main script for more information).
features: The processed dataset (see the main script for more information).
predictions (:obj:`Tuple[np.ndarray, np.ndarray]`):
The predictions of the model: two arrays containing the start logits and the end logits respectively. Its
first dimension must match the number of elements of :obj:`features`.
version_2_with_negative (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not the underlying dataset contains examples with no answers.
n_best_size (:obj:`int`, `optional`, defaults to 20):
The total number of n-best predictions to generate when looking for an answer.
max_answer_length (:obj:`int`, `optional`, defaults to 30):
The maximum length of an answer that can be generated. This is needed because the start and end predictions
are not conditioned on one another.
start_n_top (:obj:`int`, `optional`, defaults to 5):
            The number of top start logits to keep when searching for the :obj:`n_best_size` predictions.
        end_n_top (:obj:`int`, `optional`, defaults to 5):
            The number of top end logits to keep when searching for the :obj:`n_best_size` predictions.
output_dir (:obj:`str`, `optional`):
If provided, the dictionaries of predictions, n_best predictions (with their scores and logits) and, if
:obj:`version_2_with_negative=True`, the dictionary of the scores differences between best and null
answers, are saved in `output_dir`.
prefix (:obj:`str`, `optional`):
If provided, the dictionaries mentioned above are saved with `prefix` added to their names.
log_level (:obj:`int`, `optional`, defaults to ``logging.WARNING``):
``logging`` log level (e.g., ``logging.WARNING``)
"""
if not len(predictions) == 5:
raise ValueError("`predictions` should be a tuple with five elements.")
start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits = predictions
if len(predictions[0]) != len(features):
raise ValueError(f"Got {len(predictions[0])} predictions and {len(features)} features.")
# Build a map example to its corresponding features.
example_id_to_index = {k: i for i, k in enumerate(examples["id"])}
features_per_example = collections.defaultdict(list)
for i, feature in enumerate(features):
features_per_example[example_id_to_index[feature["example_id"]]].append(i)
# The dictionaries we have to fill.
all_predictions = collections.OrderedDict()
all_nbest_json = collections.OrderedDict()
scores_diff_json = collections.OrderedDict() if version_2_with_negative else None
# Logging.
logger.setLevel(log_level)
logger.info(f"Post-processing {len(examples)} example predictions split into {len(features)} features.")
# Let's loop over all the examples!
for example_index, example in enumerate(tqdm(examples)):
# Those are the indices of the features associated to the current example.
feature_indices = features_per_example[example_index]
min_null_score = None
prelim_predictions = []
# Looping through all the features associated to the current example.
for feature_index in feature_indices:
# We grab the predictions of the model for this feature.
start_log_prob = start_top_log_probs[feature_index]
start_indexes = start_top_index[feature_index]
end_log_prob = end_top_log_probs[feature_index]
end_indexes = end_top_index[feature_index]
feature_null_score = cls_logits[feature_index]
# This is what will allow us to map some the positions in our logits to span of texts in the original
# context.
offset_mapping = features[feature_index]["offset_mapping"]
# Optional `token_is_max_context`, if provided we will remove answers that do not have the maximum context
# available in the current feature.
token_is_max_context = features[feature_index].get("token_is_max_context", None)
# Update minimum null prediction
if min_null_score is None or feature_null_score < min_null_score:
min_null_score = feature_null_score
# Go through all possibilities for the `n_start_top`/`n_end_top` greater start and end logits.
for i in range(start_n_top):
for j in range(end_n_top):
start_index = int(start_indexes[i])
j_index = i * end_n_top + j
end_index = int(end_indexes[j_index])
# Don't consider out-of-scope answers (last part of the test should be unnecessary because of the
# p_mask but let's not take any risk)
if (
start_index >= len(offset_mapping)
or end_index >= len(offset_mapping)
or offset_mapping[start_index] is None
or offset_mapping[end_index] is None
):
continue
# Don't consider answers with a length negative or > max_answer_length.
if end_index < start_index or end_index - start_index + 1 > max_answer_length:
continue
# Don't consider answer that don't have the maximum context available (if such information is
# provided).
if token_is_max_context is not None and not token_is_max_context.get(str(start_index), False):
continue
prelim_predictions.append(
{
"offsets": (offset_mapping[start_index][0], offset_mapping[end_index][1]),
"score": start_log_prob[i] + end_log_prob[j_index],
"start_log_prob": start_log_prob[i],
"end_log_prob": end_log_prob[j_index],
}
)
# Only keep the best `n_best_size` predictions.
predictions = sorted(prelim_predictions, key=lambda x: x["score"], reverse=True)[:n_best_size]
# Use the offsets to gather the answer text in the original context.
context = example["context"]
for pred in predictions:
offsets = pred.pop("offsets")
pred["text"] = context[offsets[0] : offsets[1]]
# In the very rare edge case we have not a single non-null prediction, we create a fake prediction to avoid
# failure.
if len(predictions) == 0:
predictions.insert(0, {"text": "", "start_logit": -1e-6, "end_logit": -1e-6, "score": -2e-6})
# Compute the softmax of all scores (we do it with numpy to stay independent from torch/tf in this file, using
# the LogSumExp trick).
scores = np.array([pred.pop("score") for pred in predictions])
exp_scores = np.exp(scores - np.max(scores))
probs = exp_scores / exp_scores.sum()
# Include the probabilities in our predictions.
for prob, pred in zip(probs, predictions):
pred["probability"] = prob
# Pick the best prediction and set the probability for the null answer.
all_predictions[example["id"]] = predictions[0]["text"]
if version_2_with_negative:
scores_diff_json[example["id"]] = float(min_null_score)
# Make `predictions` JSON-serializable by casting np.float back to float.
all_nbest_json[example["id"]] = [
{k: (float(v) if isinstance(v, (np.float16, np.float32, np.float64)) else v) for k, v in pred.items()}
for pred in predictions
]
# If we have an output_dir, let's save all those dicts.
if output_dir is not None:
if not os.path.isdir(output_dir):
raise EnvironmentError(f"{output_dir} is not a directory.")
prediction_file = os.path.join(
output_dir, "predictions.json" if prefix is None else f"{prefix}_predictions.json"
)
nbest_file = os.path.join(
output_dir, "nbest_predictions.json" if prefix is None else f"{prefix}_nbest_predictions.json"
)
if version_2_with_negative:
null_odds_file = os.path.join(
output_dir, "null_odds.json" if prefix is None else f"{prefix}_null_odds.json"
)
logger.info(f"Saving predictions to {prediction_file}.")
with open(prediction_file, "w") as writer:
writer.write(json.dumps(all_predictions, indent=4) + "\n")
logger.info(f"Saving nbest_preds to {nbest_file}.")
with open(nbest_file, "w") as writer:
writer.write(json.dumps(all_nbest_json, indent=4) + "\n")
if version_2_with_negative:
logger.info(f"Saving null_odds to {null_odds_file}.")
with open(null_odds_file, "w") as writer:
writer.write(json.dumps(scores_diff_json, indent=4) + "\n")
return all_predictions, scores_diff_json
|
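The LogSumExp trick mentioned in the comment above amounts to subtracting the maximum score before exponentiating; a minimal sketch:

import numpy as np

def stable_softmax(scores):
    # exp() of large logits would overflow; shifting by the max leaves the
    # result unchanged because the constant cancels in the normalization
    shifted = np.exp(scores - np.max(scores))
    return shifted / shifted.sum()

print(stable_softmax(np.array([1000.0, 1001.0, 1002.0])))   # ~[0.090, 0.245, 0.665]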
31,035 |
def parse_data_pattern_rule(report_json, verdict_field, results_field):
"""Parse data pattern matches for a given rule"""
if report_json.get(verdict_field) != "MATCHED":
return []
data_patterns = []
for dp in report_json.get("scanContentRawReport", {}).get(results_field, []):
if (dp.get("state") == "EVALUATED") and (dp.get("unique_detection_frequency", 0) >= 1):
data_patterns.append({
'DataPatternName': dp["name"],
'LowConfidenceFrequency': dp["low_confidence_frequency"],
'HighConfidenceFrequency': dp["high_confidence_frequency"],
'MediumConfidenceFrequency': dp["medium_confidence_frequency"],
'Detections': dp.get("detections")
})
return data_patterns
|
def parse_data_pattern_rule(report_json, verdict_field, results_field):
"""Parse data pattern matches for a given rule"""
if report_json.get(verdict_field) != "MATCHED":
return []
data_patterns = []
for dp in report_json.get("scanContentRawReport", {}).get(results_field, []):
if (dp.get("state") == "EVALUATED") and (dp.get("unique_detection_frequency", 0) >= 1):
data_patterns.append({
'DataPatternName': dp.get('name'),
'LowConfidenceFrequency': dp["low_confidence_frequency"],
'HighConfidenceFrequency': dp["high_confidence_frequency"],
'MediumConfidenceFrequency': dp["medium_confidence_frequency"],
'Detections': dp.get("detections")
})
return data_patterns
|
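A hypothetical report fragment, consistent with the fields read above (the verdict and results field names are invented for illustration), showing which entries survive the state/frequency filter:

report_json = {
    "data_pattern_verdict": "MATCHED",            # hypothetical verdict field name
    "scanContentRawReport": {
        "data_pattern_results": [                 # hypothetical results field name
            {"state": "EVALUATED", "unique_detection_frequency": 2,
             "name": "SSN", "low_confidence_frequency": 0,
             "medium_confidence_frequency": 1, "high_confidence_frequency": 1,
             "detections": None},
            # dropped by the parser: not EVALUATED and no unique detections
            {"state": "NOT_EVALUATED", "unique_detection_frequency": 0, "name": "IBAN"},
        ],
    },
}
# parse_data_pattern_rule(report_json, "data_pattern_verdict", "data_pattern_results")
# would return a single entry describing the "SSN" match.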
30,256 |
def main():
## Global variables declaration
global REPO, OWNER, API, API_KEY
REPO = demisto.params().get('repo_name')
OWNER = demisto.params().get('repo_owner')
API_KEY = demisto.params().get('api_key')
API = 'https://api.github.com/repos/' + OWNER + "/" + REPO
'''EXECUTION CODE'''
COMMANDS = {
"list-all-issues": list_all_issues_command,
'create-issue': create_issue_command,
'close-issue': close_issue_command,
'update-issue': update_issue_command,
'download-count': download_count_command,
'search-issues': search_issues_command,
'fetch-incidents': fetch_incidents_command
}
command = demisto.command()
LOG('GitHub command is: %s' % (command,))
try:
if command == 'test-module':
headers = {'Authorization': 'Bearer ' + API_KEY}
r = requests.request("GET",
API,
headers=headers)
if (r.status_code == 200):
demisto.results('ok')
else:
demisto.results('Unable to connect with the given credentials.')
sys.exit(0)
cmd_func = COMMANDS.get(command)
if cmd_func is None:
raise NotImplemented('Command "%s" is not implemented.') % (cmd_func)
else:
cmd_func()
except Exception as e:
import traceback
return_error('GitHub: {}'.format(str(e)), traceback.format_exc())
|
def main():
## Global variables declaration
global REPO, OWNER, API, API_KEY
REPO = demisto.params().get('repo_name')
OWNER = demisto.params().get('repo_owner')
API_KEY = demisto.params().get('api_key')
API = 'https://api.github.com/repos/' + OWNER + "/" + REPO
'''EXECUTION CODE'''
COMMANDS = {
"list-all-issues": list_all_issues_command,
'create-issue': create_issue_command,
'close-issue': close_issue_command,
'update-issue': update_issue_command,
'download-count': download_count_command,
'search-issues': search_issues_command,
'fetch-incidents': fetch_incidents_command
}
command = demisto.command()
LOG('GitHub command is: %s' % (command,))
try:
if command == 'test-module':
headers = {'Authorization': 'Bearer ' + API_KEY}
r = requests.request("GET",
API,
headers=headers)
if (r.status_code == 200):
demisto.results('ok')
else:
demisto.results('Unable to connect with the given credentials.')
sys.exit(0)
cmd_func = COMMANDS.get(command)
if cmd_func is None:
raise NotImplemented('Command "{}" is not implemented.'.format(command))
else:
cmd_func()
except Exception as e:
import traceback
return_error('GitHub: {}'.format(str(e)), traceback.format_exc())
|
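The key functional difference between the two versions above is the unknown-command branch: NotImplemented is a sentinel constant, not an exception class, so calling it fails with a TypeError before the intended message is ever raised. A minimal sketch of the dispatch pattern with the corrected raise:

COMMANDS = {'ping': lambda: 'pong'}

def dispatch(command):
    cmd_func = COMMANDS.get(command)
    if cmd_func is None:
        # raise NotImplementedError (the exception), not NotImplemented (the constant)
        raise NotImplementedError('Command "{}" is not implemented.'.format(command))
    return cmd_func()

print(dispatch('ping'))   # pong
# dispatch('missing') raises NotImplementedError with a readable message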
42,918 |
def is_clique(graph: nx.Graph) -> bool:
"""Determines if the input graph is a clique. A clique of :math:`n` nodes has
:math:`n*(n-1)/2` edges.
Example usage:
.. code-block::
>>> from strawberryfields.apps.graph import utils
>>> import networkx as nx
>>> graph = nx.complete_graph(10)
>>> utils.is_clique(graph)
True
Args:
graph (nx.Graph): The input graph.
Returns:
bool: ``True`` if input graph is a clique and ``False`` otherwise.
"""
edges = graph.edges
nodes = graph.order()
return len(edges) == nodes * (nodes - 1) / 2
|
def is_clique(graph: nx.Graph) -> bool:
"""Determines if the input graph is a clique. A clique of :math:`n` nodes has
:math:`n*(n-1)/2` edges.
Example usage:
.. code-block::
>>> from strawberryfields.apps.graph import utils
>>> import networkx as nx
>>> graph = nx.complete_graph(10)
>>> utils.is_clique(graph)
True
Args:
graph (nx.Graph): The input graph.
Returns:
bool: ``True`` if input graph is a clique and ``False`` otherwise
"""
edges = graph.edges
nodes = graph.order()
return len(edges) == nodes * (nodes - 1) / 2
|
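The edge-count criterion in the docstring is easy to verify by hand; a short sketch that recreates the helper locally instead of importing it:

import networkx as nx

def is_clique(graph: nx.Graph) -> bool:
    n = graph.order()
    return len(graph.edges) == n * (n - 1) / 2   # a complete graph has n*(n-1)/2 edges

g = nx.complete_graph(5)    # 10 edges
print(is_clique(g))         # True
g.remove_edge(0, 1)         # 9 edges left, no longer a clique
print(is_clique(g))         # False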
23,605 |
def singlediode(photocurrent, saturation_current, resistance_series,
resistance_shunt, nNsVth, ivcurve_pnts=None,
method='lambertw'):
r"""
Solve the single-diode model to obtain a photovoltaic IV curve.
Singlediode solves the single diode equation [1]_
.. math::
I = I_L -
I_0 \left[
\exp \left(\frac{V+I R_s}{n N_s V_{th}} \right)-1
\right] -
\frac{V + I R_s}{R_{sh}}
for :math:`I` and :math:`V` when given :math:`I_L, I_0, R_s, R_{sh},` and
:math:`n N_s V_{th}` which are described later. Returns a DataFrame
which contains the 5 points on the I-V curve specified in
SAND2004-3535 [3]_. If all :math:`I_L, I_0, R_s, R_{sh},` and
:math:`n N_s V_{th}` are scalar, a single curve will be returned, if any
are Series (of the same length), multiple IV curves will be calculated.
The input parameters can be calculated using
:py:func:`~pvlib.pvsystem.calcparams_desoto` from meteorological data.
Parameters
----------
photocurrent : numeric
Light-generated current :math:`I_L` (photocurrent) under desired
IV curve conditions. ``0 <= photocurrent``. [A]
saturation_current : numeric
Diode saturation :math:`I_0` current under desired IV curve
conditions. ``0 < saturation_current``. [A]
resistance_series : numeric
Series resistance :math:`R_s` under desired IV curve conditions.
``0 <= resistance_series < numpy.inf``. [ohms]
resistance_shunt : numeric
Shunt resistance :math:`R_{sh}` under desired IV curve conditions.
``0 < resistance_shunt <= numpy.inf``. [ohms]
nNsVth : numeric
The product of three components. 1) The usual diode ideal factor
:math:`n`, 2) the number of cells in series :math:`N_s`, and 3)
the cell thermal voltage under the desired IV curve conditions
:math:`V_{th}`. The thermal voltage of the cell (in volts) may be
calculated as :math:`k_B T_c / q`, where :math:`k_B` is
Boltzmann's constant (J/K), :math:`T_c` is the temperature of the p-n
junction in Kelvin, and :math:`q` is the charge of an electron
(coulombs). ``0 < nNsVth``. [V]
ivcurve_pnts : None or int, default None
Number of points in the desired IV curve. If None or 0, no
IV curves will be produced.
method : str, default 'lambertw'
Determines the method used to calculate points on the IV curve. The
options are ``'lambertw'``, ``'newton'``, or ``'brentq'``.
Returns
-------
OrderedDict or DataFrame
The returned dict-like object always contains the keys/columns:
* i_sc - short circuit current in amperes.
* v_oc - open circuit voltage in volts.
* i_mp - current at maximum power point in amperes.
* v_mp - voltage at maximum power point in volts.
* p_mp - power at maximum power point in watts.
* i_x - current, in amperes, at ``v = 0.5*v_oc``.
* i_xx - current, in amperes, at ``V = 0.5*(v_oc+v_mp)``.
If ivcurve_pnts is greater than 0, the output dictionary will also
include the keys:
* i - IV curve current in amperes.
* v - IV curve voltage in volts.
The output will be an OrderedDict if photocurrent is a scalar,
array, or ivcurve_pnts is not None.
The output will be a DataFrame if photocurrent is a Series and
ivcurve_pnts is None.
Notes
-----
If the method is ``'lambertw'`` then the solution employed to solve the
implicit diode equation utilizes the Lambert W function to obtain an
explicit function of :math:`V=f(I)` and :math:`I=f(V)` as shown in [2]_.
If the method is ``'newton'`` then the root-finding Newton-Raphson method
is used. It should be safe for well behaved IV-curves, but the ``'brentq'``
method is recommended for reliability.
If the method is ``'brentq'`` then Brent's bisection search method is used
that guarantees convergence by bounding the voltage between zero and
open-circuit.
If the method is either ``'newton'`` or ``'brentq'`` and ``ivcurve_pnts``
are indicated, then :func:`pvlib.singlediode.bishop88` [4]_ is used to
calculate the points on the IV curve points at diode voltages from zero to
open-circuit voltage with a log spacing that gets closer as voltage
increases. If the method is ``'lambertw'`` then the calculated points on
the IV curve are linearly spaced.
References
----------
.. [1] S.R. Wenham, M.A. Green, M.E. Watt, "Applied Photovoltaics" ISBN
0 86758 909 4
.. [2] A. Jain, A. Kapoor, "Exact analytical solutions of the
parameters of real solar cells using Lambert W-function", Solar
Energy Materials and Solar Cells, 81 (2004) 269-277.
.. [3] D. King et al, "Sandia Photovoltaic Array Performance Model",
SAND2004-3535, Sandia National Laboratories, Albuquerque, NM
.. [4] "Computer simulation of the effects of electrical mismatches in
photovoltaic cell interconnection circuits" JW Bishop, Solar Cell (1988)
https://doi.org/10.1016/0379-6787(88)90059-2
See also
--------
sapm
calcparams_desoto
pvlib.singlediode.bishop88
"""
# Calculate points on the IV curve using the LambertW solution to the
# single diode equation
if method.lower() == 'lambertw':
out = _singlediode._lambertw(
photocurrent, saturation_current, resistance_series,
resistance_shunt, nNsVth, ivcurve_pnts
)
i_sc, v_oc, i_mp, v_mp, p_mp, i_x, i_xx = out[:7]
if ivcurve_pnts:
ivcurve_i, ivcurve_v = out[7:]
else:
# Calculate points on the IV curve using either 'newton' or 'brentq'
# methods. Voltages are determined by first solving the single diode
# equation for the diode voltage V_d then backing out voltage
args = (photocurrent, saturation_current, resistance_series,
resistance_shunt, nNsVth) # collect args
v_oc = _singlediode.bishop88_v_from_i(
0.0, *args, method=method.lower()
)
i_mp, v_mp, p_mp = _singlediode.bishop88_mpp(
*args, method=method.lower()
)
i_sc = _singlediode.bishop88_i_from_v(
0.0, *args, method=method.lower()
)
i_x = _singlediode.bishop88_i_from_v(
v_oc / 2.0, *args, method=method.lower()
)
i_xx = _singlediode.bishop88_i_from_v(
(v_oc + v_mp) / 2.0, *args, method=method.lower()
)
# calculate the IV curve if requested using bishop88
if ivcurve_pnts:
vd = v_oc * (
(11.0 - np.logspace(np.log10(11.0), 0.0,
ivcurve_pnts)) / 10.0
)
ivcurve_i, ivcurve_v, _ = _singlediode.bishop88(vd, *args)
out = OrderedDict()
out['i_sc'] = i_sc
out['v_oc'] = v_oc
out['i_mp'] = i_mp
out['v_mp'] = v_mp
out['p_mp'] = p_mp
out['i_x'] = i_x
out['i_xx'] = i_xx
if ivcurve_pnts:
out['v'] = ivcurve_v
out['i'] = ivcurve_i
if isinstance(photocurrent, pd.Series) and not ivcurve_pnts:
out = pd.DataFrame(out, index=photocurrent.index)
return out
|
def singlediode(photocurrent, saturation_current, resistance_series,
resistance_shunt, nNsVth, ivcurve_pnts=None,
method='lambertw'):
r"""
Solve the single-diode equation to obtain a photovoltaic IV curve.
Singlediode solves the single diode equation [1]_
.. math::
I = I_L -
I_0 \left[
\exp \left(\frac{V+I R_s}{n N_s V_{th}} \right)-1
\right] -
\frac{V + I R_s}{R_{sh}}
for :math:`I` and :math:`V` when given :math:`I_L, I_0, R_s, R_{sh},` and
:math:`n N_s V_{th}` which are described later. Returns a DataFrame
which contains the 5 points on the I-V curve specified in
SAND2004-3535 [3]_. If all :math:`I_L, I_0, R_s, R_{sh},` and
:math:`n N_s V_{th}` are scalar, a single curve will be returned, if any
are Series (of the same length), multiple IV curves will be calculated.
The input parameters can be calculated using
:py:func:`~pvlib.pvsystem.calcparams_desoto` from meteorological data.
Parameters
----------
photocurrent : numeric
Light-generated current :math:`I_L` (photocurrent) under desired
IV curve conditions. ``0 <= photocurrent``. [A]
saturation_current : numeric
Diode saturation :math:`I_0` current under desired IV curve
conditions. ``0 < saturation_current``. [A]
resistance_series : numeric
Series resistance :math:`R_s` under desired IV curve conditions.
``0 <= resistance_series < numpy.inf``. [ohms]
resistance_shunt : numeric
Shunt resistance :math:`R_{sh}` under desired IV curve conditions.
``0 < resistance_shunt <= numpy.inf``. [ohms]
nNsVth : numeric
The product of three components. 1) The usual diode ideal factor
:math:`n`, 2) the number of cells in series :math:`N_s`, and 3)
the cell thermal voltage under the desired IV curve conditions
:math:`V_{th}`. The thermal voltage of the cell (in volts) may be
calculated as :math:`k_B T_c / q`, where :math:`k_B` is
Boltzmann's constant (J/K), :math:`T_c` is the temperature of the p-n
junction in Kelvin, and :math:`q` is the charge of an electron
(coulombs). ``0 < nNsVth``. [V]
ivcurve_pnts : None or int, default None
Number of points in the desired IV curve. If None or 0, no
IV curves will be produced.
method : str, default 'lambertw'
Determines the method used to calculate points on the IV curve. The
options are ``'lambertw'``, ``'newton'``, or ``'brentq'``.
Returns
-------
OrderedDict or DataFrame
The returned dict-like object always contains the keys/columns:
* i_sc - short circuit current in amperes.
* v_oc - open circuit voltage in volts.
* i_mp - current at maximum power point in amperes.
* v_mp - voltage at maximum power point in volts.
* p_mp - power at maximum power point in watts.
* i_x - current, in amperes, at ``v = 0.5*v_oc``.
* i_xx - current, in amperes, at ``V = 0.5*(v_oc+v_mp)``.
If ivcurve_pnts is greater than 0, the output dictionary will also
include the keys:
* i - IV curve current in amperes.
* v - IV curve voltage in volts.
The output will be an OrderedDict if photocurrent is a scalar,
array, or ivcurve_pnts is not None.
The output will be a DataFrame if photocurrent is a Series and
ivcurve_pnts is None.
Notes
-----
If the method is ``'lambertw'`` then the solution employed to solve the
implicit diode equation utilizes the Lambert W function to obtain an
explicit function of :math:`V=f(I)` and :math:`I=f(V)` as shown in [2]_.
If the method is ``'newton'`` then the root-finding Newton-Raphson method
is used. It should be safe for well behaved IV-curves, but the ``'brentq'``
method is recommended for reliability.
If the method is ``'brentq'`` then Brent's bisection search method is used
that guarantees convergence by bounding the voltage between zero and
open-circuit.
If the method is either ``'newton'`` or ``'brentq'`` and ``ivcurve_pnts``
are indicated, then :func:`pvlib.singlediode.bishop88` [4]_ is used to
calculate the points on the IV curve points at diode voltages from zero to
open-circuit voltage with a log spacing that gets closer as voltage
increases. If the method is ``'lambertw'`` then the calculated points on
the IV curve are linearly spaced.
References
----------
.. [1] S.R. Wenham, M.A. Green, M.E. Watt, "Applied Photovoltaics" ISBN
0 86758 909 4
.. [2] A. Jain, A. Kapoor, "Exact analytical solutions of the
parameters of real solar cells using Lambert W-function", Solar
Energy Materials and Solar Cells, 81 (2004) 269-277.
.. [3] D. King et al, "Sandia Photovoltaic Array Performance Model",
SAND2004-3535, Sandia National Laboratories, Albuquerque, NM
.. [4] "Computer simulation of the effects of electrical mismatches in
photovoltaic cell interconnection circuits" JW Bishop, Solar Cell (1988)
https://doi.org/10.1016/0379-6787(88)90059-2
See also
--------
sapm
calcparams_desoto
pvlib.singlediode.bishop88
"""
# Calculate points on the IV curve using the LambertW solution to the
# single diode equation
if method.lower() == 'lambertw':
out = _singlediode._lambertw(
photocurrent, saturation_current, resistance_series,
resistance_shunt, nNsVth, ivcurve_pnts
)
i_sc, v_oc, i_mp, v_mp, p_mp, i_x, i_xx = out[:7]
if ivcurve_pnts:
ivcurve_i, ivcurve_v = out[7:]
else:
# Calculate points on the IV curve using either 'newton' or 'brentq'
# methods. Voltages are determined by first solving the single diode
# equation for the diode voltage V_d then backing out voltage
args = (photocurrent, saturation_current, resistance_series,
resistance_shunt, nNsVth) # collect args
v_oc = _singlediode.bishop88_v_from_i(
0.0, *args, method=method.lower()
)
i_mp, v_mp, p_mp = _singlediode.bishop88_mpp(
*args, method=method.lower()
)
i_sc = _singlediode.bishop88_i_from_v(
0.0, *args, method=method.lower()
)
i_x = _singlediode.bishop88_i_from_v(
v_oc / 2.0, *args, method=method.lower()
)
i_xx = _singlediode.bishop88_i_from_v(
(v_oc + v_mp) / 2.0, *args, method=method.lower()
)
# calculate the IV curve if requested using bishop88
if ivcurve_pnts:
vd = v_oc * (
(11.0 - np.logspace(np.log10(11.0), 0.0,
ivcurve_pnts)) / 10.0
)
ivcurve_i, ivcurve_v, _ = _singlediode.bishop88(vd, *args)
out = OrderedDict()
out['i_sc'] = i_sc
out['v_oc'] = v_oc
out['i_mp'] = i_mp
out['v_mp'] = v_mp
out['p_mp'] = p_mp
out['i_x'] = i_x
out['i_xx'] = i_xx
if ivcurve_pnts:
out['v'] = ivcurve_v
out['i'] = ivcurve_i
if isinstance(photocurrent, pd.Series) and not ivcurve_pnts:
out = pd.DataFrame(out, index=photocurrent.index)
return out
|
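The log-spaced diode-voltage grid used in the 'newton'/'brentq' branch above can be inspected on its own; with an illustrative open-circuit voltage of 40 V it starts at zero and packs points more densely toward v_oc:

import numpy as np

v_oc, pnts = 40.0, 5    # illustrative values only
vd = v_oc * (11.0 - np.logspace(np.log10(11.0), 0.0, pnts)) / 10.0
print(vd)   # approx. [0., 19.8, 30.7, 36.7, 40.]; spacing tightens near v_oc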
7,443 |
def imsave(fname, arr, **kwargs):
"""Load a tiff image to file.
Parameters
----------
fname : str or file
File name or file-like-object.
arr : ndarray
The array to write
kwargs : keyword pairs, optional
Additional keyword arguments to pass through (see ``tifffile``'s
``imwrite`` function).
Notes
-----
Provided by the tifffile library [1]_, and supports many
advanced image types including multi-page and floating point.
    This implementation will set `photometric='RGB'` when writing if the first
or last axis of arr has shape 3 or 4. To override this, explicitly
specify the photometric kwarg.
This implementation will set `planarconfig='SEPARATE'` when writing if the
first axis of arr has shape 3 or 4. To override this, explicitly
specify the planarconfig kwarg.
References
----------
.. [1] https://pypi.org/project/tifffile/
"""
if arr.shape[0] in [3, 4]:
if 'planarconfig' not in kwargs:
kwargs['planarconfig'] = 'SEPARATE'
rgb = True
else:
rgb = arr.shape[-1] in [3, 4]
if rgb and 'photometric' not in kwargs:
kwargs['photometric'] = 'RGB'
return tifffile_imwrite(fname, arr, **kwargs)
|
def imsave(fname, arr, **kwargs):
"""Load a tiff image to file.
Parameters
----------
fname : str or file
File name or file-like-object.
arr : ndarray
The array to write.
kwargs : keyword pairs, optional
Additional keyword arguments to pass through (see ``tifffile``'s
``imwrite`` function).
Notes
-----
Provided by the tifffile library [1]_, and supports many
advanced image types including multi-page and floating point.
    This implementation will set `photometric='RGB'` when writing if the first
or last axis of arr has shape 3 or 4. To override this, explicitly
specify the photometric kwarg.
This implementation will set `planarconfig='SEPARATE'` when writing if the
first axis of arr has shape 3 or 4. To override this, explicitly
specify the planarconfig kwarg.
References
----------
.. [1] https://pypi.org/project/tifffile/
"""
if arr.shape[0] in [3, 4]:
if 'planarconfig' not in kwargs:
kwargs['planarconfig'] = 'SEPARATE'
rgb = True
else:
rgb = arr.shape[-1] in [3, 4]
if rgb and 'photometric' not in kwargs:
kwargs['photometric'] = 'RGB'
return tifffile_imwrite(fname, arr, **kwargs)
|
30,642 |
def parse_outputs(
api_res: Dict,
meta_fields: list = [],
quota_fields: list = [],
resources_fields: list = [],
sandbox_filds: list = []
) -> Dict[str, dict]:
"""Parse group data as received from CrowdStrike FalconX API into Demisto's conventions
the output from the API is a dict that contains the keys: meta, resources and errors
the meta contains a "quota" dict
the "resources" is an array that contains the sandbox dict
the function filters the wanted params from the api result
:param api_res: the api result from the http request
    :param meta_fields: the wanted params that appear in the meta section
:param quota_fields: the wanted params that appear in the quota section
:param resources_fields: the wanted params that appear in the resources section
:param sandbox_filds: the wanted params that appear in the sandbox section
:return: a dict based on api_res with the wanted params only
"""
if api_res.get("errors"):
# if there is an error in the api result, return only the error
return api_res.get("errors")
api_res_meta, api_res_quota, api_res_resources, api_res_sandbox = {}, {}, {}, {}
resources_group_outputs, sandbox_group_outputs = {}, {}
api_res_meta = api_res.get("meta")
if api_res_meta:
api_res_quota = api_res_meta.get("quota")
meta_group_outputs = add_outputs_from_dict(api_res_meta, meta_fields)
quota_group_outputs = add_outputs_from_dict(api_res_quota, quota_fields)
if api_res.get("resources"):
        # depending on the command, the resources section can be a str list or a list that contains
# only one argument which is a dict
if type(api_res.get("resources")[0]) == dict:
api_res_resources = api_res.get("resources")[0]
resources_group_outputs = add_outputs_from_dict(api_res_resources, resources_fields)
if api_res_resources and api_res_resources.get("sandbox"):
api_res_sandbox = api_res_resources.get("sandbox")[0]
sandbox_group_outputs = add_outputs_from_dict(api_res_sandbox, sandbox_filds)
else:
# the resources section is a list of strings
resources_group_outputs = {"resources": api_res.get("resources")}
merged_dicts = {**meta_group_outputs, **quota_group_outputs, **resources_group_outputs, **sandbox_group_outputs}
return {f'csfalconx.resource(val.resource === obj.resource)': merged_dicts}
|
def parse_outputs(
api_res: Dict,
meta_fields: list = [],
quota_fields: list = [],
resources_fields: list = [],
sandbox_filds: list = []
) -> Dict[str, dict]:
"""Parse group data as received from CrowdStrike FalconX API into Demisto's conventions
the output from the API is a dict that contains the keys: meta, resources and errors
the meta contains a "quota" dict
the "resources" is an array that contains the sandbox dict
the function filters the wanted params from the api result
:param api_res: the api result from the http request
    :param meta_fields: the wanted params that appear in the meta section
:param quota_fields: the wanted params that appear in the quota section
:param resources_fields: the wanted params that appear in the resources section
:param sandbox_filds: the wanted params that appear in the sandbox section
:return: a dict based on api_res with the wanted params only
"""
if api_res.get("errors"):
# if there is an error in the api result, return only the error
return api_res.get("errors")
api_res_meta, api_res_quota, api_res_resources, api_res_sandbox = {}, {}, {}, {}
resources_group_outputs, sandbox_group_outputs = {}, {}
api_res_meta = api_res.get("meta")
if api_res_meta:
api_res_quota = api_res_meta.get("quota")
meta_group_outputs = add_outputs_from_dict(api_res_meta, meta_fields)
quota_group_outputs = add_outputs_from_dict(api_res_quota, quota_fields)
if api_res.get("resources"):
        # depending on the command, the resources section can be a list of strings or a list
        # that contains a single dict
if type(api_res.get("resources")[0]) == dict:
api_res_resources = api_res.get("resources")[0]
resources_group_outputs = add_outputs_from_dict(api_res_resources, resources_fields)
if api_res_resources and api_res_resources.get("sandbox"):
api_res_sandbox = api_res_resources.get("sandbox")[0]
sandbox_group_outputs = add_outputs_from_dict(api_res_sandbox, sandbox_fields)
else:
# the resources section is a list of strings
resources_group_outputs = {"resources": api_res.get("resources")}
merged_dicts = {**meta_group_outputs, **quota_group_outputs, **resources_group_outputs, **sandbox_group_outputs}
return {f'csfalconx.resource(val.resource === obj.resource)': merged_dicts}
|
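parse_outputs above delegates the field filtering to an add_outputs_from_dict helper that is not shown in this entry. A minimal, hypothetical stand-in (illustration only, not the integration's actual helper) that keeps just the requested keys from a section of the API response:

from typing import Dict, List

def add_outputs_from_dict(api_section: Dict, wanted_fields: List[str]) -> Dict:
    # Keep only the requested keys that actually appear in the section.
    if not api_section or not wanted_fields:
        return {}
    return {field: api_section[field] for field in wanted_fields if field in api_section}

meta = {"quota": {"total": 100, "used": 7}, "powered_by": "falconx-api", "trace_id": "abc"}
print(add_outputs_from_dict(meta, ["powered_by", "trace_id"]))
# {'powered_by': 'falconx-api', 'trace_id': 'abc'}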
37,571 |
def _plot_histogram_data(data, labels, number_to_keep):
"""Generate the data needed for plotting counts.
Parameters:
data (list or dict): This is either a list of dictionaries or a single
dict containing the values to represent (ex {'001': 130})
labels (list): The list of bitstring labels for the plot.
number_to_keep (int): The number of terms to plot and rest
is made into a single bar called 'rest'.
Returns:
tuple: tuple containing:
(dict): The labels actually used in the plotting.
(list): List of ndarrays for the bars in each experiment.
(list): Indices for the locations of the bars for each
experiment.
"""
labels_dict = OrderedDict()
all_pvalues = []
all_inds = []
# if multiple executions, we consider number_to_keep for each execution
# and this may result in more than number_to_keep slots
multimple_exec_keys_dict = OrderedDict()
if len(data) > 1 and number_to_keep is not None:
for execution in data:
for common_key in dict(Counter(execution).most_common(number_to_keep)):
multimple_exec_keys_dict[common_key] = 1
for execution in data:
if number_to_keep is not None:
data_temp = dict(Counter(execution).most_common(number_to_keep))
data_temp["rest"] = sum(execution.values()) - sum(data_temp.values())
execution = data_temp
values = []
for key in labels:
if key not in execution:
if number_to_keep is None:
labels_dict[key] = 1
values.append(0)
else:
if key in multimple_exec_keys_dict:
# save label only if the key is present in other execution
labels_dict[key] = 1
values.append(0)
else:
labels_dict[key] = 1
values.append(execution[key])
values = np.array(values, dtype=float)
pvalues = values / sum(values)
all_pvalues.append(pvalues)
numelem = len(values)
ind = np.arange(numelem) # the x locations for the groups
all_inds.append(ind)
return labels_dict, all_pvalues, all_inds
|
def _plot_histogram_data(data, labels, number_to_keep):
"""Generate the data needed for plotting counts.
Parameters:
data (list or dict): This is either a list of dictionaries or a single
dict containing the values to represent (ex {'001': 130})
labels (list): The list of bitstring labels for the plot.
number_to_keep (int): The number of terms to plot and rest
is made into a single bar called 'rest'.
Returns:
tuple: tuple containing:
(dict): The labels actually used in the plotting.
(list): List of ndarrays for the bars in each experiment.
(list): Indices for the locations of the bars for each
experiment.
"""
labels_dict = OrderedDict()
all_pvalues = []
all_inds = []
# if multiple executions, we consider number_to_keep for each execution
# and this may result in more than number_to_keep slots
    multiple_exec_keys_dict = OrderedDict()
if len(data) > 1 and number_to_keep is not None:
for execution in data:
for common_key in dict(Counter(execution).most_common(number_to_keep)):
multiple_exec_keys_dict[common_key] = 1
for execution in data:
if number_to_keep is not None:
data_temp = dict(Counter(execution).most_common(number_to_keep))
data_temp["rest"] = sum(execution.values()) - sum(data_temp.values())
execution = data_temp
values = []
for key in labels:
if key not in execution:
if number_to_keep is None:
labels_dict[key] = 1
values.append(0)
else:
                    if key in multiple_exec_keys_dict:
# save label only if the key is present in other execution
labels_dict[key] = 1
values.append(0)
else:
labels_dict[key] = 1
values.append(execution[key])
values = np.array(values, dtype=float)
pvalues = values / sum(values)
all_pvalues.append(pvalues)
numelem = len(values)
ind = np.arange(numelem) # the x locations for the groups
all_inds.append(ind)
return labels_dict, all_pvalues, all_inds
|
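A minimal sketch (with invented counts) of the number_to_keep handling used above: keep the most common bitstrings and fold everything else into a 'rest' bucket.

from collections import Counter

counts = {'000': 480, '111': 450, '010': 40, '101': 30}
number_to_keep = 2

kept = dict(Counter(counts).most_common(number_to_keep))
kept['rest'] = sum(counts.values()) - sum(kept.values())
print(kept)  # {'000': 480, '111': 450, 'rest': 70}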
7,854 |
def test_atoms_distribmat_cell(uo2, water):
""" Test if correct number of atoms is returned for a cell with
'distribmat' fill
"""
c = openmc.Cell(fill=[uo2, water])
c.volume = 6.0
# Calculate the expected number of atoms
expected_nucs = ['U235', 'O16', 'H1']
M_uo2 = ((atomic_mass('U235') + 2 * atomic_mass('O16'))/3)
M_water = ((2 * atomic_mass('H1') + atomic_mass('O16'))/3)
expected_atoms = list()
expected_atoms.append(1/3 * uo2.density/M_uo2 * AVOGADRO * 3.0) # U235
expected_atoms.append(2/3 * uo2.density/M_uo2 * AVOGADRO * 3.0 +
1/3 * water.density/M_water * AVOGADRO * 3.0) # O16
expected_atoms.append(2/3 * water.density/M_water * AVOGADRO * 3.0) # H1
tuples = list(c.atoms.items())
for nuc, atom_num, t in zip(expected_nucs, expected_atoms, tuples):
assert nuc == t[0]
assert atom_num == pytest.approx(t[1])
|
def test_atoms_distribmat_cell(uo2, water):
""" Test if correct number of atoms is returned for a cell with
'distribmat' fill
"""
c = openmc.Cell(fill=[uo2, water])
c.volume = 6.0
# Calculate the expected number of atoms
expected_nucs = ['U235', 'O16', 'H1']
M_uo2 = (atomic_mass('U235') + 2 * atomic_mass('O16')) / 3
M_water = ((2 * atomic_mass('H1') + atomic_mass('O16'))/3)
expected_atoms = list()
expected_atoms.append(1/3 * uo2.density/M_uo2 * AVOGADRO * 3.0) # U235
expected_atoms.append(2/3 * uo2.density/M_uo2 * AVOGADRO * 3.0 +
1/3 * water.density/M_water * AVOGADRO * 3.0) # O16
expected_atoms.append(2/3 * water.density/M_water * AVOGADRO * 3.0) # H1
tuples = list(c.atoms.items())
for nuc, atom_num, t in zip(expected_nucs, expected_atoms, tuples):
assert nuc == t[0]
assert atom_num == pytest.approx(t[1])
|
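The expected values in the test come from the usual number-density relation N = fraction * rho / M * N_A * V. A small worked sketch with an assumed UO2 density and rounded atomic masses (the fixture's real values are not shown in this entry):

AVOGADRO = 6.02214076e23   # atoms / mol
rho_uo2 = 10.0             # g/cm^3, assumed for illustration only
volume = 3.0               # cm^3 assigned to the UO2 half of the cell
M_uo2 = (235.0 + 2 * 16.0) / 3   # rough molar mass per atom of UO2

# One third of the atoms in UO2 are U235:
n_u235 = 1 / 3 * rho_uo2 / M_uo2 * AVOGADRO * volume
print(f"{n_u235:.3e} atoms of U235")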
22,090 |
def _ntlm_authenticate_info(request):
"""
Extract host information in an NTLM_AUTH message
"""
if (len(request) < 52):
LOGGER.warning("NTLM message is too short (%d) but should be at least "
"52 char long", len(request))
return None
value = []
offset, ln = struct.unpack('IH', request[32:36] + request[28:30])
if ln > 0:
value.append("domain:" + \
encode_b64(_extract_substr(request, offset, ln)).decode())
has_version = False
# Flags are not present in a NTLM_AUTH message when the data block starts
# before index 64
if offset >= 64 and len(request) > 64:
flags, = struct.unpack('I', request[60:64])
has_version = flags & flag_version
off, ln = struct.unpack('IH', request[40:44] + request[36:38])
if ln > 0:
value.append("user-name:" + \
encode_b64(_extract_substr(request, off, ln)).decode())
off, ln = struct.unpack('IH', request[48:52] + request[44:46])
if ln > 0:
value.append("workstation:" + \
encode_b64(_extract_substr(request, off, ln)).decode())
# Get OS Version if the `Negotiate Version` is set
# (NTLM_AUTH messages with a data block starting before index 72 do not
# contain information on the version)
if offset >= 72 and len(request) > 72 and has_version:
maj, minor, bld, ntlm_ver = struct.unpack('BBHB', request[64:65] +
request[65:66] +
request[66:68] +
request[71:72])
version = "{}.{}.{}".format(maj, minor, bld).encode()
value.append("ntlm-os:{}".format(encode_b64(version).decode()))
value.append("ntlm-version:{}".format(ntlm_ver))
return 'NTLM ' + ','.join(value)
|
def _ntlm_authenticate_info(request):
"""
Extract host information in an NTLM_AUTH message
"""
if (len(request) < 52):
LOGGER.warning("NTLM message is too short (%d) but should be at least "
"52 char long", len(request))
return None
value = []
offset, ln = struct.unpack('IH', request[32:36] + request[28:30])
if ln > 0:
value.append("domain:" + \
encode_b64(_extract_substr(request, offset, ln)).decode())
has_version = False
# Flags are not present in a NTLM_AUTH message when the data block starts
# before index 64
if offset >= 64 and len(request) > 64:
flags, = struct.unpack('I', request[60:64])
has_version = flags & flag_version
ln, off = struct.unpack('H2xI', request[36:44])
if ln > 0:
value.append("user-name:" + \
encode_b64(_extract_substr(request, off, ln)).decode())
off, ln = struct.unpack('IH', request[48:52] + request[44:46])
if ln > 0:
value.append("workstation:" + \
encode_b64(_extract_substr(request, off, ln)).decode())
# Get OS Version if the `Negotiate Version` is set
# (NTLM_AUTH messages with a data block starting before index 72 do not
# contain information on the version)
if offset >= 72 and len(request) > 72 and has_version:
maj, minor, bld, ntlm_ver = struct.unpack('BBHB', request[64:65] +
request[65:66] +
request[66:68] +
request[71:72])
version = "{}.{}.{}".format(maj, minor, bld).encode()
value.append("ntlm-os:{}".format(encode_b64(version).decode()))
value.append("ntlm-version:{}".format(ntlm_ver))
return 'NTLM ' + ','.join(value)
|
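The change in the modified version replaces the two separate slices unpacked with 'IH' by a single 'H2xI' unpack over bytes 36-43: the 2-byte length, two pad bytes for the maximum-length field, then the 4-byte buffer offset. A quick standalone check of the equivalence on a fabricated buffer (field values chosen only for illustration):

import struct

# Fabricated NTLM AUTHENTICATE field descriptor: UserNameLen=8, UserNameMaxLen=8,
# UserNameBufferOffset=88.
request = bytes(36) + struct.pack('HHI', 8, 8, 88)

off, ln = struct.unpack('IH', request[40:44] + request[36:38])   # original style
ln2, off2 = struct.unpack('H2xI', request[36:44])                 # modified style
assert (ln, off) == (ln2, off2) == (8, 88)
print(ln, off)  # 8 88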
10,473 |
def parse_args():
"""Parse command line arguments."""
try:
import argparse
except ImportError:
if '--requirements' not in sys.argv:
raise
# install argparse without using constraints since pip may be too old to support them
# not using the ansible-test requirements file since this install is for sys.executable rather than the delegated python (which may be different)
# argparse has no special requirements, so upgrading pip is not required here
raw_command(generate_pip_install(generate_pip_command(sys.executable), 'argparse', packages=['argparse'], use_constraints=False))
import argparse
try:
import argcomplete
except ImportError:
argcomplete = None
if argcomplete:
epilog = 'Tab completion available using the "argcomplete" python package.'
else:
epilog = 'Install the "argcomplete" python package to enable tab completion.'
def key_value_type(value): # type: (str) -> t.Tuple[str, str]
"""Wrapper around key_value."""
return key_value(argparse, value)
parser = argparse.ArgumentParser(epilog=epilog)
common = argparse.ArgumentParser(add_help=False)
common.add_argument('-e', '--explain',
action='store_true',
help='explain commands that would be executed')
common.add_argument('-v', '--verbose',
dest='verbosity',
action='count',
default=0,
help='display more output')
common.add_argument('--color',
metavar='COLOR',
nargs='?',
help='generate color output: %(choices)s',
choices=('yes', 'no', 'auto'),
const='yes',
default='auto')
common.add_argument('--debug',
action='store_true',
help='run ansible commands in debug mode')
# noinspection PyTypeChecker
common.add_argument('--truncate',
dest='truncate',
metavar='COLUMNS',
type=int,
default=display.columns,
help='truncate some long output (0=disabled) (default: auto)')
common.add_argument('--redact',
dest='redact',
action='store_true',
default=True,
help='redact sensitive values in output')
common.add_argument('--no-redact',
dest='redact',
action='store_false',
default=False,
help='show sensitive values in output')
common.add_argument('--check-python',
choices=SUPPORTED_PYTHON_VERSIONS,
help=argparse.SUPPRESS)
test = argparse.ArgumentParser(add_help=False, parents=[common])
test.add_argument('include',
metavar='TARGET',
nargs='*',
help='test the specified target').completer = complete_target
test.add_argument('--include',
metavar='TARGET',
action='append',
help='include the specified target').completer = complete_target
test.add_argument('--exclude',
metavar='TARGET',
action='append',
help='exclude the specified target').completer = complete_target
test.add_argument('--require',
metavar='TARGET',
action='append',
help='require the specified target').completer = complete_target
test.add_argument('--coverage',
action='store_true',
help='analyze code coverage when running tests')
test.add_argument('--coverage-label',
default='',
help='label to include in coverage output file names')
test.add_argument('--coverage-check',
action='store_true',
help='only verify code coverage can be enabled')
test.add_argument('--metadata',
help=argparse.SUPPRESS)
test.add_argument('--base-branch',
help=argparse.SUPPRESS)
add_changes(test, argparse)
add_environments(test)
integration = argparse.ArgumentParser(add_help=False, parents=[test])
integration.add_argument('--python',
metavar='VERSION',
choices=SUPPORTED_PYTHON_VERSIONS + ('default',),
help='python version: %s' % ', '.join(SUPPORTED_PYTHON_VERSIONS))
integration.add_argument('--start-at',
metavar='TARGET',
help='start at the specified target').completer = complete_target
integration.add_argument('--start-at-task',
metavar='TASK',
help='start at the specified task')
integration.add_argument('--tags',
metavar='TAGS',
help='only run plays and tasks tagged with these values')
integration.add_argument('--skip-tags',
metavar='TAGS',
help='only run plays and tasks whose tags do not match these values')
integration.add_argument('--diff',
action='store_true',
help='show diff output')
integration.add_argument('--allow-destructive',
action='store_true',
help='allow destructive tests')
integration.add_argument('--allow-root',
action='store_true',
help='allow tests requiring root when not root')
integration.add_argument('--allow-disabled',
action='store_true',
help='allow tests which have been marked as disabled')
integration.add_argument('--allow-unstable',
action='store_true',
help='allow tests which have been marked as unstable')
integration.add_argument('--allow-unstable-changed',
action='store_true',
help='allow tests which have been marked as unstable when focused changes are detected')
integration.add_argument('--allow-unsupported',
action='store_true',
help='allow tests which have been marked as unsupported')
integration.add_argument('--retry-on-error',
action='store_true',
help='retry failed test with increased verbosity')
integration.add_argument('--continue-on-error',
action='store_true',
help='continue after failed test')
integration.add_argument('--debug-strategy',
action='store_true',
help='run test playbooks using the debug strategy')
integration.add_argument('--changed-all-target',
metavar='TARGET',
default='all',
help='target to run when all tests are needed')
integration.add_argument('--changed-all-mode',
metavar='MODE',
choices=('default', 'include', 'exclude'),
help='include/exclude behavior with --changed-all-target: %(choices)s')
integration.add_argument('--list-targets',
action='store_true',
help='list matching targets instead of running tests')
integration.add_argument('--no-temp-workdir',
action='store_true',
help='do not run tests from a temporary directory (use only for verifying broken tests)')
integration.add_argument('--no-temp-unicode',
action='store_true',
help='avoid unicode characters in temporary directory (use only for verifying broken tests)')
subparsers = parser.add_subparsers(metavar='COMMAND')
subparsers.required = True # work-around for python 3 bug which makes subparsers optional
posix_integration = subparsers.add_parser('integration',
parents=[integration],
help='posix integration tests')
posix_integration.set_defaults(func=command_posix_integration,
targets=walk_posix_integration_targets,
config=PosixIntegrationConfig)
add_extra_docker_options(posix_integration)
add_httptester_options(posix_integration, argparse)
network_integration = subparsers.add_parser('network-integration',
parents=[integration],
help='network integration tests')
network_integration.set_defaults(func=command_network_integration,
targets=walk_network_integration_targets,
config=NetworkIntegrationConfig)
add_extra_docker_options(network_integration, integration=False)
network_integration.add_argument('--platform',
metavar='PLATFORM',
action='append',
help='network platform/version').completer = complete_network_platform
network_integration.add_argument('--platform-collection',
type=key_value_type,
metavar='PLATFORM=COLLECTION',
action='append',
help='collection used to test platform').completer = complete_network_platform_collection
network_integration.add_argument('--platform-connection',
type=key_value_type,
metavar='PLATFORM=CONNECTION',
action='append',
help='connection used to test platform').completer = complete_network_platform_connection
network_integration.add_argument('--inventory',
metavar='PATH',
help='path to inventory used for tests')
network_integration.add_argument('--testcase',
metavar='TESTCASE',
help='limit a test to a specified testcase').completer = complete_network_testcase
windows_integration = subparsers.add_parser('windows-integration',
parents=[integration],
help='windows integration tests')
windows_integration.set_defaults(func=command_windows_integration,
targets=walk_windows_integration_targets,
config=WindowsIntegrationConfig)
add_extra_docker_options(windows_integration, integration=False)
add_httptester_options(windows_integration, argparse)
windows_integration.add_argument('--windows',
metavar='VERSION',
action='append',
help='windows version').completer = complete_windows
windows_integration.add_argument('--inventory',
metavar='PATH',
help='path to inventory used for tests')
units = subparsers.add_parser('units',
parents=[test],
help='unit tests')
units.set_defaults(func=command_units,
targets=walk_units_targets,
config=UnitsConfig)
units.add_argument('--python',
metavar='VERSION',
choices=SUPPORTED_PYTHON_VERSIONS + ('default',),
help='python version: %s' % ', '.join(SUPPORTED_PYTHON_VERSIONS))
units.add_argument('--collect-only',
action='store_true',
help='collect tests but do not execute them')
# noinspection PyTypeChecker
units.add_argument('--num-workers',
type=int,
help='number of workers to use (default: auto)')
units.add_argument('--requirements-mode',
choices=('only', 'skip'),
help=argparse.SUPPRESS)
add_extra_docker_options(units, integration=False)
sanity = subparsers.add_parser('sanity',
parents=[test],
help='sanity tests')
sanity.set_defaults(func=command_sanity,
targets=walk_sanity_targets,
config=SanityConfig)
sanity.add_argument('--test',
metavar='TEST',
action='append',
choices=[test.name for test in sanity_get_tests()],
help='tests to run').completer = complete_sanity_test
sanity.add_argument('--skip-test',
metavar='TEST',
action='append',
choices=[test.name for test in sanity_get_tests()],
help='tests to skip').completer = complete_sanity_test
sanity.add_argument('--allow-disabled',
action='store_true',
help='allow tests to run which are disabled by default')
sanity.add_argument('--list-tests',
action='store_true',
help='list available tests')
sanity.add_argument('--python',
metavar='VERSION',
choices=SUPPORTED_PYTHON_VERSIONS + ('default',),
help='python version: %s' % ', '.join(SUPPORTED_PYTHON_VERSIONS))
sanity.add_argument('--enable-optional-errors',
action='store_true',
help='enable optional errors')
add_lint(sanity)
add_extra_docker_options(sanity, integration=False)
shell = subparsers.add_parser('shell',
parents=[common],
help='open an interactive shell')
shell.add_argument('--python',
metavar='VERSION',
choices=SUPPORTED_PYTHON_VERSIONS + ('default',),
help='python version: %s' % ', '.join(SUPPORTED_PYTHON_VERSIONS))
shell.set_defaults(func=command_shell,
config=ShellConfig)
shell.add_argument('--raw',
action='store_true',
help='direct to shell with no setup')
add_environments(shell)
add_extra_docker_options(shell)
add_httptester_options(shell, argparse)
coverage_common = argparse.ArgumentParser(add_help=False, parents=[common])
add_environments(coverage_common, isolated_delegation=False)
coverage = subparsers.add_parser('coverage',
help='code coverage management and reporting')
coverage_subparsers = coverage.add_subparsers(metavar='COMMAND')
coverage_subparsers.required = True # work-around for python 3 bug which makes subparsers optional
add_coverage_analyze(coverage_subparsers, coverage_common)
coverage_combine = coverage_subparsers.add_parser('combine',
parents=[coverage_common],
help='combine coverage data and rewrite remote paths')
coverage_combine.set_defaults(func=command_coverage_combine,
config=CoverageConfig)
add_extra_coverage_options(coverage_combine)
coverage_erase = coverage_subparsers.add_parser('erase',
parents=[coverage_common],
help='erase coverage data files')
coverage_erase.set_defaults(func=command_coverage_erase,
config=CoverageConfig)
coverage_report = coverage_subparsers.add_parser('report',
parents=[coverage_common],
help='generate console coverage report')
coverage_report.set_defaults(func=command_coverage_report,
config=CoverageReportConfig)
coverage_report.add_argument('--show-missing',
action='store_true',
help='show line numbers of statements not executed')
coverage_report.add_argument('--include',
metavar='PAT1,PAT2,...',
help='include only files whose paths match one of these '
'patterns. Accepts shell-style wildcards, which must be '
'quoted.')
coverage_report.add_argument('--omit',
metavar='PAT1,PAT2,...',
help='omit files whose paths match one of these patterns. '
'Accepts shell-style wildcards, which must be quoted.')
add_extra_coverage_options(coverage_report)
coverage_html = coverage_subparsers.add_parser('html',
parents=[coverage_common],
help='generate html coverage report')
coverage_html.set_defaults(func=command_coverage_html,
config=CoverageConfig)
add_extra_coverage_options(coverage_html)
coverage_xml = coverage_subparsers.add_parser('xml',
parents=[coverage_common],
help='generate xml coverage report')
coverage_xml.set_defaults(func=command_coverage_xml,
config=CoverageConfig)
add_extra_coverage_options(coverage_xml)
env = subparsers.add_parser('env',
parents=[common],
help='show information about the test environment')
env.set_defaults(func=command_env,
config=EnvConfig)
env.add_argument('--show',
action='store_true',
help='show environment on stdout')
env.add_argument('--dump',
action='store_true',
help='dump environment to disk')
env.add_argument('--list-files',
action='store_true',
help='list files on stdout')
# noinspection PyTypeChecker
env.add_argument('--timeout',
type=int,
metavar='MINUTES',
help='timeout for future ansible-test commands (0 clears)')
if argcomplete:
argcomplete.autocomplete(parser, always_complete_options=False, validator=lambda i, k: True)
args = parser.parse_args()
if args.explain and not args.verbosity:
args.verbosity = 1
if args.color == 'yes':
args.color = True
elif args.color == 'no':
args.color = False
else:
args.color = sys.stdout.isatty()
return args
|
def parse_args():
"""Parse command line arguments."""
try:
import argparse
except ImportError:
if '--requirements' not in sys.argv:
raise
# install argparse without using constraints since pip may be too old to support them
# not using the ansible-test requirements file since this install is for sys.executable rather than the delegated python (which may be different)
# argparse has no special requirements, so upgrading pip is not required here
raw_command(generate_pip_install(generate_pip_command(sys.executable), 'argparse', packages=['argparse'], use_constraints=False))
import argparse
try:
import argcomplete
except ImportError:
argcomplete = None
if argcomplete:
epilog = 'Tab completion available using the "argcomplete" python package.'
else:
epilog = 'Install the "argcomplete" python package to enable tab completion.'
def key_value_type(value): # type: (str) -> t.Tuple[str, str]
"""Wrapper around key_value."""
return key_value(argparse, value)
parser = argparse.ArgumentParser(epilog=epilog)
common = argparse.ArgumentParser(add_help=False)
common.add_argument('-e', '--explain',
action='store_true',
help='explain commands that would be executed')
common.add_argument('-v', '--verbose',
dest='verbosity',
action='count',
default=0,
help='display more output')
common.add_argument('--color',
metavar='COLOR',
nargs='?',
help='generate color output: %(choices)s',
choices=('yes', 'no', 'auto'),
const='yes',
default='auto')
common.add_argument('--debug',
action='store_true',
help='run ansible commands in debug mode')
# noinspection PyTypeChecker
common.add_argument('--truncate',
dest='truncate',
metavar='COLUMNS',
type=int,
default=display.columns,
help='truncate some long output (0=disabled) (default: auto)')
common.add_argument('--redact',
dest='redact',
action='store_true',
default=True,
help='redact sensitive values in output')
common.add_argument('--no-redact',
dest='redact',
action='store_false',
default=False,
help='show sensitive values in output')
common.add_argument('--check-python',
choices=SUPPORTED_PYTHON_VERSIONS,
help=argparse.SUPPRESS)
test = argparse.ArgumentParser(add_help=False, parents=[common])
test.add_argument('include',
metavar='TARGET',
nargs='*',
help='test the specified target').completer = complete_target
test.add_argument('--include',
metavar='TARGET',
action='append',
help='include the specified target').completer = complete_target
test.add_argument('--exclude',
metavar='TARGET',
action='append',
help='exclude the specified target').completer = complete_target
test.add_argument('--require',
metavar='TARGET',
action='append',
help='require the specified target').completer = complete_target
test.add_argument('--coverage',
action='store_true',
help='analyze code coverage when running tests')
test.add_argument('--coverage-label',
default='',
help='label to include in coverage output file names')
test.add_argument('--coverage-check',
action='store_true',
help='only verify code coverage can be enabled')
test.add_argument('--metadata',
help=argparse.SUPPRESS)
test.add_argument('--base-branch',
help='base branch used for change detection')
add_changes(test, argparse)
add_environments(test)
integration = argparse.ArgumentParser(add_help=False, parents=[test])
integration.add_argument('--python',
metavar='VERSION',
choices=SUPPORTED_PYTHON_VERSIONS + ('default',),
help='python version: %s' % ', '.join(SUPPORTED_PYTHON_VERSIONS))
integration.add_argument('--start-at',
metavar='TARGET',
help='start at the specified target').completer = complete_target
integration.add_argument('--start-at-task',
metavar='TASK',
help='start at the specified task')
integration.add_argument('--tags',
metavar='TAGS',
help='only run plays and tasks tagged with these values')
integration.add_argument('--skip-tags',
metavar='TAGS',
help='only run plays and tasks whose tags do not match these values')
integration.add_argument('--diff',
action='store_true',
help='show diff output')
integration.add_argument('--allow-destructive',
action='store_true',
help='allow destructive tests')
integration.add_argument('--allow-root',
action='store_true',
help='allow tests requiring root when not root')
integration.add_argument('--allow-disabled',
action='store_true',
help='allow tests which have been marked as disabled')
integration.add_argument('--allow-unstable',
action='store_true',
help='allow tests which have been marked as unstable')
integration.add_argument('--allow-unstable-changed',
action='store_true',
help='allow tests which have been marked as unstable when focused changes are detected')
integration.add_argument('--allow-unsupported',
action='store_true',
help='allow tests which have been marked as unsupported')
integration.add_argument('--retry-on-error',
action='store_true',
help='retry failed test with increased verbosity')
integration.add_argument('--continue-on-error',
action='store_true',
help='continue after failed test')
integration.add_argument('--debug-strategy',
action='store_true',
help='run test playbooks using the debug strategy')
integration.add_argument('--changed-all-target',
metavar='TARGET',
default='all',
help='target to run when all tests are needed')
integration.add_argument('--changed-all-mode',
metavar='MODE',
choices=('default', 'include', 'exclude'),
help='include/exclude behavior with --changed-all-target: %(choices)s')
integration.add_argument('--list-targets',
action='store_true',
help='list matching targets instead of running tests')
integration.add_argument('--no-temp-workdir',
action='store_true',
help='do not run tests from a temporary directory (use only for verifying broken tests)')
integration.add_argument('--no-temp-unicode',
action='store_true',
help='avoid unicode characters in temporary directory (use only for verifying broken tests)')
subparsers = parser.add_subparsers(metavar='COMMAND')
subparsers.required = True # work-around for python 3 bug which makes subparsers optional
posix_integration = subparsers.add_parser('integration',
parents=[integration],
help='posix integration tests')
posix_integration.set_defaults(func=command_posix_integration,
targets=walk_posix_integration_targets,
config=PosixIntegrationConfig)
add_extra_docker_options(posix_integration)
add_httptester_options(posix_integration, argparse)
network_integration = subparsers.add_parser('network-integration',
parents=[integration],
help='network integration tests')
network_integration.set_defaults(func=command_network_integration,
targets=walk_network_integration_targets,
config=NetworkIntegrationConfig)
add_extra_docker_options(network_integration, integration=False)
network_integration.add_argument('--platform',
metavar='PLATFORM',
action='append',
help='network platform/version').completer = complete_network_platform
network_integration.add_argument('--platform-collection',
type=key_value_type,
metavar='PLATFORM=COLLECTION',
action='append',
help='collection used to test platform').completer = complete_network_platform_collection
network_integration.add_argument('--platform-connection',
type=key_value_type,
metavar='PLATFORM=CONNECTION',
action='append',
help='connection used to test platform').completer = complete_network_platform_connection
network_integration.add_argument('--inventory',
metavar='PATH',
help='path to inventory used for tests')
network_integration.add_argument('--testcase',
metavar='TESTCASE',
help='limit a test to a specified testcase').completer = complete_network_testcase
windows_integration = subparsers.add_parser('windows-integration',
parents=[integration],
help='windows integration tests')
windows_integration.set_defaults(func=command_windows_integration,
targets=walk_windows_integration_targets,
config=WindowsIntegrationConfig)
add_extra_docker_options(windows_integration, integration=False)
add_httptester_options(windows_integration, argparse)
windows_integration.add_argument('--windows',
metavar='VERSION',
action='append',
help='windows version').completer = complete_windows
windows_integration.add_argument('--inventory',
metavar='PATH',
help='path to inventory used for tests')
units = subparsers.add_parser('units',
parents=[test],
help='unit tests')
units.set_defaults(func=command_units,
targets=walk_units_targets,
config=UnitsConfig)
units.add_argument('--python',
metavar='VERSION',
choices=SUPPORTED_PYTHON_VERSIONS + ('default',),
help='python version: %s' % ', '.join(SUPPORTED_PYTHON_VERSIONS))
units.add_argument('--collect-only',
action='store_true',
help='collect tests but do not execute them')
# noinspection PyTypeChecker
units.add_argument('--num-workers',
type=int,
help='number of workers to use (default: auto)')
units.add_argument('--requirements-mode',
choices=('only', 'skip'),
help=argparse.SUPPRESS)
add_extra_docker_options(units, integration=False)
sanity = subparsers.add_parser('sanity',
parents=[test],
help='sanity tests')
sanity.set_defaults(func=command_sanity,
targets=walk_sanity_targets,
config=SanityConfig)
sanity.add_argument('--test',
metavar='TEST',
action='append',
choices=[test.name for test in sanity_get_tests()],
help='tests to run').completer = complete_sanity_test
sanity.add_argument('--skip-test',
metavar='TEST',
action='append',
choices=[test.name for test in sanity_get_tests()],
help='tests to skip').completer = complete_sanity_test
sanity.add_argument('--allow-disabled',
action='store_true',
help='allow tests to run which are disabled by default')
sanity.add_argument('--list-tests',
action='store_true',
help='list available tests')
sanity.add_argument('--python',
metavar='VERSION',
choices=SUPPORTED_PYTHON_VERSIONS + ('default',),
help='python version: %s' % ', '.join(SUPPORTED_PYTHON_VERSIONS))
sanity.add_argument('--enable-optional-errors',
action='store_true',
help='enable optional errors')
add_lint(sanity)
add_extra_docker_options(sanity, integration=False)
shell = subparsers.add_parser('shell',
parents=[common],
help='open an interactive shell')
shell.add_argument('--python',
metavar='VERSION',
choices=SUPPORTED_PYTHON_VERSIONS + ('default',),
help='python version: %s' % ', '.join(SUPPORTED_PYTHON_VERSIONS))
shell.set_defaults(func=command_shell,
config=ShellConfig)
shell.add_argument('--raw',
action='store_true',
help='direct to shell with no setup')
add_environments(shell)
add_extra_docker_options(shell)
add_httptester_options(shell, argparse)
coverage_common = argparse.ArgumentParser(add_help=False, parents=[common])
add_environments(coverage_common, isolated_delegation=False)
coverage = subparsers.add_parser('coverage',
help='code coverage management and reporting')
coverage_subparsers = coverage.add_subparsers(metavar='COMMAND')
coverage_subparsers.required = True # work-around for python 3 bug which makes subparsers optional
add_coverage_analyze(coverage_subparsers, coverage_common)
coverage_combine = coverage_subparsers.add_parser('combine',
parents=[coverage_common],
help='combine coverage data and rewrite remote paths')
coverage_combine.set_defaults(func=command_coverage_combine,
config=CoverageConfig)
add_extra_coverage_options(coverage_combine)
coverage_erase = coverage_subparsers.add_parser('erase',
parents=[coverage_common],
help='erase coverage data files')
coverage_erase.set_defaults(func=command_coverage_erase,
config=CoverageConfig)
coverage_report = coverage_subparsers.add_parser('report',
parents=[coverage_common],
help='generate console coverage report')
coverage_report.set_defaults(func=command_coverage_report,
config=CoverageReportConfig)
coverage_report.add_argument('--show-missing',
action='store_true',
help='show line numbers of statements not executed')
coverage_report.add_argument('--include',
metavar='PAT1,PAT2,...',
help='include only files whose paths match one of these '
'patterns. Accepts shell-style wildcards, which must be '
'quoted.')
coverage_report.add_argument('--omit',
metavar='PAT1,PAT2,...',
help='omit files whose paths match one of these patterns. '
'Accepts shell-style wildcards, which must be quoted.')
add_extra_coverage_options(coverage_report)
coverage_html = coverage_subparsers.add_parser('html',
parents=[coverage_common],
help='generate html coverage report')
coverage_html.set_defaults(func=command_coverage_html,
config=CoverageConfig)
add_extra_coverage_options(coverage_html)
coverage_xml = coverage_subparsers.add_parser('xml',
parents=[coverage_common],
help='generate xml coverage report')
coverage_xml.set_defaults(func=command_coverage_xml,
config=CoverageConfig)
add_extra_coverage_options(coverage_xml)
env = subparsers.add_parser('env',
parents=[common],
help='show information about the test environment')
env.set_defaults(func=command_env,
config=EnvConfig)
env.add_argument('--show',
action='store_true',
help='show environment on stdout')
env.add_argument('--dump',
action='store_true',
help='dump environment to disk')
env.add_argument('--list-files',
action='store_true',
help='list files on stdout')
# noinspection PyTypeChecker
env.add_argument('--timeout',
type=int,
metavar='MINUTES',
help='timeout for future ansible-test commands (0 clears)')
if argcomplete:
argcomplete.autocomplete(parser, always_complete_options=False, validator=lambda i, k: True)
args = parser.parse_args()
if args.explain and not args.verbosity:
args.verbosity = 1
if args.color == 'yes':
args.color = True
elif args.color == 'no':
args.color = False
else:
args.color = sys.stdout.isatty()
return args
|
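The subparsers.required = True lines above work around argparse in Python 3 treating subcommands as optional by default. A minimal standalone illustration (a hypothetical 'demo' parser, unrelated to ansible-test):

import argparse

parser = argparse.ArgumentParser(prog='demo')
subparsers = parser.add_subparsers(metavar='COMMAND', dest='command')
subparsers.required = True          # without this, 'demo' with no subcommand parses fine
subparsers.add_parser('integration')
subparsers.add_parser('units')

print(parser.parse_args(['units']).command)   # units
# parser.parse_args([]) would now exit with "the following arguments are required: COMMAND"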
8,885 |
def handle_list(options):
"""Display a list of configurations available in Sopel's config directory.
:param options: parsed arguments
:type options: ``argparse.Namespace``
:return: Return 0 if everything went fine.
This command displays an unordered list of config names from Sopel's
config directory, without their extensions::
$ sopel-config list
default
custom
By default, the config directory is ``~/.sopel``. To select a different
config directory, options ``--config-dir`` can be used.
It is possible to filter by extension using the
``-e``/``--ext``/``--extension`` option; default is ``.cfg``
(the ``.`` prefix is not required).
"""
configdir = options.configdir
display_path = options.display_path
extension = options.extension
if not extension.startswith('.'):
extension = '.' + extension
configs = utils.enumerate_configs(configdir, extension)
for config_filename in configs:
if display_path:
print(os.path.join(configdir, config_filename))
else:
name, _ = os.path.splitext(config_filename)
print(name)
return 0 # successful operation
|
def handle_list(options):
"""Display a list of configurations available in Sopel's config directory.
:param options: parsed arguments
:type options: ``argparse.Namespace``
:return: 0 if everything went fine
This command displays an unordered list of config names from Sopel's
config directory, without their extensions::
$ sopel-config list
default
custom
By default, the config directory is ``~/.sopel``. To select a different
    config directory, the ``--config-dir`` option can be used.
It is possible to filter by extension using the
``-e``/``--ext``/``--extension`` option; default is ``.cfg``
(the ``.`` prefix is not required).
"""
configdir = options.configdir
display_path = options.display_path
extension = options.extension
if not extension.startswith('.'):
extension = '.' + extension
configs = utils.enumerate_configs(configdir, extension)
for config_filename in configs:
if display_path:
print(os.path.join(configdir, config_filename))
else:
name, _ = os.path.splitext(config_filename)
print(name)
return 0 # successful operation
|
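handle_list delegates the directory scan to utils.enumerate_configs, which is not shown here. A minimal sketch of the extension filtering it is expected to perform (a stand-in, not Sopel's actual helper):

import os

def enumerate_configs(configdir, extension='.cfg'):
    # Yield file names in configdir that end with the requested extension.
    if not os.path.isdir(configdir):
        return
    for name in os.listdir(configdir):
        if name.endswith(extension):
            yield name

for filename in enumerate_configs(os.path.expanduser('~/.sopel')):
    print(os.path.splitext(filename)[0])   # config name without its extension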
50,163 |
def get_mypy_type_of_runtime_value(runtime: Any) -> Optional[mypy.types.Type]:
"""Returns a mypy type object representing the type of ``runtime``.
Returns None if we can't find something that works.
"""
if runtime is None:
return mypy.types.NoneType()
if isinstance(runtime, property):
# Give up on properties to avoid issues with things that are typed as attributes.
return None
def anytype() -> mypy.types.AnyType:
return mypy.types.AnyType(mypy.types.TypeOfAny.unannotated)
if isinstance(
runtime,
(types.FunctionType, types.BuiltinFunctionType, types.MethodType, types.BuiltinMethodType),
):
builtins = get_stub("builtins")
assert builtins is not None
type_info = builtins.names["function"].node
assert isinstance(type_info, nodes.TypeInfo)
fallback = mypy.types.Instance(type_info, [anytype()])
signature = safe_inspect_signature(runtime)
if signature:
arg_types = []
arg_kinds = []
arg_names = []
for arg in signature.parameters.values():
arg_types.append(anytype())
arg_names.append(
None if arg.kind == inspect.Parameter.POSITIONAL_ONLY else arg.name
)
has_default = arg.default == inspect.Parameter.empty
if arg.kind == inspect.Parameter.POSITIONAL_ONLY:
arg_kinds.append(nodes.ARG_POS if has_default else nodes.ARG_OPT)
elif arg.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD:
arg_kinds.append(nodes.ARG_POS if has_default else nodes.ARG_OPT)
elif arg.kind == inspect.Parameter.KEYWORD_ONLY:
arg_kinds.append(nodes.ARG_NAMED if has_default else nodes.ARG_NAMED_OPT)
elif arg.kind == inspect.Parameter.VAR_POSITIONAL:
arg_kinds.append(nodes.ARG_STAR)
elif arg.kind == inspect.Parameter.VAR_KEYWORD:
arg_kinds.append(nodes.ARG_STAR2)
else:
raise AssertionError
else:
arg_types = [anytype(), anytype()]
arg_kinds = [nodes.ARG_STAR, nodes.ARG_STAR2]
arg_names = [None, None]
return mypy.types.CallableType(
arg_types,
arg_kinds,
arg_names,
ret_type=anytype(),
fallback=fallback,
is_ellipsis_args=True,
)
# Try and look up a stub for the runtime object
if isinstance(runtime, type):
runtime_type = runtime # This might be a class
else:
runtime_type = type(runtime) # Or an instance
stub = get_stub(runtime_type.__module__)
if stub is None:
return None
type_name = runtime_type.__name__
if type_name not in stub.names:
return None
type_info = stub.names[type_name].node
if isinstance(type_info, nodes.Var):
return type_info.type
if not isinstance(type_info, nodes.TypeInfo):
return None
if isinstance(runtime, tuple):
# Special case tuples so we construct a valid mypy.types.TupleType
optional_items = [get_mypy_type_of_runtime_value(v) for v in runtime]
items = [(i if i is not None else anytype()) for i in optional_items]
fallback = mypy.types.Instance(type_info, [anytype()])
return mypy.types.TupleType(items, fallback)
fallback = mypy.types.Instance(type_info, [anytype() for _ in type_info.type_vars])
value: Union[bool, int, str]
if isinstance(runtime, bytes):
value = bytes_to_human_readable_repr(runtime)
elif isinstance(runtime, enum.Enum):
value = runtime.name
elif isinstance(runtime, (bool, int, str)):
value = runtime
else:
if isinstance(runtime, type):
return mypy.types.TypeType(fallback)
return fallback
return mypy.types.LiteralType(value=value, fallback=fallback)
|
def get_mypy_type_of_runtime_value(runtime: Any) -> Optional[mypy.types.Type]:
"""Returns a mypy type object representing the type of ``runtime``.
Returns None if we can't find something that works.
"""
if runtime is None:
return mypy.types.NoneType()
if isinstance(runtime, property):
# Give up on properties to avoid issues with things that are typed as attributes.
return None
def anytype() -> mypy.types.AnyType:
return mypy.types.AnyType(mypy.types.TypeOfAny.unannotated)
if isinstance(
runtime,
(types.FunctionType, types.BuiltinFunctionType, types.MethodType, types.BuiltinMethodType),
):
builtins = get_stub("builtins")
assert builtins is not None
type_info = builtins.names["function"].node
assert isinstance(type_info, nodes.TypeInfo)
fallback = mypy.types.Instance(type_info, [anytype()])
signature = safe_inspect_signature(runtime)
if signature:
arg_types = []
arg_kinds = []
arg_names = []
for arg in signature.parameters.values():
arg_types.append(anytype())
arg_names.append(
None if arg.kind == inspect.Parameter.POSITIONAL_ONLY else arg.name
)
has_default = arg.default == inspect.Parameter.empty
if arg.kind == inspect.Parameter.POSITIONAL_ONLY:
arg_kinds.append(nodes.ARG_POS if has_default else nodes.ARG_OPT)
elif arg.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD:
arg_kinds.append(nodes.ARG_POS if has_default else nodes.ARG_OPT)
elif arg.kind == inspect.Parameter.KEYWORD_ONLY:
arg_kinds.append(nodes.ARG_NAMED if has_default else nodes.ARG_NAMED_OPT)
elif arg.kind == inspect.Parameter.VAR_POSITIONAL:
arg_kinds.append(nodes.ARG_STAR)
elif arg.kind == inspect.Parameter.VAR_KEYWORD:
arg_kinds.append(nodes.ARG_STAR2)
else:
raise AssertionError
else:
arg_types = [anytype(), anytype()]
arg_kinds = [nodes.ARG_STAR, nodes.ARG_STAR2]
arg_names = [None, None]
return mypy.types.CallableType(
arg_types,
arg_kinds,
arg_names,
ret_type=anytype(),
fallback=fallback,
is_ellipsis_args=True,
)
# Try and look up a stub for the runtime object
if isinstance(runtime, type):
runtime_type = runtime # This might be a class
else:
runtime_type = type(runtime) # Or an instance
stub = get_stub(runtime_type.__module__)
if stub is None:
return None
type_name = getattr(runtime_type, "__name__", None)
if not isinstance(type_name, str) or type_name not in stub.names:
return None
type_info = stub.names[type_name].node
if isinstance(type_info, nodes.Var):
return type_info.type
if not isinstance(type_info, nodes.TypeInfo):
return None
if isinstance(runtime, tuple):
# Special case tuples so we construct a valid mypy.types.TupleType
optional_items = [get_mypy_type_of_runtime_value(v) for v in runtime]
items = [(i if i is not None else anytype()) for i in optional_items]
fallback = mypy.types.Instance(type_info, [anytype()])
return mypy.types.TupleType(items, fallback)
fallback = mypy.types.Instance(type_info, [anytype() for _ in type_info.type_vars])
value: Union[bool, int, str]
if isinstance(runtime, bytes):
value = bytes_to_human_readable_repr(runtime)
elif isinstance(runtime, enum.Enum):
value = runtime.name
elif isinstance(runtime, (bool, int, str)):
value = runtime
else:
if isinstance(runtime, type):
return mypy.types.TypeType(fallback)
return fallback
return mypy.types.LiteralType(value=value, fallback=fallback)
|
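The modified version guards the stub lookup with getattr because some runtime objects expose a __name__ that is missing or not a plain string, which would otherwise raise or produce an unusable lookup key. A small illustration of the guarded pattern, using a made-up stub table:

stub_names = {"int": "<stub for int>", "str": "<stub for str>"}   # made-up stub table

def lookup(runtime_type):
    type_name = getattr(runtime_type, "__name__", None)
    if not isinstance(type_name, str) or type_name not in stub_names:
        return None
    return stub_names[type_name]

print(lookup(int))          # <stub for int>
print(lookup(type(None)))   # None ('NoneType' is not in the table)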
8,859 |
def get_pid_filename(settings, pid_dir):
"""Get the pid file name in ``pid_dir`` from the given ``settings``.
:param settings: Sopel config
:type settings: :class:`sopel.config.Config`
:param str pid_dir: path to the pid directory
:return: absolute filename of the pid file
By default, it's ``sopel.pid``, but if the configuration's basename is not
``default`` then it will use it to generate the pid file as
``sopel-{basename}.pid`` instead.
"""
name = 'sopel.pid'
if settings.basename != 'default':
filename = os.path.basename(settings.filename)
basename, ext = os.path.splitext(filename)
if ext != '.cfg':
basename = filename
name = 'sopel-%s.pid' % basename
return os.path.abspath(os.path.join(pid_dir, name))
|
def get_pid_filename(settings, pid_dir):
"""Get the pid file name in ``pid_dir`` from the given ``settings``.
:param settings: Sopel config
:type settings: :class:`sopel.config.Config`
:param str pid_dir: path to the pid directory
:return: absolute filename of the pid file
By default, it's ``sopel.pid``, but if the configuration's basename is not
``default`` then it will be used to generate the pid file name as
``sopel-{basename}.pid`` instead.
"""
name = 'sopel.pid'
if settings.basename != 'default':
filename = os.path.basename(settings.filename)
basename, ext = os.path.splitext(filename)
if ext != '.cfg':
basename = filename
name = 'sopel-%s.pid' % basename
return os.path.abspath(os.path.join(pid_dir, name))
|
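A small worked sketch of the naming rule above (assumed config paths; Sopel's Config object is replaced by plain values):

import os

def pid_name(config_filename, basename):
    name = 'sopel.pid'
    if basename != 'default':
        filename = os.path.basename(config_filename)
        root, ext = os.path.splitext(filename)
        if ext != '.cfg':
            root = filename
        name = 'sopel-%s.pid' % root
    return name

print(pid_name('/home/bot/.sopel/default.cfg', 'default'))    # sopel.pid
print(pid_name('/home/bot/.sopel/freenode.cfg', 'freenode'))  # sopel-freenode.pid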
57,824 |
def parse_results(ip: str, raw_result: Dict[str, Any], reliability: DBotScoreReliability) -> List[CommandResults]:
command_results = []
# default values
asn = as_owner = None
feed_related_indicators = []
if raw_result:
hostname = raw_result.get('hostname')
feed_related_indicators.append(
Common.FeedRelatedIndicators(hostname,
FeedIndicatorType.URL if urlRegex.find(hostname)
else FeedIndicatorType.Domain))
if 'asn' in raw_result:
asn = demisto.get(raw_result, 'asn.asn')
as_owner = demisto.get(raw_result, 'asn.name')
as_domain = demisto.get(raw_result, 'asn.domain')
if as_domain:
feed_related_indicators.append(Common.FeedRelatedIndicators(as_domain, FeedIndicatorType.Domain))
elif 'org' in raw_result:
org = raw_result.get('org', '')
org_parts = org.split(' ')
if ' ' in org:
asn, as_owner = org_parts[0], ' '.join(org_parts[1:])
organization = {
'Name': demisto.get(raw_result, 'company.name'),
'Type': demisto.get(raw_result, 'company.type')
} if 'company' in raw_result else None
company_domain = demisto.get(raw_result, 'company.domain')
if company_domain is not None:
feed_related_indicators.append(Common.FeedRelatedIndicators(company_domain, FeedIndicatorType.Domain))
abuse = {
'Address': demisto.get(raw_result, 'abuse.address'),
'Country': demisto.get(raw_result, 'abuse.country'),
'Name': demisto.get(raw_result, 'abuse.name'),
'Network': demisto.get(raw_result, 'abuse.network'),
'Phone': demisto.get(raw_result, 'abuse.phone'),
'Email': demisto.get(raw_result, 'abuse.email')
} if 'abuse' in raw_result else None
tags = []
for (tag_path, tag_name) in (('privacy.hosting', 'hosting'),
('privacy.proxy', 'proxy'),
('privacy.tor', 'tor'),
('privacy.vpn', 'vpn')):
if demisto.get(raw_result, tag_path):
tags.append(tag_name)
city = raw_result.get('city')
region = raw_result.get('region')
postal = raw_result.get('postal')
country = raw_result.get('country')
description = ', '.join(filter(None, [city, region, postal, country]))
# parses geolocation
lat = lon = ''
loc = raw_result.get('loc', '') # empty string as default on purpose,
if ',' in loc:
coordinates = loc.split(',')
lat, lon = float(coordinates[0]), float(coordinates[1])
entry_context = {'Address': raw_result.get('ip'),
'Hostname': hostname,
'ASN': asn,
'ASOwner': as_owner,
'Tags': tags,
'Organization': organization,
'Geo': {'Location': loc, 'Country': country, 'Description': description},
'Registrar': {'Abuse': abuse} if abuse else None}
outputs_key_field = 'Address' # marks the ip address
indicator = Common.IP(
ip=ip,
dbot_score=Common.DBotScore(indicator='ip',
indicator_type=DBotScoreType.IP,
integration_name='IPinfo_v2',
reliability=reliability,
score=Common.DBotScore.NONE),
asn=asn,
hostname=hostname,
feed_related_indicators=feed_related_indicators,
geo_latitude=str(lat) if lat else None,
geo_longitude=str(lon) if lon else None,
geo_description=description or None,
geo_country=country,
tags=','.join(tags))
command_results.append(CommandResults(
readable_output=tableToMarkdown(f'IPinfo results for {ip}', raw_result),
raw_response=raw_result,
outputs_prefix='IPinfo.IP',
outputs=entry_context,
outputs_key_field=outputs_key_field,
indicator=indicator))
if lat and lon:
map_output = CommandResults(raw_response={'lat': lat, 'lng': lon},
entry_type=EntryType.MAP_ENTRY_TYPE,
outputs_key_field=outputs_key_field,
indicator=indicator)
command_results.append(map_output)
return command_results
|
def parse_results(ip: str, raw_result: Dict[str, Any], reliability: DBotScoreReliability) -> List[CommandResults]:
command_results = []
# default values
asn = as_owner = None
feed_related_indicators = []
if raw_result:
hostname = raw_result.get('hostname')
feed_related_indicators.append(
Common.FeedRelatedIndicators(hostname,
FeedIndicatorType.URL if urlRegex.find(hostname)
else FeedIndicatorType.Domain))
if 'asn' in raw_result:
asn = demisto.get(raw_result, 'asn.asn')
as_owner = demisto.get(raw_result, 'asn.name')
as_domain = demisto.get(raw_result, 'asn.domain')
if as_domain:
feed_related_indicators.append(Common.FeedRelatedIndicators(as_domain, FeedIndicatorType.Domain))
elif 'org' in raw_result:
org = raw_result.get('org', '')
if ' ' in org:
org_parts = org.split(' ')
asn, as_owner = org_parts[0], ' '.join(org_parts[1:])
organization = {
'Name': demisto.get(raw_result, 'company.name'),
'Type': demisto.get(raw_result, 'company.type')
} if 'company' in raw_result else None
company_domain = demisto.get(raw_result, 'company.domain')
if company_domain is not None:
feed_related_indicators.append(Common.FeedRelatedIndicators(company_domain, FeedIndicatorType.Domain))
abuse = {
'Address': demisto.get(raw_result, 'abuse.address'),
'Country': demisto.get(raw_result, 'abuse.country'),
'Name': demisto.get(raw_result, 'abuse.name'),
'Network': demisto.get(raw_result, 'abuse.network'),
'Phone': demisto.get(raw_result, 'abuse.phone'),
'Email': demisto.get(raw_result, 'abuse.email')
} if 'abuse' in raw_result else None
tags = []
for (tag_path, tag_name) in (('privacy.hosting', 'hosting'),
('privacy.proxy', 'proxy'),
('privacy.tor', 'tor'),
('privacy.vpn', 'vpn')):
if demisto.get(raw_result, tag_path):
tags.append(tag_name)
city = raw_result.get('city')
region = raw_result.get('region')
postal = raw_result.get('postal')
country = raw_result.get('country')
description = ', '.join(filter(None, [city, region, postal, country]))
# parses geolocation
lat = lon = ''
loc = raw_result.get('loc', '') # empty string as default on purpose,
if ',' in loc:
coordinates = loc.split(',')
lat, lon = float(coordinates[0]), float(coordinates[1])
entry_context = {'Address': raw_result.get('ip'),
'Hostname': hostname,
'ASN': asn,
'ASOwner': as_owner,
'Tags': tags,
'Organization': organization,
'Geo': {'Location': loc, 'Country': country, 'Description': description},
'Registrar': {'Abuse': abuse} if abuse else None}
outputs_key_field = 'Address' # marks the ip address
indicator = Common.IP(
ip=ip,
dbot_score=Common.DBotScore(indicator='ip',
indicator_type=DBotScoreType.IP,
integration_name='IPinfo_v2',
reliability=reliability,
score=Common.DBotScore.NONE),
asn=asn,
hostname=hostname,
feed_related_indicators=feed_related_indicators,
geo_latitude=str(lat) if lat else None,
geo_longitude=str(lon) if lon else None,
geo_description=description or None,
geo_country=country,
tags=','.join(tags))
command_results.append(CommandResults(
readable_output=tableToMarkdown(f'IPinfo results for {ip}', raw_result),
raw_response=raw_result,
outputs_prefix='IPinfo.IP',
outputs=entry_context,
outputs_key_field=outputs_key_field,
indicator=indicator))
if lat and lon:
map_output = CommandResults(raw_response={'lat': lat, 'lng': lon},
entry_type=EntryType.MAP_ENTRY_TYPE,
outputs_key_field=outputs_key_field,
indicator=indicator)
command_results.append(map_output)
return command_results
|
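The change in the modified version computes the split of the 'org' string only when it actually contains a space. A quick standalone sketch of that AS-number / owner split (fabricated org strings for illustration):

def split_org(org: str):
    # e.g. "AS13335 Cloudflare, Inc." -> ("AS13335", "Cloudflare, Inc.")
    asn = as_owner = None
    if ' ' in org:
        org_parts = org.split(' ')
        asn, as_owner = org_parts[0], ' '.join(org_parts[1:])
    return asn, as_owner

print(split_org("AS13335 Cloudflare, Inc."))  # ('AS13335', 'Cloudflare, Inc.')
print(split_org("AS13335"))                   # (None, None)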
55,778 |
def _maybe_rerun_with_macos_fixes():
"""
Apply some fixes needed in macOS, which might involve
running this script again using a different sys.executable.
1) Quick fix for Big Sur Python 3.9 and Qt 5.
No relaunch needed.
2) Using `pythonw` instead of `python`.
This can be used to ensure we're using a framework
build of Python on macOS, which fixes frozen menubar issues
in some macOS versions.
3) Make sure the menu bar uses 'napari' as the display name.
This requires relaunching the app from a symlink to the
desired python executable, conveniently named 'napari'.
"""
if sys.platform != "darwin":
return
if "_NAPARI_RERUN_WITH_FIXES" in os.environ:
# This function already ran, do not recurse!
# We also restore sys.executable to its initial value,
# if we used a symlink
if exe := os.environ.pop("_NAPARI_SYMLINKED_EXECUTABLE", ""):
sys.executable = exe
return
import platform
import subprocess
from tempfile import mkdtemp
from qtpy import API_NAME
# In principle, we will relaunch to the same python we were using
executable = sys.executable
cwd = Path.cwd()
_MACOS_AT_LEAST_CATALINA = int(platform.release().split('.')[0]) >= 19
_MACOS_AT_LEAST_BIG_SUR = int(platform.release().split('.')[0]) >= 20
_RUNNING_CONDA = "CONDA_PREFIX" in os.environ
_RUNNING_PYTHONW = "PYTHONEXECUTABLE" in os.environ
# 1) quick fix for Big Sur py3.9 and qt 5
# https://github.com/napari/napari/pull/1894
if _MACOS_AT_LEAST_BIG_SUR and '6' not in API_NAME:
os.environ['QT_MAC_WANTS_LAYER'] = '1'
# Create the env copy now because the following changes
# should not persist in the current process in case
# we do not run the subprocess!
env = os.environ.copy()
# 2) Ensure we're always using a "framework build" on the latest
# macOS to ensure menubar works without needing to refocus napari.
# We try this for macOS later than the Catalina release
# See https://github.com/napari/napari/pull/1554 and
# https://github.com/napari/napari/issues/380#issuecomment-659656775
# and https://github.com/ContinuumIO/anaconda-issues/issues/199
if (
_MACOS_AT_LEAST_CATALINA
and not _MACOS_AT_LEAST_BIG_SUR
and _RUNNING_CONDA
and not _RUNNING_PYTHONW
):
pythonw_path = Path(sys.exec_prefix) / 'bin' / 'pythonw'
if pythonw_path.exists():
# Use this one instead of sys.executable to relaunch
# the subprocess
executable = pythonw_path
else:
msg = (
'pythonw executable not found.\n'
'To unfreeze the menubar on macOS, '
'click away from napari to another app, '
'then reactivate napari. To avoid this problem, '
'please install python.app in conda using:\n'
'conda install -c conda-forge python.app'
)
warnings.warn(msg)
# 3) Make sure the app name in the menu bar is 'napari', not 'python'
tempdir = None
_NEEDS_SYMLINK = (
# When napari is launched from the conda bundle shortcut
# it already has the right 'napari' name in the app title
# and __CFBundleIdentifier is set to 'com.napari._(<version>)'
"napari" not in os.environ.get("__CFBundleIdentifier", "")
# with a sys.executable named napari,
# macOS should have picked the right name already
or os.path.basename(executable) != "napari"
)
if _NEEDS_SYMLINK:
tempdir = mkdtemp(prefix="symlink-to-fix-macos-menu-name-")
# By using a symlink with basename napari
# we make macOS take 'napari' as the program name
napari_link = os.path.join(tempdir, "napari")
os.symlink(executable, napari_link)
# Pass original executable to the subprocess so it can restore it later
env["_NAPARI_SYMLINKED_EXECUTABLE"] = executable
executable = napari_link
# if at this point 'executable' is different from 'sys.executable', we
# need to launch the subprocess to apply the fixes
if sys.executable != executable:
env["_NAPARI_RERUN_WITH_FIXES"] = "1"
if Path(sys.argv[0]).name == "napari":
# launched through entry point, we do that again to
# avoid issues with working directory getting into sys.path
cmd = [executable, sys.argv[0]]
else: # we assume it must have been launched via '-m' syntax
cmd = [executable, "-m", "napari"]
# Append original command line arguments.
if len(sys.argv) > 1:
cmd.extend(sys.argv[1:])
try:
result = subprocess.run(cmd, env=env, cwd=cwd)
sys.exit(result.returncode)
finally:
if tempdir is not None:
import shutil
shutil.rmtree(tempdir)
|
def _maybe_rerun_with_macos_fixes():
"""
Apply some fixes needed in macOS, which might involve
running this script again using a different sys.executable.
1) Quick fix for Big Sur Python 3.9 and Qt 5.
No relaunch needed.
2) Using `pythonw` instead of `python`.
This can be used to ensure we're using a framework
build of Python on macOS, which fixes frozen menubar issues
in some macOS versions.
3) Make sure the menu bar uses 'napari' as the display name.
This requires relaunching the app from a symlink to the
desired python executable, conveniently named 'napari'.
"""
if sys.platform != "darwin":
return
if "_NAPARI_RERUN_WITH_FIXES" in os.environ:
# This function already ran, do not recurse!
# We also restore sys.executable to its initial value,
# if we used a symlink
if exe := os.environ.pop("_NAPARI_SYMLINKED_EXECUTABLE", ""):
sys.executable = exe
return
import platform
import subprocess
from tempfile import mkdtemp
from qtpy import API_NAME
# In principle, we will relaunch to the same python we were using
executable = sys.executable
cwd = Path.cwd()
_MACOS_AT_LEAST_CATALINA = int(platform.release().split('.')[0]) >= 19
_MACOS_AT_LEAST_BIG_SUR = int(platform.release().split('.')[0]) >= 20
_RUNNING_CONDA = "CONDA_PREFIX" in os.environ
_RUNNING_PYTHONW = "PYTHONEXECUTABLE" in os.environ
# 1) quick fix for Big Sur py3.9 and qt 5
# https://github.com/napari/napari/pull/1894
if _MACOS_AT_LEAST_BIG_SUR and '6' not in API_NAME:
os.environ['QT_MAC_WANTS_LAYER'] = '1'
# Create the env copy now because the following changes
# should not persist in the current process in case
# we do not run the subprocess!
env = os.environ.copy()
# 2) Ensure we're always using a "framework build" on the latest
# macOS to ensure menubar works without needing to refocus napari.
# We try this for macOS later than the Catalina release
# See https://github.com/napari/napari/pull/1554 and
# https://github.com/napari/napari/issues/380#issuecomment-659656775
# and https://github.com/ContinuumIO/anaconda-issues/issues/199
if (
_MACOS_AT_LEAST_CATALINA
and not _MACOS_AT_LEAST_BIG_SUR
and _RUNNING_CONDA
and not _RUNNING_PYTHONW
):
pythonw_path = Path(sys.exec_prefix) / 'bin' / 'pythonw'
if pythonw_path.exists():
# Use this one instead of sys.executable to relaunch
# the subprocess
executable = pythonw_path
else:
msg = (
'pythonw executable not found.\n'
'To unfreeze the menubar on macOS, '
'click away from napari to another app, '
'then reactivate napari. To avoid this problem, '
'please install python.app in conda using:\n'
'conda install -c conda-forge python.app'
)
warnings.warn(msg)
# 3) Make sure the app name in the menu bar is 'napari', not 'python'
tempdir = None
_NEEDS_SYMLINK = (
# When napari is launched from the conda bundle shortcut
# it already has the right 'napari' name in the app title
# and __CFBundleIdentifier is set to 'com.napari._(<version>)'
"napari" not in os.environ.get("__CFBundleIdentifier", "")
# with a sys.executable named napari,
# macOS should have picked the right name already
or os.path.basename(executable) != "napari"
)
if _NEEDS_SYMLINK:
tempdir = mkdtemp(prefix="symlink-to-fix-macos-menu-name-")
# By using a symlink with basename napari
# we make macOS take 'napari' as the program name
napari_link = os.path.join(tempdir, "napari")
os.symlink(executable, napari_link)
# Pass original executable to the subprocess so it can restore it later
env["_NAPARI_SYMLINKED_EXECUTABLE"] = executable
executable = napari_link
# if at this point 'executable' is different from 'sys.executable', we
# need to launch the subprocess to apply the fixes
if sys.executable != executable:
env["_NAPARI_RERUN_WITH_FIXES"] = "1"
if Path(sys.argv[0]).name == "napari":
# launched through entry point, we do that again to avoid
# issues with working directory getting into sys.path (#5007)
cmd = [executable, sys.argv[0]]
else: # we assume it must have been launched via '-m' syntax
cmd = [executable, "-m", "napari"]
# Append original command line arguments.
if len(sys.argv) > 1:
cmd.extend(sys.argv[1:])
try:
result = subprocess.run(cmd, env=env, cwd=cwd)
sys.exit(result.returncode)
finally:
if tempdir is not None:
import shutil
shutil.rmtree(tempdir)
|
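As an aside, the symlink trick in step 3 is self-contained enough to reuse elsewhere; below is a minimal sketch of the same idea outside napari (the helper name and the "_RELAUNCHED" guard variable are made up for illustration):

import os
import subprocess
import sys
from tempfile import mkdtemp


def relaunch_with_app_name(app_name="napari"):
    """Hypothetical helper: on macOS, relaunch the current script through a
    symlink to the interpreter named `app_name`, so the menu bar shows that
    name instead of 'python'."""
    if sys.platform != "darwin" or os.environ.get("_RELAUNCHED"):
        return  # not macOS, or this process is already the relaunched copy
    link = os.path.join(mkdtemp(prefix="app-name-symlink-"), app_name)
    os.symlink(sys.executable, link)
    env = {**os.environ, "_RELAUNCHED": "1"}
    # sys.argv[0] is the script path; the symlink stands in for the interpreter
    raise SystemExit(subprocess.run([link, *sys.argv], env=env).returncode)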
58,745 |
def test_rasa_x_raises_warning_and_exits_without_production_flag():
args = argparse.Namespace(loglevel=None, log_file=None, production=None)
with pytest.raises(SystemExit):
with pytest.warns(
UserWarning,
match="Running Rasa X in local mode is no longer supported as Rasa has "
"stopped supporting the Community Edition (free version) of ‘Rasa X’."
"For more information please see https://rasa.com/blog/rasa-x-community-edition-changes/",
):
x.rasa_x(args)
|
def test_rasa_x_raises_warning_and_exits_without_production_flag():
args = argparse.Namespace(loglevel=None, log_file=None, production=None)
with pytest.raises(SystemExit):
with pytest.warns(
UserWarning,
match="Running Rasa X in local mode is no longer supported as Rasa has "
"stopped supporting the Community Edition (free version) of ‘Rasa X’."
"For more information please see"
"https://rasa.com/blog/rasa-x-community-edition-changes/",
):
x.rasa_x(args)
|
2,579 |
def close_issue_if_opened():
print("Test has no failures!")
issue = get_issue()
if issue is not None:
print(f"Closing issue #{issue.number}")
new_body = (
"## Closed issue because CI is no longer failing! ✅\n\n"
f"[Successful run]({args.link_to_ci_run})\n\n"
"## Previous failing issue\n\n"
f"{issue.body}"
)
issue.edit(state="closed", body=new_body)
sys.exit()
|
def close_issue_if_opened():
print("Test has no failures!")
issue = get_issue()
if issue is not None:
print(f"Closing issue #{issue.number}")
new_body = (
"## Closed issue because CI is no longer failing! ✅\n\n"
f"[Successful run]({args.link_to_ci_run})\n\n"
"## Previous failure report\n\n"
f"{issue.body}"
)
issue.edit(state="closed", body=new_body)
sys.exit()
|
28,973 |
def dynamic_cooldown(
cooldown: Callable[[Context[Any]], Optional[Cooldown]],
type: Union[BucketType, Callable[[Context[Any]], Any]],
) -> Callable[[T], T]:
"""A decorator that adds a dynamic cooldown to a :class:`.Command`
This differs from :func:`.cooldown` in that it takes a function that
accepts a single parameter of type :class:`.Context` and must
return a :class:`.cooldowns.Cooldown` or ``None``.
If ``None`` is returned then that cooldown is effectively bypassed.
A cooldown allows a command to only be used a specific amount
of times in a specific time frame. These cooldowns can be based
either on a per-guild, per-channel, per-user, per-role or global basis.
Denoted by the third argument of ``type`` which must be of enum
type :class:`.BucketType`.
If a cooldown is triggered, then :exc:`.CommandOnCooldown` is triggered in
:func:`.on_command_error` and the local error handler.
A command can only have a single cooldown.
.. versionadded:: 2.0
Parameters
------------
cooldown: Callable[[:class:`.Context`], Optional[:class:`.cooldowns.Cooldown`]]
A function that takes a message and returns a cooldown that will
apply to this invocation or ``None`` if the cooldown should be bypassed.
type: :class:`.BucketType`
The type of cooldown to have.
"""
if not callable(cooldown):
raise TypeError("A callable must be provided")
if type is BucketType.default:
raise ValueError('BucketType.default cannot be used in dynamic cooldowns')
def decorator(func: Union[Command, CoroFunc]) -> Union[Command, CoroFunc]:
if isinstance(func, Command):
func._buckets = DynamicCooldownMapping(cooldown, type)
else:
func.__commands_cooldown__ = DynamicCooldownMapping(cooldown, type)
return func
return decorator # type: ignore
|
def dynamic_cooldown(
cooldown: Callable[[Context[Any]], Optional[Cooldown]],
type: Union[BucketType, Callable[[Context[Any]], Any]],
) -> Callable[[T], T]:
"""A decorator that adds a dynamic cooldown to a :class:`.Command`
This differs from :func:`.cooldown` in that it takes a function that
accepts a single parameter of type :class:`.Context` and must
return a :class:`~discord.app_commands.Cooldown` or ``None``.
If ``None`` is returned then that cooldown is effectively bypassed.
A cooldown allows a command to only be used a specific amount
of times in a specific time frame. These cooldowns can be based
either on a per-guild, per-channel, per-user, per-role or global basis.
Denoted by the third argument of ``type`` which must be of enum
type :class:`.BucketType`.
If a cooldown is triggered, then :exc:`.CommandOnCooldown` is triggered in
:func:`.on_command_error` and the local error handler.
A command can only have a single cooldown.
.. versionadded:: 2.0
Parameters
------------
cooldown: Callable[[:class:`.Context`], Optional[:class:`.cooldowns.Cooldown`]]
A function that takes a message and returns a cooldown that will
apply to this invocation or ``None`` if the cooldown should be bypassed.
type: :class:`.BucketType`
The type of cooldown to have.
"""
if not callable(cooldown):
raise TypeError("A callable must be provided")
if type is BucketType.default:
raise ValueError('BucketType.default cannot be used in dynamic cooldowns')
def decorator(func: Union[Command, CoroFunc]) -> Union[Command, CoroFunc]:
if isinstance(func, Command):
func._buckets = DynamicCooldownMapping(cooldown, type)
else:
func.__commands_cooldown__ = DynamicCooldownMapping(cooldown, type)
return func
return decorator # type: ignore
|
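For context, a typical use of this decorator looks like the hedged sketch below (the owner id 123 and the command body are invented; you would normally register the command on a Bot or Cog as usual):

from typing import Optional

from discord.ext import commands


def owner_exempt_cooldown(ctx: commands.Context) -> Optional[commands.Cooldown]:
    if ctx.author.id == 123:  # hypothetical bot-owner id: bypass the cooldown
        return None
    return commands.Cooldown(rate=1, per=30.0)  # otherwise once per 30 seconds


@commands.command()
@commands.dynamic_cooldown(owner_exempt_cooldown, commands.BucketType.user)
async def ping(ctx: commands.Context) -> None:
    await ctx.send("pong")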
33,458 |
def main():
parser = argparse.ArgumentParser(
"python -m can.logconvert",
description="Convert a log file from one format to another.",
)
parser.add_argument(
"-o",
"--output",
dest="output",
help="""Output filename, type dependent on suffix see can.LogReader.""",
default=None,
required=True,
)
parser.add_argument(
"-s",
"--file_size",
dest="file_size",
type=int,
help="""Maximum file size in bytes. Rotate log file when size threshold is reached.""",
default=None,
)
parser.add_argument(
"infile",
metavar="input-file",
type=str,
help="Log file to convert from. For supported types see can.LogReader.",
)
# print help message when no arguments were given
if len(sys.argv) < 2:
parser.print_help(sys.stderr)
raise SystemExit(errno.EINVAL)
results = parser.parse_args()
reader = LogReader(results.infile)
if results.file_size:
logger = SizedRotatingLogger(
base_filename=results.output, max_bytes=results.file_size
)
else:
logger = Logger(filename=results.output)
try:
for m in reader: # pylint: disable=not-an-iterable
logger(m)
except KeyboardInterrupt:
pass
finally:
logger.stop()
|
def main():
parser = argparse.ArgumentParser(
"python -m can.logconvert",
description="Convert a log file from one format to another.",
)
parser.add_argument(
"-o",
"--output",
dest="output",
help="""Output filename, type dependent on suffix see can.LogReader.""",
default=None,
required=True,
)
parser.add_argument(
"-s",
"--file_size",
dest="file_size",
type=int,
help="Maximum file size in bytes. Rotate log file when size threshold is reached.",
default=None,
)
parser.add_argument(
"infile",
metavar="input-file",
type=str,
help="Log file to convert from. For supported types see can.LogReader.",
)
# print help message when no arguments were given
if len(sys.argv) < 2:
parser.print_help(sys.stderr)
raise SystemExit(errno.EINVAL)
results = parser.parse_args()
reader = LogReader(results.infile)
if results.file_size:
logger = SizedRotatingLogger(
base_filename=results.output, max_bytes=results.file_size
)
else:
logger = Logger(filename=results.output)
try:
for m in reader: # pylint: disable=not-an-iterable
logger(m)
except KeyboardInterrupt:
pass
finally:
logger.stop()
|
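As a usage illustration (the file names here are made up), the converter is meant to be run as a module, with the output format chosen by the suffix of the -o argument:

python -m can.logconvert -o trace.asc candump.log
python -m can.logconvert -s 1000000 -o trace.log trace.asc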
31,508 |
def return_results(results):
"""
    This function wraps demisto.results() and supports multiple result types.
:type results: ``CommandResults`` or ``str`` or ``dict`` or ``BaseWidget`` or ``list``
:param results: A result object to return as a War-Room entry.
:return: None
:rtype: ``None``
"""
if results is None:
# backward compatibility reasons
demisto.results(None)
return
elif results and isinstance(results, list) and len(results) > 0:
result_list = []
for result in results:
if isinstance(result, (dict, str)):
# Results of type dict or str are of the old results format and work with demisto.results()
result_list.append(result)
else:
# The rest are of the new format and have a corresponding function (to_context, to_display, etc...)
return_results(result)
if result_list:
demisto.results(result_list)
elif isinstance(results, CommandResults):
demisto.results(results.to_context())
elif isinstance(results, BaseWidget):
demisto.results(results.to_display())
elif isinstance(results, GetMappingFieldsResponse):
demisto.results(results.extract_mapping())
elif isinstance(results, GetRemoteDataResponse):
demisto.results(results.extract_for_local())
elif isinstance(results, GetModifiedRemoteDataResponse):
demisto.results(results.to_entry())
elif hasattr(results, 'to_entry'):
demisto.results(results.to_entry())
else:
demisto.results(results)
|
def return_results(results):
"""
    This function wraps demisto.results() and supports multiple result types.
:type results: ``CommandResults`` or ``str`` or ``dict`` or ``BaseWidget`` or ``list``
:param results: A result object to return as a War-Room entry.
:return: None
:rtype: ``None``
"""
if results is None:
# backward compatibility reasons
demisto.results(None)
return
elif results and isinstance(results, list):
result_list = []
for result in results:
if isinstance(result, (dict, str)):
# Results of type dict or str are of the old results format and work with demisto.results()
result_list.append(result)
else:
# The rest are of the new format and have a corresponding function (to_context, to_display, etc...)
return_results(result)
if result_list:
demisto.results(result_list)
elif isinstance(results, CommandResults):
demisto.results(results.to_context())
elif isinstance(results, BaseWidget):
demisto.results(results.to_display())
elif isinstance(results, GetMappingFieldsResponse):
demisto.results(results.extract_mapping())
elif isinstance(results, GetRemoteDataResponse):
demisto.results(results.extract_for_local())
elif isinstance(results, GetModifiedRemoteDataResponse):
demisto.results(results.to_entry())
elif hasattr(results, 'to_entry'):
demisto.results(results.to_entry())
else:
demisto.results(results)
|
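A hedged sketch of the usual calling pattern from a command implementation (the "Example.IP" context prefix and the payload are invented for illustration):

def ip_reputation_command(args):
    data = {"Address": args.get("ip"), "Score": 1}  # made-up result payload
    return CommandResults(
        outputs_prefix="Example.IP",       # hypothetical context path
        outputs_key_field="Address",
        outputs=data,
        readable_output=tableToMarkdown("IP reputation", data),
    )


return_results(ip_reputation_command(demisto.args()))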
3,446 |
def parse_size(value, size):
"""Returns in total bytes"""
try:
size_value = float(value)
except ValueError:
raise InvalidQuery(f"{value} is not a valid size value")
if size == "bit":
byte = size_value / 8
elif size == "nb":
byte = size_value / 2
elif size == "bytes":
byte = size_value
elif size == "kb":
byte = size_value * 1024
elif size == "mb":
byte = size_value * 1024**2
elif size == "gb":
byte = size_value * 1024**3
elif size == "tb":
byte = size_value * 1024**4
elif size == "pb":
byte = size_value * 1024**5
elif size == "eb":
byte = size_value * 1024**6
elif size == "zb":
byte = size_value * 1024**7
elif size == "yb":
byte = size_value * 1024**8
else:
raise InvalidQuery(
f"{size} is not a valid size type, must be bit, bytes, kb, mb, gb, tb, pb, eb, zb, yb"
)
return byte
|
def parse_size(value, size):
"""Returns in total bytes"""
try:
size_value = float(value)
except ValueError:
raise InvalidQuery(f"{value} is not a valid size value")
if size == "bit":
byte = size_value / 8
elif size == "nb":
byte = size_value / 2
elif size == "bytes":
byte = size_value
elif size == "kb":
byte = size_value * 1024
elif size == "mb":
byte = size_value * 1024**2
elif size == "gb":
byte = size_value * 1024**3
elif size == "tb":
byte = size_value * 1024**4
elif size == "pb":
byte = size_value * 1024**5
elif size == "eb":
byte = size_value * 1024**6
elif size == "zb":
byte = size_value * 1024**7
elif size == "yb":
byte = size_value * 1024**8
else:
raise InvalidQuery(
f"{size} is not a valid size type, must be bit, nb, bytes, kb, mb, gb, tb, pb, eb, zb, yb"
)
return byte
|
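A quick worked check of the unit table above, using the 1024-based multipliers defined in the function:

assert parse_size("1.5", "mb") == 1.5 * 1024 ** 2   # 1572864.0 bytes
assert parse_size("8", "bit") == 1.0                # eight bits make one byte
assert parse_size("2", "nb") == 1.0                 # a nibble is half a byte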
30,556 |
def fetch_incidents(service):
last_run = demisto.getLastRun() and demisto.getLastRun()['time']
search_offset = demisto.getLastRun().get('offset', 0)
incidents = []
current_time_for_fetch = datetime.utcnow()
if demisto.get(demisto.params(), 'timezone'):
timezone = demisto.params()['timezone']
current_time_for_fetch = current_time_for_fetch + timedelta(minutes=int(timezone))
now = current_time_for_fetch.strftime(SPLUNK_TIME_FORMAT)
if demisto.get(demisto.params(), 'useSplunkTime'):
now = get_current_splunk_time(service)
current_time_for_fetch = datetime.strptime(now, SPLUNK_TIME_FORMAT)
if len(last_run) == 0:
fetch_time_in_minutes = parse_time_to_minutes()
current_time_for_fetch = current_time_for_fetch - timedelta(minutes=fetch_time_in_minutes)
last_run = current_time_for_fetch.strftime(SPLUNK_TIME_FORMAT)
earliest_fetch_time_fieldname = demisto.params().get("earliest_fetch_time_fieldname", "index_earliest")
latest_fetch_time_fieldname = demisto.params().get("latest_fetch_time_fieldname", "index_latest")
kwargs_oneshot = {earliest_fetch_time_fieldname: last_run,
latest_fetch_time_fieldname: now, "count": FETCH_LIMIT, 'offset': search_offset}
searchquery_oneshot = demisto.params()['fetchQuery']
if demisto.get(demisto.params(), 'extractFields'):
extractFields = demisto.params()['extractFields']
extra_raw_arr = extractFields.split(',')
for field in extra_raw_arr:
field_trimmed = field.strip()
searchquery_oneshot = searchquery_oneshot + ' | eval ' + field_trimmed + '=' + field_trimmed
oneshotsearch_results = service.jobs.oneshot(searchquery_oneshot, **kwargs_oneshot) # type: ignore
reader = results.ResultsReader(oneshotsearch_results)
for item in reader:
inc = notable_to_incident(item)
incidents.append(inc)
demisto.incidents(incidents)
if len(incidents) < FETCH_LIMIT:
demisto.setLastRun({'time': now, 'offset': 0})
else:
demisto.setLastRun({'time': last_run, 'offset': search_offset + FETCH_LIMIT})
|
def fetch_incidents(service):
last_run = demisto.getLastRun() and demisto.getLastRun()['time']
search_offset = demisto.getLastRun().get('offset', 0)
incidents = []
current_time_for_fetch = datetime.utcnow()
if demisto.get(demisto.params(), 'timezone'):
timezone = demisto.params()['timezone']
current_time_for_fetch = current_time_for_fetch + timedelta(minutes=int(timezone))
now = current_time_for_fetch.strftime(SPLUNK_TIME_FORMAT)
if demisto.get(demisto.params(), 'useSplunkTime'):
now = get_current_splunk_time(service)
current_time_for_fetch = datetime.strptime(now, SPLUNK_TIME_FORMAT)
if len(last_run) == 0:
fetch_time_in_minutes = parse_time_to_minutes()
start_time_for_fetch = current_time_for_fetch - timedelta(minutes=fetch_time_in_minutes)
        last_run = start_time_for_fetch.strftime(SPLUNK_TIME_FORMAT)
earliest_fetch_time_fieldname = demisto.params().get("earliest_fetch_time_fieldname", "index_earliest")
latest_fetch_time_fieldname = demisto.params().get("latest_fetch_time_fieldname", "index_latest")
kwargs_oneshot = {earliest_fetch_time_fieldname: last_run,
latest_fetch_time_fieldname: now, "count": FETCH_LIMIT, 'offset': search_offset}
searchquery_oneshot = demisto.params()['fetchQuery']
if demisto.get(demisto.params(), 'extractFields'):
extractFields = demisto.params()['extractFields']
extra_raw_arr = extractFields.split(',')
for field in extra_raw_arr:
field_trimmed = field.strip()
searchquery_oneshot = searchquery_oneshot + ' | eval ' + field_trimmed + '=' + field_trimmed
oneshotsearch_results = service.jobs.oneshot(searchquery_oneshot, **kwargs_oneshot) # type: ignore
reader = results.ResultsReader(oneshotsearch_results)
for item in reader:
inc = notable_to_incident(item)
incidents.append(inc)
demisto.incidents(incidents)
if len(incidents) < FETCH_LIMIT:
demisto.setLastRun({'time': now, 'offset': 0})
else:
demisto.setLastRun({'time': last_run, 'offset': search_offset + FETCH_LIMIT})
|
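The last-run handling at the end is a simple offset-based pagination scheme; restated as a standalone sketch (the FETCH_LIMIT value and the dict shape are illustrative, not Splunk-specific code):

FETCH_LIMIT = 50  # illustrative page size


def advance_last_run(state, num_fetched, now):
    """Return the next last-run state given how many incidents were fetched."""
    if num_fetched < FETCH_LIMIT:
        # The current time window is exhausted: move the window forward
        # and start again from offset 0.
        return {'time': now, 'offset': 0}
    # More results may remain in the same window: keep the window and
    # advance the offset by one page.
    return {'time': state['time'], 'offset': state['offset'] + FETCH_LIMIT}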
8,070 |
def _find_config_files():
"""Finds locations of SunPy configuration files"""
config_files = []
config_filename = 'sunpyrc'
# find default configuration file
module_dir = Path(sunpy.__file__).parent
config_files.append(str(module_dir / 'data' / 'sunpyrc'))
# if a user configuration file exists, add that to list of files to read
# so that any values set there will override ones specified in the default
# config file
config_path = Path(_get_user_configdir())
if config_path.joinpath(config_filename).exists():
config_files.append(str(config_path.joinpath(config_filename)))
return config_files
|
def _find_config_files():
"""Finds locations of SunPy configuration files"""
    config_files = []
    config_filename = 'sunpyrc'
    # find default configuration file
    module_dir = Path(sunpy.__file__).parent
    config_files.append(f"{module_dir / 'data' / 'sunpyrc'}")
# if a user configuration file exists, add that to list of files to read
# so that any values set there will override ones specified in the default
# config file
config_path = Path(_get_user_configdir())
if config_path.joinpath(config_filename).exists():
config_files.append(str(config_path.joinpath(config_filename)))
return config_files
|
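The ordering of the returned list matters because later files are expected to override earlier ones; a minimal consumption sketch, assuming a ConfigParser-style reader:

import configparser

config = configparser.ConfigParser()
# ConfigParser.read() processes files in order, so values from the user's
# sunpyrc (appended last) override the packaged defaults read first.
config.read(_find_config_files())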
35,071 |
def make_model(
shape,
kernel_shape,
input_zp,
input_sc,
kernel_zp,
kernel_sc,
output_zp,
output_sc,
padding,
strides,
dilation,
groups,
dtype,
kernel_dtype,
out_channels,
weight_format,
enable_bias,
relu_type,
):
"""Return a model and any parameters it may have"""
h_index = weight_format.index("H")
w_index = weight_format.index("W")
kernel_h = kernel_shape[h_index]
kernel_w = kernel_shape[w_index]
a = relay.var("in0", shape=shape, dtype=dtype)
p = (0, 0, 0, 0)
if padding == "SAME":
p = get_same_padding((shape[1], shape[2]), (kernel_h, kernel_w), dilation, strides)
a = relay.nn.pad(
a,
pad_width=[(0, 0), (p[0], p[2]), (p[1], p[3]), (0, 0)],
pad_value=input_zp,
pad_mode="constant",
)
shape = (shape[0], shape[1] + p[0] + p[2], shape[2] + p[1] + p[3], shape[3])
weight_shape = (kernel_h, kernel_w, shape[3] // groups, out_channels)
w = tvm.nd.array(
np.random.randint(
np.iinfo(kernel_dtype).min,
high=np.iinfo(kernel_dtype).max,
size=weight_shape,
dtype=kernel_dtype,
)
)
weights = relay.const(w, kernel_dtype)
conv = relay.qnn.op.conv2d(
a,
weights,
input_zero_point=relay.const(input_zp, "int32"),
kernel_zero_point=relay.const(kernel_zp, "int32"),
input_scale=relay.const(input_sc, "float32"),
kernel_scale=relay.const(kernel_sc, "float32"),
kernel_size=(kernel_h, kernel_w),
data_layout="NHWC",
kernel_layout=weight_format,
dilation=dilation,
strides=strides,
groups=groups,
channels=out_channels,
padding=p,
out_dtype="int32",
)
b = tvm.nd.array(np.random.randint(0, high=10, size=(out_channels,), dtype="int32"))
bc = relay.const(b, "int32")
bias = conv
if enable_bias:
bias = relay.nn.bias_add(conv, bc, axis=3)
requant_input_sc = [sc * input_sc for sc in kernel_sc]
req = relay.qnn.op.requantize(
bias,
relay.const(requant_input_sc, "float32"),
relay.const(0, "int32"),
relay.const(output_sc, "float32"),
relay.const(output_zp, "int32"),
out_dtype=dtype,
)
relu = make_qnn_relu(req, relu_type, output_sc, output_zp, dtype)
params = {"w": w, "b": b}
return relu, params
|
def make_model(
shape,
kernel_shape,
input_zp,
input_sc,
kernel_zp,
kernel_sc,
output_zp,
output_sc,
padding,
strides,
dilation,
groups,
dtype,
kernel_dtype,
out_channels,
weight_format,
enable_bias,
relu_type,
):
"""Return a model and any parameters it may have"""
h_index = weight_format.index("H")
w_index = weight_format.index("W")
kernel_h = kernel_shape[h_index]
kernel_w = kernel_shape[w_index]
input = relay.var("input", shape=shape, dtype=dtype)
p = (0, 0, 0, 0)
if padding == "SAME":
p = get_same_padding((shape[1], shape[2]), (kernel_h, kernel_w), dilation, strides)
a = relay.nn.pad(
a,
pad_width=[(0, 0), (p[0], p[2]), (p[1], p[3]), (0, 0)],
pad_value=input_zp,
pad_mode="constant",
)
shape = (shape[0], shape[1] + p[0] + p[2], shape[2] + p[1] + p[3], shape[3])
weight_shape = (kernel_h, kernel_w, shape[3] // groups, out_channels)
w = tvm.nd.array(
np.random.randint(
np.iinfo(kernel_dtype).min,
high=np.iinfo(kernel_dtype).max,
size=weight_shape,
dtype=kernel_dtype,
)
)
weights = relay.const(w, kernel_dtype)
conv = relay.qnn.op.conv2d(
a,
weights,
input_zero_point=relay.const(input_zp, "int32"),
kernel_zero_point=relay.const(kernel_zp, "int32"),
input_scale=relay.const(input_sc, "float32"),
kernel_scale=relay.const(kernel_sc, "float32"),
kernel_size=(kernel_h, kernel_w),
data_layout="NHWC",
kernel_layout=weight_format,
dilation=dilation,
strides=strides,
groups=groups,
channels=out_channels,
padding=p,
out_dtype="int32",
)
b = tvm.nd.array(np.random.randint(0, high=10, size=(out_channels,), dtype="int32"))
bc = relay.const(b, "int32")
bias = conv
if enable_bias:
bias = relay.nn.bias_add(conv, bc, axis=3)
requant_input_sc = [sc * input_sc for sc in kernel_sc]
req = relay.qnn.op.requantize(
bias,
relay.const(requant_input_sc, "float32"),
relay.const(0, "int32"),
relay.const(output_sc, "float32"),
relay.const(output_zp, "int32"),
out_dtype=dtype,
)
relu = make_qnn_relu(req, relu_type, output_sc, output_zp, dtype)
params = {"w": w, "b": b}
return relu, params
|
30,564 |
def splunk_job_status(service):
job = service.job(demisto.args()['sid'])
status = job.state.content['dispatchState']
entry_context = {
'SID': demisto.args()['sid'],
'Status': status
}
context = {'Splunk.JobStatus(val.ID && val.ID === obj.ID)': entry_context}
human_readable = tableToMarkdown('Splunk Job Status', entry_context)
demisto.results({
"Type": 1,
"Contents": 'Splunk Job Status',
"ContentsFormat": "json",
"EntryContext": context,
"HumanReadable": human_readable
})
|
def splunk_job_status(service):
job = service.job(demisto.args()['sid'])
status = job.state.content['dispatchState']
entry_context = {
'SID': demisto.args()['sid'],
'Status': status
}
context = {'Splunk.JobStatus(val.ID && val.ID === obj.ID)': entry_context}
human_readable = tableToMarkdown('Splunk Job Status', entry_context)
demisto.results({
"Type": entryTypes['note'],
"Contents": 'Splunk Job Status',
"ContentsFormat": "json",
"EntryContext": context,
"HumanReadable": human_readable
})
|
27,904 |
def batch_renormalization(x, gamma, beta, rmax, dmax, eps=2e-5,
running_mean=None, running_var=None, decay=0.9,
update_statistics=False):
"""Batch renormalization function.
This is an extension of batch normalization, which ensures that the
training and inference models generate the same outputs that depend on
individual examples rather than the entire minibatch.
.. note::
This function does not perform in-place update to
``running_mean`` and ``running_var`` by default, contrary to
:func:`~chainer.functions.batch_normalization`.
If the function is called, it will not be possible to access the
updated running mean and variance statistics, because they are members
of the function object, which cannot be accessed by the caller.
If it is desired to update the running statistics, call the function
with ``update_statistics=True`` option.
.. note::
For the consistency with Batch Normalization, this function
intentionally ignores some of the theoretical flaws in Algorithm 1 of
the Batch Renormalization paper:
- The function maintains the moving average of variances
:math:`\\sigma^2`, while the original paper maintain the moving
average of standard deviations :math:`\\sigma`.
- The function applies Bessel's correction to update the moving average
of variances.
See: `Batch Renormalization: Towards Reducing Minibatch Dependence in \
Batch-Normalized Models <https://arxiv.org/abs/1702.03275>`_
.. seealso:: :class:`~chainer.links.BatchRenormalization`
"""
if running_mean is None:
raise TypeError('running_mean is required')
if running_var is None:
raise TypeError('running_var is required')
return BatchRenormalizationFunction(
eps, running_mean, running_var, decay, rmax, dmax, update_statistics
)(x, gamma, beta)
|
def batch_renormalization(x, gamma, beta, rmax, dmax, eps=2e-5,
running_mean=None, running_var=None, decay=0.9,
update_statistics=False):
"""Batch renormalization function.
This is an extension of batch normalization, which ensures that the
training and inference models generate the same outputs that depend on
individual examples rather than the entire minibatch.
.. note::
This function does not perform in-place update to
``running_mean`` and ``running_var`` by default, contrary to
:func:`~chainer.functions.batch_normalization`.
If the function is called, it will not be possible to access the
updated running mean and variance statistics, because they are members
of the function object, which cannot be accessed by the caller.
If it is desired to update the running statistics, call the function
with ``update_statistics=True`` option.
.. note::
For the consistency with Batch Normalization, this function
intentionally ignores some of the theoretical flaws in Algorithm 1 of
the Batch Renormalization paper:
- The function maintains the moving average of variances
:math:`\\sigma^2`, while the original paper maintains the moving
average of standard deviations :math:`\\sigma`.
- The function applies Bessel's correction to update the moving average
of variances.
See: `Batch Renormalization: Towards Reducing Minibatch Dependence in \
Batch-Normalized Models <https://arxiv.org/abs/1702.03275>`_
.. seealso:: :class:`~chainer.links.BatchRenormalization`
"""
if running_mean is None:
raise TypeError('running_mean is required')
if running_var is None:
raise TypeError('running_var is required')
return BatchRenormalizationFunction(
eps, running_mean, running_var, decay, rmax, dmax, update_statistics
)(x, gamma, beta)
|
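A hedged usage sketch with plain NumPy inputs (the shapes and the rmax/dmax values are arbitrary):

import numpy as np
import chainer.functions as F

x = np.random.randn(16, 3).astype(np.float32)
gamma = np.ones(3, dtype=np.float32)
beta = np.zeros(3, dtype=np.float32)
running_mean = np.zeros(3, dtype=np.float32)
running_var = np.ones(3, dtype=np.float32)

y = F.batch_renormalization(x, gamma, beta, rmax=3.0, dmax=5.0,
                            running_mean=running_mean,
                            running_var=running_var,
                            update_statistics=True)  # ask it to update the running stats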
59,940 |
def main():
# Setup command line options
argparser = argparse.ArgumentParser()
output_options = argparser.add_argument_group(
'Options controlling ReFrame output'
)
locate_options = argparser.add_argument_group(
'Options for discovering checks'
)
select_options = argparser.add_argument_group(
'Options for selecting checks'
)
action_options = argparser.add_argument_group(
'Options controlling actions'
)
run_options = argparser.add_argument_group(
'Options controlling the execution of checks'
)
env_options = argparser.add_argument_group(
'Options controlling the ReFrame environment'
)
misc_options = argparser.add_argument_group('Miscellaneous options')
# Output directory options
output_options.add_argument(
'--dont-restage', action='store_false', dest='clean_stagedir',
help='Reuse the test stage directory',
envvar='RFM_CLEAN_STAGEDIR', configvar='general/clean_stagedir'
)
output_options.add_argument(
'--keep-stage-files', action='store_true',
help='Keep stage directories even for successful checks',
envvar='RFM_KEEP_STAGE_FILES', configvar='general/keep_stage_files'
)
output_options.add_argument(
'-o', '--output', action='store', metavar='DIR',
help='Set output directory prefix to DIR',
envvar='RFM_OUTPUT_DIR', configvar='systems/outputdir'
)
output_options.add_argument(
'--perflogdir', action='store', metavar='DIR',
help=('Set performance log data directory prefix '
'(relevant only to the filelog log handler)'),
envvar='RFM_PERFLOG_DIR',
configvar='logging/handlers_perflog/filelog_basedir'
)
output_options.add_argument(
'--prefix', action='store', metavar='DIR',
help='Set general directory prefix to DIR',
envvar='RFM_PREFIX', configvar='systems/prefix'
)
output_options.add_argument(
'--report-file', action='store', metavar='FILE',
help="Store JSON run report in FILE",
envvar='RFM_REPORT_FILE',
configvar='general/report_file'
)
output_options.add_argument(
'--report-junit', action='store', metavar='FILE',
help="Store a JUnit report in FILE",
envvar='RFM_REPORT_JUNIT',
configvar='general/report_junit'
)
output_options.add_argument(
'-s', '--stage', action='store', metavar='DIR',
help='Set stage directory prefix to DIR',
envvar='RFM_STAGE_DIR', configvar='systems/stagedir'
)
output_options.add_argument(
'--save-log-files', action='store_true', default=False,
help='Save ReFrame log files to the output directory',
envvar='RFM_SAVE_LOG_FILES', configvar='general/save_log_files'
)
output_options.add_argument(
'--timestamp', action='store', nargs='?', const='%FT%T',
metavar='TIMEFMT',
help=('Append a timestamp to the output and stage directory prefixes '
'(default: "%%FT%%T")'),
envvar='RFM_TIMESTAMP_DIRS', configvar='general/timestamp_dirs'
)
# Check discovery options
locate_options.add_argument(
'-c', '--checkpath', action='append', metavar='PATH',
help="Add PATH to the check search path list",
envvar='RFM_CHECK_SEARCH_PATH :', configvar='general/check_search_path'
)
locate_options.add_argument(
'--ignore-check-conflicts', action='store_true',
help=('Skip checks with conflicting names '
'(this option is deprecated and has no effect)'),
envvar='RFM_IGNORE_CHECK_CONFLICTS',
configvar='general/ignore_check_conflicts'
)
locate_options.add_argument(
'-R', '--recursive', action='store_true',
help='Search for checks in the search path recursively',
envvar='RFM_CHECK_SEARCH_RECURSIVE',
configvar='general/check_search_recursive'
)
# Select options
select_options.add_argument(
'--cpu-only', action='store_true',
help='Select only CPU checks'
)
select_options.add_argument(
'--failed', action='store_true',
help="Select failed test cases (only when '--restore-session' is used)"
)
select_options.add_argument(
'--gpu-only', action='store_true',
help='Select only GPU checks'
)
select_options.add_argument(
'-n', '--name', action='append', dest='names', default=[],
metavar='PATTERN', help='Select checks whose name matches PATTERN'
)
select_options.add_argument(
'-p', '--prgenv', action='append', default=[r'.*'], metavar='PATTERN',
help=('Select checks with at least one '
'programming environment matching PATTERN')
)
select_options.add_argument(
'-t', '--tag', action='append', dest='tags', metavar='PATTERN',
default=[],
help='Select checks with at least one tag matching PATTERN'
)
select_options.add_argument(
'-x', '--exclude', action='append', dest='exclude_names',
metavar='PATTERN', default=[],
help='Exclude checks whose name matches PATTERN'
)
select_options.add_argument(
'--exclude-tags', action='append', dest='exclude_tags',
metavar='PATTERN', default=[],
help='Exclude checks whose tag matches PATTERN'
)
# Action options
action_options.add_argument(
'--ci-generate', action='store', metavar='FILE',
help=('Generate into FILE a Gitlab CI pipeline '
'for the selected tests and exit'),
)
action_options.add_argument(
'-L', '--list-detailed', action='store_true',
help='List the selected checks providing details for each test'
)
action_options.add_argument(
'-l', '--list', action='store_true',
help='List the selected checks'
)
action_options.add_argument(
'--list-tags', action='store_true',
help='List the unique tags found in the selected tests and exit'
)
action_options.add_argument(
'-r', '--run', action='store_true',
help='Run the selected checks'
)
# Run options
run_options.add_argument(
'--disable-hook', action='append', metavar='NAME', dest='hooks',
default=[], help='Disable a pipeline hook for this run'
)
run_options.add_argument(
'--exec-policy', metavar='POLICY', action='store',
choices=['async', 'serial'], default='async',
help='Set the execution policy of ReFrame (default: "async")'
)
run_options.add_argument(
'--flex-alloc-nodes', action='store',
dest='flex_alloc_nodes', metavar='{all|STATE|NUM}', default=None,
help='Set strategy for the flexible node allocation (default: "idle").'
)
run_options.add_argument(
'--force-local', action='store_true',
help='Force local execution of checks'
)
run_options.add_argument(
'-J', '--job-option', action='append', metavar='OPT',
dest='job_options', default=[],
help='Pass option OPT to job scheduler'
)
run_options.add_argument(
'--max-retries', metavar='NUM', action='store', default=0,
help='Set the maximum number of times a failed regression test '
'may be retried (default: 0)'
)
run_options.add_argument(
'--maxfail', metavar='NUM', action='store', default=sys.maxsize,
help='Exit after first NUM failures'
)
run_options.add_argument(
'--mode', action='store', help='Execution mode to use'
)
run_options.add_argument(
'--restore-session', action='store', nargs='?', const='',
metavar='REPORT',
help='Restore a testing session from REPORT file'
)
run_options.add_argument(
'-S', '--setvar', action='append', metavar='[TEST.]VAR=VAL',
dest='vars', default=[],
help=('Set test variable VAR to VAL in all tests '
'or optionally in TEST only')
)
run_options.add_argument(
'--skip-performance-check', action='store_true',
help='Skip performance checking'
)
run_options.add_argument(
'--skip-prgenv-check', action='store_true',
help='Skip programming environment check'
)
run_options.add_argument(
'--skip-sanity-check', action='store_true',
help='Skip sanity checking'
)
run_options.add_argument(
'--skip-system-check', action='store_true',
help='Skip system check'
)
run_options.add_argument(
'--strict', action='store_true',
help='Enforce strict performance checking'
)
# Environment options
env_options.add_argument(
'-M', '--map-module', action='append', metavar='MAPPING',
dest='module_mappings', default=[],
help='Add a module mapping',
envvar='RFM_MODULE_MAPPINGS ,', configvar='general/module_mappings'
)
env_options.add_argument(
'-m', '--module', action='append', default=[],
metavar='MOD', dest='user_modules',
help='Load module MOD before running any regression check',
envvar='RFM_USER_MODULES ,', configvar='general/user_modules'
)
env_options.add_argument(
'--module-mappings', action='store', metavar='FILE',
dest='module_map_file',
help='Load module mappings from FILE',
envvar='RFM_MODULE_MAP_FILE', configvar='general/module_map_file'
)
env_options.add_argument(
'--module-path', action='append', metavar='PATH',
dest='module_paths', default=[],
help='(Un)use module path PATH before running any regression check',
)
env_options.add_argument(
'--non-default-craype', action='store_true',
help='Test a non-default Cray Programming Environment',
envvar='RFM_NON_DEFAULT_CRAYPE', configvar='general/non_default_craype'
)
env_options.add_argument(
'--purge-env', action='store_true', dest='purge_env', default=False,
help='Unload all modules before running any regression check',
envvar='RFM_PURGE_ENVIRONMENT', configvar='general/purge_environment'
)
env_options.add_argument(
'-u', '--unload-module', action='append', metavar='MOD',
dest='unload_modules', default=[],
help='Unload module MOD before running any regression check',
envvar='RFM_UNLOAD_MODULES ,', configvar='general/unload_modules'
)
# Miscellaneous options
misc_options.add_argument(
'-C', '--config-file', action='store',
dest='config_file', metavar='FILE',
help='Set configuration file',
envvar='RFM_CONFIG_FILE'
)
misc_options.add_argument(
'--detect-host-topology', action='store', nargs='?', const='-',
help='Detect the local host topology and exit'
)
misc_options.add_argument(
'--failure-stats', action='store_true', help='Print failure statistics'
)
misc_options.add_argument(
'--nocolor', action='store_false', dest='colorize',
help='Disable coloring of output',
envvar='RFM_COLORIZE', configvar='general/colorize'
)
misc_options.add_argument(
'--performance-report', action='store_true',
help='Print a report for performance tests'
)
misc_options.add_argument(
'--show-config', action='store', nargs='?', const='all',
metavar='PARAM',
help='Print the value of configuration parameter PARAM and exit'
)
misc_options.add_argument(
'--system', action='store', help='Load configuration for SYSTEM',
envvar='RFM_SYSTEM'
)
misc_options.add_argument(
'--upgrade-config-file', action='store', metavar='OLD[:NEW]',
help='Upgrade ReFrame 2.x configuration file to ReFrame 3.x syntax'
)
misc_options.add_argument(
'-V', '--version', action='version', version=osext.reframe_version()
)
misc_options.add_argument(
'-v', '--verbose', action='count',
help='Increase verbosity level of output',
envvar='RFM_VERBOSE', configvar='general/verbose'
)
# Options not associated with command-line arguments
argparser.add_argument(
dest='graylog_server',
envvar='RFM_GRAYLOG_ADDRESS',
configvar='logging/handlers_perflog/graylog_address',
help='Graylog server address'
)
argparser.add_argument(
dest='httpjson_url',
envvar='RFM_HTTPJSON_URL',
configvar='logging/handlers_perflog/httpjson_url',
help='URL of HTTP server accepting JSON logs'
)
argparser.add_argument(
dest='ignore_reqnodenotavail',
envvar='RFM_IGNORE_REQNODENOTAVAIL',
configvar='schedulers/ignore_reqnodenotavail',
action='store_true',
        help='Ignore ReqNodeNotAvail Slurm error'
)
argparser.add_argument(
dest='remote_detect',
envvar='RFM_REMOTE_DETECT',
configvar='general/remote_detect',
action='store_true',
help='Detect remote system topology'
)
argparser.add_argument(
dest='remote_workdir',
envvar='RFM_REMOTE_WORKDIR',
configvar='general/remote_workdir',
action='store',
help='Working directory for launching ReFrame remotely'
)
argparser.add_argument(
dest='resolve_module_conflicts',
envvar='RFM_RESOLVE_MODULE_CONFLICTS',
configvar='general/resolve_module_conflicts',
action='store_true',
help='Resolve module conflicts automatically'
)
argparser.add_argument(
dest='syslog_address',
envvar='RFM_SYSLOG_ADDRESS',
configvar='logging/handlers_perflog/syslog_address',
help='Syslog server address'
)
argparser.add_argument(
dest='use_login_shell',
envvar='RFM_USE_LOGIN_SHELL',
configvar='general/use_login_shell',
action='store_true',
help='Use a login shell for job scripts'
)
# Parse command line
options = argparser.parse_args()
if len(sys.argv) == 1:
argparser.print_help()
sys.exit(1)
# First configure logging with our generic configuration so as to be able
# to print pretty messages; logging will be reconfigured by user's
# configuration later
site_config = config.load_config(
os.path.join(reframe.INSTALL_PREFIX, 'reframe/core/settings.py')
)
site_config.select_subconfig('generic')
options.update_config(site_config)
logging.configure_logging(site_config)
logging.getlogger().colorize = site_config.get('general/0/colorize')
printer = PrettyPrinter()
printer.colorize = site_config.get('general/0/colorize')
printer.inc_verbosity(site_config.get('general/0/verbose'))
if os.getenv('RFM_GRAYLOG_SERVER'):
printer.warning(
'RFM_GRAYLOG_SERVER environment variable is deprecated; '
'please use RFM_GRAYLOG_ADDRESS instead'
)
os.environ['RFM_GRAYLOG_ADDRESS'] = os.getenv('RFM_GRAYLOG_SERVER')
if options.upgrade_config_file is not None:
old_config, *new_config = options.upgrade_config_file.split(
':', maxsplit=1
)
new_config = new_config[0] if new_config else None
try:
new_config = config.convert_old_config(old_config, new_config)
except Exception as e:
printer.error(f'could not convert file: {e}')
sys.exit(1)
printer.info(
f'Conversion successful! '
f'The converted file can be found at {new_config!r}.'
)
sys.exit(0)
# Now configure ReFrame according to the user configuration file
try:
try:
printer.debug('Loading user configuration')
site_config = config.load_config(options.config_file)
except warnings.ReframeDeprecationWarning as e:
printer.warning(e)
converted = config.convert_old_config(options.config_file)
printer.warning(
f"configuration file has been converted "
f"to the new syntax here: '{converted}'"
)
site_config = config.load_config(converted)
site_config.validate()
# We ignore errors about unresolved sections or configuration
# parameters here, because they might be defined at the individual
        # partition level and will be caught when we instantiate the
        # system and partitions internally later on.
site_config.select_subconfig(options.system,
ignore_resolve_errors=True)
for err in options.update_config(site_config):
printer.warning(str(err))
# Update options from the selected execution mode
if options.mode:
mode_args = site_config.get(f'modes/@{options.mode}/options')
# We lexically split the mode options, because otherwise spaces
# will be treated as part of the option argument; see GH bug #1554
mode_args = list(itertools.chain.from_iterable(shlex.split(m)
for m in mode_args))
# Parse the mode's options and reparse the command-line
options = argparser.parse_args(mode_args)
options = argparser.parse_args(namespace=options.cmd_options)
options.update_config(site_config)
logging.configure_logging(site_config)
except (OSError, errors.ConfigError) as e:
printer.error(f'failed to load configuration: {e}')
printer.error(logfiles_message())
sys.exit(1)
logging.getlogger().colorize = site_config.get('general/0/colorize')
printer.colorize = site_config.get('general/0/colorize')
printer.inc_verbosity(site_config.get('general/0/verbose'))
try:
printer.debug('Initializing runtime')
runtime.init_runtime(site_config)
except errors.ConfigError as e:
printer.error(f'failed to initialize runtime: {e}')
printer.error(logfiles_message())
sys.exit(1)
if site_config.get('general/0/ignore_check_conflicts'):
logging.getlogger().warning(
"the 'ignore_check_conflicts' option is deprecated "
"and will be removed in the future"
)
rt = runtime.runtime()
autodetect.detect_topology()
try:
if site_config.get('general/0/module_map_file'):
rt.modules_system.load_mapping_from_file(
site_config.get('general/0/module_map_file')
)
if site_config.get('general/0/module_mappings'):
for m in site_config.get('general/0/module_mappings'):
rt.modules_system.load_mapping(m)
except (errors.ConfigError, OSError) as e:
printer.error('could not load module mappings: %s' % e)
sys.exit(1)
if (osext.samefile(rt.stage_prefix, rt.output_prefix) and
not site_config.get('general/0/keep_stage_files')):
printer.error("stage and output refer to the same directory; "
"if this is on purpose, please use the "
"'--keep-stage-files' option.")
printer.error(logfiles_message())
sys.exit(1)
# Show configuration after everything is set up
if options.show_config:
config_param = options.show_config
if config_param == 'all':
printer.info(str(rt.site_config))
else:
value = rt.get_option(config_param)
if value is None:
printer.error(
f'no such configuration parameter found: {config_param}'
)
else:
printer.info(json.dumps(value, indent=2))
sys.exit(0)
if options.detect_host_topology:
from reframe.utility.cpuinfo import cpuinfo
topofile = options.detect_host_topology
if topofile == '-':
json.dump(cpuinfo(), sys.stdout, indent=2)
sys.stdout.write('\n')
else:
try:
with open(topofile, 'w') as fp:
json.dump(cpuinfo(), fp, indent=2)
fp.write('\n')
except OSError as e:
getlogger().error(
f'could not write topology file: {topofile!r}'
)
sys.exit(1)
sys.exit(0)
printer.debug(format_env(options.env_vars))
# Setup the check loader
if options.restore_session is not None:
# We need to load the failed checks only from a list of reports
if options.restore_session:
filenames = options.restore_session.split(',')
else:
filenames = [runreport.next_report_filename(
osext.expandvars(site_config.get('general/0/report_file')),
new=False
)]
report = runreport.load_report(*filenames)
check_search_path = list(report.slice('filename', unique=True))
check_search_recursive = False
# If `-c` or `-R` are passed explicitly outside the configuration
# file, override the values set from the report file
if site_config.is_sticky_option('general/check_search_path'):
printer.warning(
'Ignoring check search path set in the report file: '
'search path set explicitly in the command-line or '
'the environment'
)
check_search_path = site_config.get(
'general/0/check_search_path'
)
if site_config.is_sticky_option('general/check_search_recursive'):
printer.warning(
'Ignoring check search recursive option from the report file: '
'option set explicitly in the command-line or the environment'
)
check_search_recursive = site_config.get(
'general/0/check_search_recursive'
)
else:
check_search_recursive = site_config.get(
'general/0/check_search_recursive'
)
check_search_path = site_config.get('general/0/check_search_path')
# Collect any variables set from the command line
external_vars = {}
for expr in options.vars:
try:
lhs, rhs = expr.split('=', maxsplit=1)
except ValueError:
printer.warning(
f'invalid test variable assignment: {expr!r}; skipping'
)
else:
external_vars[lhs] = rhs
loader = RegressionCheckLoader(check_search_path,
check_search_recursive,
external_vars)
def print_infoline(param, value):
param = param + ':'
printer.info(f" {param.ljust(18)} {value}")
session_info = {
'cmdline': ' '.join(sys.argv),
'config_file': rt.site_config.filename,
'data_version': runreport.DATA_VERSION,
'hostname': socket.getfqdn(),
'prefix_output': rt.output_prefix,
'prefix_stage': rt.stage_prefix,
'user': osext.osuser(),
'version': osext.reframe_version(),
'workdir': os.getcwd(),
}
# Print command line
printer.info(f"[ReFrame Setup]")
print_infoline('version', session_info['version'])
print_infoline('command', repr(session_info['cmdline']))
print_infoline(
f"launched by",
f"{session_info['user'] or '<unknown>'}@{session_info['hostname']}"
)
print_infoline('working directory', repr(session_info['workdir']))
print_infoline('settings file', f"{session_info['config_file']!r}")
print_infoline('check search path',
f"{'(R) ' if loader.recurse else ''}"
f"{':'.join(loader.load_path)!r}")
print_infoline('stage directory', repr(session_info['prefix_stage']))
print_infoline('output directory', repr(session_info['prefix_output']))
printer.info('')
try:
# Locate and load checks
checks_found = loader.load_all()
printer.verbose(f'Loaded {len(checks_found)} test(s)')
# Generate all possible test cases first; we will need them for
# resolving dependencies after filtering
# Determine the allowed programming environments
allowed_environs = {e.name
for env_patt in options.prgenv
for p in rt.system.partitions
for e in p.environs if re.match(env_patt, e.name)}
testcases_all = generate_testcases(checks_found,
options.skip_system_check,
options.skip_prgenv_check,
allowed_environs)
testcases = testcases_all
printer.verbose(f'Generated {len(testcases)} test case(s)')
# Filter test cases by name
if options.exclude_names:
for name in options.exclude_names:
testcases = filter(filters.have_not_name(name), testcases)
if options.names:
testcases = filter(
filters.have_name('|'.join(options.names)), testcases
)
testcases = list(testcases)
printer.verbose(
            f'Filtering test case(s) by name: {len(testcases)} remaining'
)
# Filter test cases by tags
if options.exclude_tags:
for tag in options.exclude_tags:
testcases = filter(filters.have_not_tag(tag), testcases)
for tag in options.tags:
testcases = filter(filters.have_tag(tag), testcases)
testcases = list(testcases)
printer.verbose(
            f'Filtering test case(s) by tags: {len(testcases)} remaining'
)
# Filter test cases further
if options.gpu_only and options.cpu_only:
printer.error("options `--gpu-only' and `--cpu-only' "
"are mutually exclusive")
sys.exit(1)
if options.gpu_only:
testcases = filter(filters.have_gpu_only(), testcases)
elif options.cpu_only:
testcases = filter(filters.have_cpu_only(), testcases)
testcases = list(testcases)
printer.verbose(
            f'Filtering test case(s) by other attributes: '
f'{len(testcases)} remaining'
)
# Filter in failed cases
if options.failed:
if options.restore_session is None:
printer.error(
"the option '--failed' can only be used "
"in combination with the '--restore-session' option"
)
sys.exit(1)
def _case_failed(t):
rec = report.case(*t)
if not rec:
return False
return (rec['result'] == 'failure' or
rec['result'] == 'aborted')
testcases = list(filter(_case_failed, testcases))
printer.verbose(
f'Filtering successful test case(s): '
f'{len(testcases)} remaining'
)
# Prepare for running
printer.debug('Building and validating the full test DAG')
testgraph, skipped_cases = dependencies.build_deps(testcases_all)
if skipped_cases:
# Some cases were skipped, so adjust testcases
testcases = list(set(testcases) - set(skipped_cases))
printer.verbose(
f'Filtering test case(s) due to unresolved dependencies: '
f'{len(testcases)} remaining'
)
dependencies.validate_deps(testgraph)
printer.debug('Full test DAG:')
printer.debug(dependencies.format_deps(testgraph))
restored_cases = []
if len(testcases) != len(testcases_all):
testgraph = dependencies.prune_deps(
testgraph, testcases,
max_depth=1 if options.restore_session is not None else None
)
printer.debug('Pruned test DAG')
printer.debug(dependencies.format_deps(testgraph))
if options.restore_session is not None:
testgraph, restored_cases = report.restore_dangling(testgraph)
testcases = dependencies.toposort(
testgraph,
is_subgraph=options.restore_session is not None
)
printer.verbose(f'Final number of test cases: {len(testcases)}')
# Disable hooks
for tc in testcases:
for h in options.hooks:
tc.check.disable_hook(h)
# Act on checks
if options.list or options.list_detailed:
list_checks(testcases, printer, options.list_detailed)
sys.exit(0)
if options.list_tags:
list_tags(testcases, printer)
sys.exit(0)
if options.ci_generate:
list_checks(testcases, printer)
printer.info('[Generate CI]')
with open(options.ci_generate, 'wt') as fp:
ci.emit_pipeline(fp, testcases)
printer.info(
f' Gitlab pipeline generated successfully '
f'in {options.ci_generate!r}.\n'
)
sys.exit(0)
if not options.run:
printer.error("No action option specified. Available options:\n"
" - `-l'/`-L' for listing\n"
" - `-r' for running\n"
" - `--list-tags' for listing unique test tags\n"
" - `--ci-generate' for generating a CI pipeline\n"
f"Try `{argparser.prog} -h' for more options.")
sys.exit(1)
# Manipulate ReFrame's environment
if site_config.get('general/0/purge_environment'):
rt.modules_system.unload_all()
else:
for m in site_config.get('general/0/unload_modules'):
rt.modules_system.unload_module(**m)
# Load the environment for the current system
try:
printer.debug(f'Loading environment for current system')
runtime.loadenv(rt.system.preload_environ)
except errors.EnvironError as e:
printer.error("failed to load current system's environment; "
"please check your configuration")
printer.debug(str(e))
raise
def module_use(*paths):
try:
rt.modules_system.searchpath_add(*paths)
except errors.EnvironError as e:
printer.warning(f'could not add module paths correctly')
printer.debug(str(e))
def module_unuse(*paths):
try:
rt.modules_system.searchpath_remove(*paths)
except errors.EnvironError as e:
printer.warning(f'could not remove module paths correctly')
printer.debug(str(e))
printer.debug('(Un)using module paths from command line')
module_paths = {}
for d in options.module_paths:
if d.startswith('-'):
module_paths.setdefault('-', [])
module_paths['-'].append(d[1:])
elif d.startswith('+'):
module_paths.setdefault('+', [])
module_paths['+'].append(d[1:])
else:
module_paths.setdefault('x', [])
module_paths['x'].append(d)
for op, paths in module_paths.items():
if op == '+':
module_use(*paths)
elif op == '-':
module_unuse(*paths)
else:
# First empty the current module path in a portable way
searchpath = [p for p in rt.modules_system.searchpath if p]
if searchpath:
rt.modules_system.searchpath_remove(*searchpath)
# Treat `A:B` syntax as well in this case
paths = itertools.chain(*(p.split(':') for p in paths))
module_use(*paths)
printer.debug('Loading user modules from command line')
for m in site_config.get('general/0/user_modules'):
try:
rt.modules_system.load_module(**m, force=True)
except errors.EnvironError as e:
printer.warning(
f'could not load module {m["name"]!r} correctly; '
f'skipping...'
)
printer.debug(str(e))
options.flex_alloc_nodes = options.flex_alloc_nodes or 'idle'
# Run the tests
# Setup the execution policy
if options.exec_policy == 'serial':
exec_policy = SerialExecutionPolicy()
elif options.exec_policy == 'async':
exec_policy = AsynchronousExecutionPolicy()
else:
# This should not happen, since choices are handled by
# argparser
printer.error("unknown execution policy `%s': Exiting...")
sys.exit(1)
exec_policy.skip_system_check = options.skip_system_check
exec_policy.force_local = options.force_local
exec_policy.strict_check = options.strict
exec_policy.skip_sanity_check = options.skip_sanity_check
exec_policy.skip_performance_check = options.skip_performance_check
exec_policy.keep_stage_files = site_config.get(
'general/0/keep_stage_files'
)
try:
errmsg = "invalid option for --flex-alloc-nodes: '{0}'"
sched_flex_alloc_nodes = int(options.flex_alloc_nodes)
if sched_flex_alloc_nodes <= 0:
raise errors.ConfigError(
errmsg.format(options.flex_alloc_nodes)
)
except ValueError:
sched_flex_alloc_nodes = options.flex_alloc_nodes
exec_policy.sched_flex_alloc_nodes = sched_flex_alloc_nodes
parsed_job_options = []
for opt in options.job_options:
opt_split = opt.split('=', maxsplit=1)
optstr = opt_split[0]
valstr = opt_split[1] if len(opt_split) > 1 else ''
if opt.startswith('-') or opt.startswith('#'):
parsed_job_options.append(opt)
elif len(optstr) == 1:
parsed_job_options.append(f'-{optstr} {valstr}')
else:
parsed_job_options.append(f'--{optstr} {valstr}')
exec_policy.sched_options = parsed_job_options
try:
max_retries = int(options.max_retries)
except ValueError:
raise errors.ConfigError(
                f'--max-retries is not a valid integer: {options.max_retries}'
) from None
try:
max_failures = int(options.maxfail)
if max_failures < 0:
raise errors.ConfigError(
f'--maxfail should be a non-negative integer: '
f'{options.maxfail!r}'
)
except ValueError:
raise errors.ConfigError(
f'--maxfail is not a valid integer: {options.maxfail!r}'
) from None
runner = Runner(exec_policy, printer, max_retries, max_failures)
try:
time_start = time.time()
session_info['time_start'] = time.strftime(
'%FT%T%z', time.localtime(time_start),
)
runner.runall(testcases, restored_cases)
finally:
time_end = time.time()
session_info['time_end'] = time.strftime(
'%FT%T%z', time.localtime(time_end)
)
session_info['time_elapsed'] = time_end - time_start
# Print a retry report if we did any retries
if runner.stats.failed(run=0):
printer.info(runner.stats.retry_report())
# Print a failure report if we had failures in the last run
success = True
if runner.stats.failed():
success = False
runner.stats.print_failure_report(printer)
if options.failure_stats:
runner.stats.print_failure_stats(printer)
if options.performance_report:
printer.info(runner.stats.performance_report())
# Generate the report for this session
report_file = os.path.normpath(
osext.expandvars(rt.get_option('general/0/report_file'))
)
basedir = os.path.dirname(report_file)
if basedir:
os.makedirs(basedir, exist_ok=True)
# Build final JSON report
run_stats = runner.stats.json()
session_info.update({
'num_cases': run_stats[0]['num_cases'],
'num_failures': run_stats[-1]['num_failures']
})
json_report = {
'session_info': session_info,
'runs': run_stats,
'restored_cases': []
}
if options.restore_session is not None:
for c in restored_cases:
json_report['restored_cases'].append(report.case(*c))
report_file = runreport.next_report_filename(report_file)
try:
with open(report_file, 'w') as fp:
jsonext.dump(json_report, fp, indent=2)
fp.write('\n')
printer.info(f'Run report saved in {report_file!r}')
except OSError as e:
printer.warning(
f'failed to generate report in {report_file!r}: {e}'
)
# Generate the junit xml report for this session
junit_report_file = rt.get_option('general/0/report_junit')
if junit_report_file:
# Expand variables in filename
junit_report_file = osext.expandvars(junit_report_file)
junit_xml = runreport.junit_xml_report(json_report)
try:
with open(junit_report_file, 'w') as fp:
runreport.junit_dump(junit_xml, fp)
except OSError as e:
printer.warning(
f'failed to generate report in {junit_report_file!r}: '
f'{e}'
)
if not success:
sys.exit(1)
sys.exit(0)
except (Exception, KeyboardInterrupt, errors.ReframeFatalError):
exc_info = sys.exc_info()
tb = ''.join(traceback.format_exception(*exc_info))
printer.error(f'run session stopped: {errors.what(*exc_info)}')
if errors.is_exit_request(*exc_info):
# Print stack traces for exit requests only when TOO verbose
printer.debug2(tb)
elif errors.is_severe(*exc_info):
printer.error(tb)
else:
printer.verbose(tb)
sys.exit(1)
finally:
try:
log_files = logging.log_files()
if site_config.get('general/0/save_log_files'):
log_files = logging.save_log_files(rt.output_prefix)
except OSError as e:
printer.error(f'could not save log file: {e}')
sys.exit(1)
finally:
printer.info(logfiles_message())
|
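The `--module-path` handling in the code above buckets each argument by its leading character: a `+` prefix adds to the module search path, a `-` prefix removes from it, and a bare path (optionally written as `A:B`) replaces the current search path. A minimal standalone sketch of that bucketing, using hypothetical paths purely for illustration (the helper name is made up here, not part of the code above):

import itertools

def bucket_module_paths(args):
    """Group --module-path arguments by operation, mirroring the logic above."""
    buckets = {}
    for a in args:
        if a.startswith('-'):
            buckets.setdefault('-', []).append(a[1:])
        elif a.startswith('+'):
            buckets.setdefault('+', []).append(a[1:])
        else:
            # Bare paths replace the search path; `A:B` syntax is split afterwards
            buckets.setdefault('x', []).append(a)
    return buckets

# Hypothetical arguments, for illustration only
buckets = bucket_module_paths(['+/opt/modules', '-/old/modules', '/site/a:/site/b'])
# buckets == {'+': ['/opt/modules'], '-': ['/old/modules'], 'x': ['/site/a:/site/b']}
replacement = list(itertools.chain(*(p.split(':') for p in buckets.get('x', []))))
# replacement == ['/site/a', '/site/b']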
def main():
# Setup command line options
argparser = argparse.ArgumentParser()
output_options = argparser.add_argument_group(
'Options controlling ReFrame output'
)
locate_options = argparser.add_argument_group(
'Options for discovering checks'
)
select_options = argparser.add_argument_group(
'Options for selecting checks'
)
action_options = argparser.add_argument_group(
'Options controlling actions'
)
run_options = argparser.add_argument_group(
'Options controlling the execution of checks'
)
env_options = argparser.add_argument_group(
'Options controlling the ReFrame environment'
)
misc_options = argparser.add_argument_group('Miscellaneous options')
# Output directory options
output_options.add_argument(
'--dont-restage', action='store_false', dest='clean_stagedir',
help='Reuse the test stage directory',
envvar='RFM_CLEAN_STAGEDIR', configvar='general/clean_stagedir'
)
output_options.add_argument(
'--keep-stage-files', action='store_true',
help='Keep stage directories even for successful checks',
envvar='RFM_KEEP_STAGE_FILES', configvar='general/keep_stage_files'
)
output_options.add_argument(
'-o', '--output', action='store', metavar='DIR',
help='Set output directory prefix to DIR',
envvar='RFM_OUTPUT_DIR', configvar='systems/outputdir'
)
output_options.add_argument(
'--perflogdir', action='store', metavar='DIR',
help=('Set performance log data directory prefix '
'(relevant only to the filelog log handler)'),
envvar='RFM_PERFLOG_DIR',
configvar='logging/handlers_perflog/filelog_basedir'
)
output_options.add_argument(
'--prefix', action='store', metavar='DIR',
help='Set general directory prefix to DIR',
envvar='RFM_PREFIX', configvar='systems/prefix'
)
output_options.add_argument(
'--report-file', action='store', metavar='FILE',
help="Store JSON run report in FILE",
envvar='RFM_REPORT_FILE',
configvar='general/report_file'
)
output_options.add_argument(
'--report-junit', action='store', metavar='FILE',
help="Store a JUnit report in FILE",
envvar='RFM_REPORT_JUNIT',
configvar='general/report_junit'
)
output_options.add_argument(
'-s', '--stage', action='store', metavar='DIR',
help='Set stage directory prefix to DIR',
envvar='RFM_STAGE_DIR', configvar='systems/stagedir'
)
output_options.add_argument(
'--save-log-files', action='store_true', default=False,
help='Save ReFrame log files to the output directory',
envvar='RFM_SAVE_LOG_FILES', configvar='general/save_log_files'
)
output_options.add_argument(
'--timestamp', action='store', nargs='?', const='%FT%T',
metavar='TIMEFMT',
help=('Append a timestamp to the output and stage directory prefixes '
'(default: "%%FT%%T")'),
envvar='RFM_TIMESTAMP_DIRS', configvar='general/timestamp_dirs'
)
# Check discovery options
locate_options.add_argument(
'-c', '--checkpath', action='append', metavar='PATH',
help="Add PATH to the check search path list",
envvar='RFM_CHECK_SEARCH_PATH :', configvar='general/check_search_path'
)
locate_options.add_argument(
'--ignore-check-conflicts', action='store_true',
help=('Skip checks with conflicting names '
'(this option is deprecated and has no effect)'),
envvar='RFM_IGNORE_CHECK_CONFLICTS',
configvar='general/ignore_check_conflicts'
)
locate_options.add_argument(
'-R', '--recursive', action='store_true',
help='Search for checks in the search path recursively',
envvar='RFM_CHECK_SEARCH_RECURSIVE',
configvar='general/check_search_recursive'
)
# Select options
select_options.add_argument(
'--cpu-only', action='store_true',
help='Select only CPU checks'
)
select_options.add_argument(
'--failed', action='store_true',
help="Select failed test cases (only when '--restore-session' is used)"
)
select_options.add_argument(
'--gpu-only', action='store_true',
help='Select only GPU checks'
)
select_options.add_argument(
'-n', '--name', action='append', dest='names', default=[],
metavar='PATTERN', help='Select checks whose name matches PATTERN'
)
select_options.add_argument(
'-p', '--prgenv', action='append', default=[r'.*'], metavar='PATTERN',
help=('Select checks with at least one '
'programming environment matching PATTERN')
)
select_options.add_argument(
'-t', '--tag', action='append', dest='tags', metavar='PATTERN',
default=[],
help='Select checks with at least one tag matching PATTERN'
)
select_options.add_argument(
'-x', '--exclude', action='append', dest='exclude_names',
metavar='PATTERN', default=[],
help='Exclude checks whose name matches PATTERN'
)
select_options.add_argument(
'--exclude-tags', action='append', dest='exclude_tags',
metavar='PATTERN', default=[],
help='Exclude checks whose tag matches PATTERN'
)
# Action options
action_options.add_argument(
'--ci-generate', action='store', metavar='FILE',
help=('Generate into FILE a Gitlab CI pipeline '
'for the selected tests and exit'),
)
action_options.add_argument(
'-L', '--list-detailed', action='store_true',
help='List the selected checks providing details for each test'
)
action_options.add_argument(
'-l', '--list', action='store_true',
help='List the selected checks'
)
action_options.add_argument(
'--list-tags', action='store_true',
help='List the unique tags found in the selected tests and exit'
)
action_options.add_argument(
'-r', '--run', action='store_true',
help='Run the selected checks'
)
# Run options
run_options.add_argument(
'--disable-hook', action='append', metavar='NAME', dest='hooks',
default=[], help='Disable a pipeline hook for this run'
)
run_options.add_argument(
'--exec-policy', metavar='POLICY', action='store',
choices=['async', 'serial'], default='async',
help='Set the execution policy of ReFrame (default: "async")'
)
run_options.add_argument(
'--flex-alloc-nodes', action='store',
dest='flex_alloc_nodes', metavar='{all|STATE|NUM}', default=None,
help='Set strategy for the flexible node allocation (default: "idle").'
)
run_options.add_argument(
'--force-local', action='store_true',
help='Force local execution of checks'
)
run_options.add_argument(
'-J', '--job-option', action='append', metavar='OPT',
dest='job_options', default=[],
help='Pass option OPT to job scheduler'
)
run_options.add_argument(
'--max-retries', metavar='NUM', action='store', default=0,
help='Set the maximum number of times a failed regression test '
'may be retried (default: 0)'
)
run_options.add_argument(
'--maxfail', metavar='NUM', action='store', default=sys.maxsize,
help='Exit after first NUM failures'
)
run_options.add_argument(
'--mode', action='store', help='Execution mode to use'
)
run_options.add_argument(
'--restore-session', action='store', nargs='?', const='',
metavar='REPORT',
help='Restore a testing session from REPORT file'
)
run_options.add_argument(
'-S', '--setvar', action='append', metavar='[TEST.]VAR=VAL',
dest='vars', default=[],
help=('Set test variable VAR to VAL in all tests '
'or optionally in TEST only')
)
run_options.add_argument(
'--skip-performance-check', action='store_true',
help='Skip performance checking'
)
run_options.add_argument(
'--skip-prgenv-check', action='store_true',
help='Skip programming environment check'
)
run_options.add_argument(
'--skip-sanity-check', action='store_true',
help='Skip sanity checking'
)
run_options.add_argument(
'--skip-system-check', action='store_true',
help='Skip system check'
)
run_options.add_argument(
'--strict', action='store_true',
help='Enforce strict performance checking'
)
# Environment options
env_options.add_argument(
'-M', '--map-module', action='append', metavar='MAPPING',
dest='module_mappings', default=[],
help='Add a module mapping',
envvar='RFM_MODULE_MAPPINGS ,', configvar='general/module_mappings'
)
env_options.add_argument(
'-m', '--module', action='append', default=[],
metavar='MOD', dest='user_modules',
help='Load module MOD before running any regression check',
envvar='RFM_USER_MODULES ,', configvar='general/user_modules'
)
env_options.add_argument(
'--module-mappings', action='store', metavar='FILE',
dest='module_map_file',
help='Load module mappings from FILE',
envvar='RFM_MODULE_MAP_FILE', configvar='general/module_map_file'
)
env_options.add_argument(
'--module-path', action='append', metavar='PATH',
dest='module_paths', default=[],
help='(Un)use module path PATH before running any regression check',
)
env_options.add_argument(
'--non-default-craype', action='store_true',
help='Test a non-default Cray Programming Environment',
envvar='RFM_NON_DEFAULT_CRAYPE', configvar='general/non_default_craype'
)
env_options.add_argument(
'--purge-env', action='store_true', dest='purge_env', default=False,
help='Unload all modules before running any regression check',
envvar='RFM_PURGE_ENVIRONMENT', configvar='general/purge_environment'
)
env_options.add_argument(
'-u', '--unload-module', action='append', metavar='MOD',
dest='unload_modules', default=[],
help='Unload module MOD before running any regression check',
envvar='RFM_UNLOAD_MODULES ,', configvar='general/unload_modules'
)
# Miscellaneous options
misc_options.add_argument(
'-C', '--config-file', action='store',
dest='config_file', metavar='FILE',
help='Set configuration file',
envvar='RFM_CONFIG_FILE'
)
misc_options.add_argument(
'--detect-host-topology', action='store', nargs='?', const='-',
help='Detect the local host topology and exit'
)
misc_options.add_argument(
'--failure-stats', action='store_true', help='Print failure statistics'
)
misc_options.add_argument(
'--nocolor', action='store_false', dest='colorize',
help='Disable coloring of output',
envvar='RFM_COLORIZE', configvar='general/colorize'
)
misc_options.add_argument(
'--performance-report', action='store_true',
help='Print a report for performance tests'
)
misc_options.add_argument(
'--show-config', action='store', nargs='?', const='all',
metavar='PARAM',
help='Print the value of configuration parameter PARAM and exit'
)
misc_options.add_argument(
'--system', action='store', help='Load configuration for SYSTEM',
envvar='RFM_SYSTEM'
)
misc_options.add_argument(
'--upgrade-config-file', action='store', metavar='OLD[:NEW]',
help='Upgrade ReFrame 2.x configuration file to ReFrame 3.x syntax'
)
misc_options.add_argument(
'-V', '--version', action='version', version=osext.reframe_version()
)
misc_options.add_argument(
'-v', '--verbose', action='count',
help='Increase verbosity level of output',
envvar='RFM_VERBOSE', configvar='general/verbose'
)
# Options not associated with command-line arguments
argparser.add_argument(
dest='graylog_server',
envvar='RFM_GRAYLOG_ADDRESS',
configvar='logging/handlers_perflog/graylog_address',
help='Graylog server address'
)
argparser.add_argument(
dest='httpjson_url',
envvar='RFM_HTTPJSON_URL',
configvar='logging/handlers_perflog/httpjson_url',
help='URL of HTTP server accepting JSON logs'
)
argparser.add_argument(
dest='ignore_reqnodenotavail',
envvar='RFM_IGNORE_REQNODENOTAVAIL',
configvar='schedulers/ignore_reqnodenotavail',
action='store_true',
help='Graylog server address'
)
argparser.add_argument(
dest='remote_detect',
envvar='RFM_REMOTE_DETECT',
configvar='general/remote_detect',
action='store_true',
help='Detect remote system topology'
)
argparser.add_argument(
dest='remote_workdir',
envvar='RFM_REMOTE_WORKDIR',
configvar='general/remote_workdir',
action='store',
help='Working directory for launching ReFrame remotely'
)
argparser.add_argument(
dest='resolve_module_conflicts',
envvar='RFM_RESOLVE_MODULE_CONFLICTS',
configvar='general/resolve_module_conflicts',
action='store_true',
help='Resolve module conflicts automatically'
)
argparser.add_argument(
dest='syslog_address',
envvar='RFM_SYSLOG_ADDRESS',
configvar='logging/handlers_perflog/syslog_address',
help='Syslog server address'
)
argparser.add_argument(
dest='use_login_shell',
envvar='RFM_USE_LOGIN_SHELL',
configvar='general/use_login_shell',
action='store_true',
help='Use a login shell for job scripts'
)
# Parse command line
options = argparser.parse_args()
if len(sys.argv) == 1:
argparser.print_help()
sys.exit(1)
# First configure logging with our generic configuration so as to be able
# to print pretty messages; logging will be reconfigured by user's
# configuration later
site_config = config.load_config(
os.path.join(reframe.INSTALL_PREFIX, 'reframe/core/settings.py')
)
site_config.select_subconfig('generic')
options.update_config(site_config)
logging.configure_logging(site_config)
logging.getlogger().colorize = site_config.get('general/0/colorize')
printer = PrettyPrinter()
printer.colorize = site_config.get('general/0/colorize')
printer.inc_verbosity(site_config.get('general/0/verbose'))
if os.getenv('RFM_GRAYLOG_SERVER'):
printer.warning(
'RFM_GRAYLOG_SERVER environment variable is deprecated; '
'please use RFM_GRAYLOG_ADDRESS instead'
)
os.environ['RFM_GRAYLOG_ADDRESS'] = os.getenv('RFM_GRAYLOG_SERVER')
if options.upgrade_config_file is not None:
old_config, *new_config = options.upgrade_config_file.split(
':', maxsplit=1
)
new_config = new_config[0] if new_config else None
try:
new_config = config.convert_old_config(old_config, new_config)
except Exception as e:
printer.error(f'could not convert file: {e}')
sys.exit(1)
printer.info(
f'Conversion successful! '
f'The converted file can be found at {new_config!r}.'
)
sys.exit(0)
# Now configure ReFrame according to the user configuration file
try:
try:
printer.debug('Loading user configuration')
site_config = config.load_config(options.config_file)
except warnings.ReframeDeprecationWarning as e:
printer.warning(e)
converted = config.convert_old_config(options.config_file)
printer.warning(
f"configuration file has been converted "
f"to the new syntax here: '{converted}'"
)
site_config = config.load_config(converted)
site_config.validate()
# We ignore errors about unresolved sections or configuration
# parameters here, because they might be defined at the individual
        # partition level and will be caught when we instantiate the
        # system and partitions internally later on.
site_config.select_subconfig(options.system,
ignore_resolve_errors=True)
for err in options.update_config(site_config):
printer.warning(str(err))
# Update options from the selected execution mode
if options.mode:
mode_args = site_config.get(f'modes/@{options.mode}/options')
# We lexically split the mode options, because otherwise spaces
# will be treated as part of the option argument; see GH bug #1554
mode_args = list(itertools.chain.from_iterable(shlex.split(m)
for m in mode_args))
# Parse the mode's options and reparse the command-line
options = argparser.parse_args(mode_args)
options = argparser.parse_args(namespace=options.cmd_options)
options.update_config(site_config)
logging.configure_logging(site_config)
except (OSError, errors.ConfigError) as e:
printer.error(f'failed to load configuration: {e}')
printer.error(logfiles_message())
sys.exit(1)
logging.getlogger().colorize = site_config.get('general/0/colorize')
printer.colorize = site_config.get('general/0/colorize')
printer.inc_verbosity(site_config.get('general/0/verbose'))
try:
printer.debug('Initializing runtime')
runtime.init_runtime(site_config)
except errors.ConfigError as e:
printer.error(f'failed to initialize runtime: {e}')
printer.error(logfiles_message())
sys.exit(1)
if site_config.get('general/0/ignore_check_conflicts'):
logging.getlogger().warning(
"the 'ignore_check_conflicts' option is deprecated "
"and will be removed in the future"
)
rt = runtime.runtime()
autodetect.detect_topology()
try:
if site_config.get('general/0/module_map_file'):
rt.modules_system.load_mapping_from_file(
site_config.get('general/0/module_map_file')
)
if site_config.get('general/0/module_mappings'):
for m in site_config.get('general/0/module_mappings'):
rt.modules_system.load_mapping(m)
except (errors.ConfigError, OSError) as e:
printer.error('could not load module mappings: %s' % e)
sys.exit(1)
if (osext.samefile(rt.stage_prefix, rt.output_prefix) and
not site_config.get('general/0/keep_stage_files')):
printer.error("stage and output refer to the same directory; "
"if this is on purpose, please use the "
"'--keep-stage-files' option.")
printer.error(logfiles_message())
sys.exit(1)
# Show configuration after everything is set up
if options.show_config:
config_param = options.show_config
if config_param == 'all':
printer.info(str(rt.site_config))
else:
value = rt.get_option(config_param)
if value is None:
printer.error(
f'no such configuration parameter found: {config_param}'
)
else:
printer.info(json.dumps(value, indent=2))
sys.exit(0)
if options.detect_host_topology:
from reframe.utility.cpuinfo import cpuinfo
topofile = options.detect_host_topology
if topofile == '-':
json.dump(cpuinfo(), sys.stdout, indent=2)
sys.stdout.write('\n')
else:
try:
with open(topofile, 'w') as fp:
json.dump(cpuinfo(), fp, indent=2)
fp.write('\n')
except OSError as e:
getlogger().error(
f'could not write topology file: {topofile!r}'
)
sys.exit(1)
sys.exit(0)
printer.debug(format_env(options.env_vars))
# Setup the check loader
if options.restore_session is not None:
# We need to load the failed checks only from a list of reports
if options.restore_session:
filenames = options.restore_session.split(',')
else:
filenames = [runreport.next_report_filename(
osext.expandvars(site_config.get('general/0/report_file')),
new=False
)]
report = runreport.load_report(*filenames)
check_search_path = list(report.slice('filename', unique=True))
check_search_recursive = False
# If `-c` or `-R` are passed explicitly outside the configuration
# file, override the values set from the report file
if site_config.is_sticky_option('general/check_search_path'):
printer.warning(
'Ignoring check search path set in the report file: '
'search path set explicitly in the command-line or '
'the environment'
)
check_search_path = site_config.get(
'general/0/check_search_path'
)
if site_config.is_sticky_option('general/check_search_recursive'):
printer.warning(
'Ignoring check search recursive option from the report file: '
'option set explicitly in the command-line or the environment'
)
check_search_recursive = site_config.get(
'general/0/check_search_recursive'
)
else:
check_search_recursive = site_config.get(
'general/0/check_search_recursive'
)
check_search_path = site_config.get('general/0/check_search_path')
# Collect any variables set from the command line
external_vars = {}
for expr in options.vars:
try:
lhs, rhs = expr.split('=', maxsplit=1)
except ValueError:
printer.warning(
f'invalid test variable assignment: {expr!r}; skipping'
)
else:
external_vars[lhs] = rhs
loader = RegressionCheckLoader(check_search_path,
check_search_recursive,
external_vars)
def print_infoline(param, value):
param = param + ':'
printer.info(f" {param.ljust(18)} {value}")
session_info = {
'cmdline': ' '.join(sys.argv),
'config_file': rt.site_config.filename,
'data_version': runreport.DATA_VERSION,
'hostname': socket.getfqdn(),
'prefix_output': rt.output_prefix,
'prefix_stage': rt.stage_prefix,
'user': osext.osuser(),
'version': osext.reframe_version(),
'workdir': os.getcwd(),
}
# Print command line
printer.info(f"[ReFrame Setup]")
print_infoline('version', session_info['version'])
print_infoline('command', repr(session_info['cmdline']))
print_infoline(
f"launched by",
f"{session_info['user'] or '<unknown>'}@{session_info['hostname']}"
)
print_infoline('working directory', repr(session_info['workdir']))
print_infoline('settings file', f"{session_info['config_file']!r}")
print_infoline('check search path',
f"{'(R) ' if loader.recurse else ''}"
f"{':'.join(loader.load_path)!r}")
print_infoline('stage directory', repr(session_info['prefix_stage']))
print_infoline('output directory', repr(session_info['prefix_output']))
printer.info('')
try:
# Locate and load checks
checks_found = loader.load_all()
printer.verbose(f'Loaded {len(checks_found)} test(s)')
# Generate all possible test cases first; we will need them for
# resolving dependencies after filtering
# Determine the allowed programming environments
allowed_environs = {e.name
for env_patt in options.prgenv
for p in rt.system.partitions
for e in p.environs if re.match(env_patt, e.name)}
testcases_all = generate_testcases(checks_found,
options.skip_system_check,
options.skip_prgenv_check,
allowed_environs)
testcases = testcases_all
printer.verbose(f'Generated {len(testcases)} test case(s)')
# Filter test cases by name
if options.exclude_names:
for name in options.exclude_names:
testcases = filter(filters.have_not_name(name), testcases)
if options.names:
testcases = filter(
filters.have_name('|'.join(options.names)), testcases
)
testcases = list(testcases)
printer.verbose(
            f'Filtering test case(s) by name: {len(testcases)} remaining'
)
# Filter test cases by tags
for tag in options.exclude_tags:
testcases = filter(filters.have_not_tag(tag), testcases)
for tag in options.tags:
testcases = filter(filters.have_tag(tag), testcases)
testcases = list(testcases)
printer.verbose(
            f'Filtering test case(s) by tags: {len(testcases)} remaining'
)
# Filter test cases further
if options.gpu_only and options.cpu_only:
printer.error("options `--gpu-only' and `--cpu-only' "
"are mutually exclusive")
sys.exit(1)
if options.gpu_only:
testcases = filter(filters.have_gpu_only(), testcases)
elif options.cpu_only:
testcases = filter(filters.have_cpu_only(), testcases)
testcases = list(testcases)
printer.verbose(
            f'Filtering test case(s) by other attributes: '
f'{len(testcases)} remaining'
)
# Filter in failed cases
if options.failed:
if options.restore_session is None:
printer.error(
"the option '--failed' can only be used "
"in combination with the '--restore-session' option"
)
sys.exit(1)
def _case_failed(t):
rec = report.case(*t)
if not rec:
return False
return (rec['result'] == 'failure' or
rec['result'] == 'aborted')
testcases = list(filter(_case_failed, testcases))
printer.verbose(
f'Filtering successful test case(s): '
f'{len(testcases)} remaining'
)
# Prepare for running
printer.debug('Building and validating the full test DAG')
testgraph, skipped_cases = dependencies.build_deps(testcases_all)
if skipped_cases:
# Some cases were skipped, so adjust testcases
testcases = list(set(testcases) - set(skipped_cases))
printer.verbose(
f'Filtering test case(s) due to unresolved dependencies: '
f'{len(testcases)} remaining'
)
dependencies.validate_deps(testgraph)
printer.debug('Full test DAG:')
printer.debug(dependencies.format_deps(testgraph))
restored_cases = []
if len(testcases) != len(testcases_all):
testgraph = dependencies.prune_deps(
testgraph, testcases,
max_depth=1 if options.restore_session is not None else None
)
printer.debug('Pruned test DAG')
printer.debug(dependencies.format_deps(testgraph))
if options.restore_session is not None:
testgraph, restored_cases = report.restore_dangling(testgraph)
testcases = dependencies.toposort(
testgraph,
is_subgraph=options.restore_session is not None
)
printer.verbose(f'Final number of test cases: {len(testcases)}')
# Disable hooks
for tc in testcases:
for h in options.hooks:
tc.check.disable_hook(h)
# Act on checks
if options.list or options.list_detailed:
list_checks(testcases, printer, options.list_detailed)
sys.exit(0)
if options.list_tags:
list_tags(testcases, printer)
sys.exit(0)
if options.ci_generate:
list_checks(testcases, printer)
printer.info('[Generate CI]')
with open(options.ci_generate, 'wt') as fp:
ci.emit_pipeline(fp, testcases)
printer.info(
f' Gitlab pipeline generated successfully '
f'in {options.ci_generate!r}.\n'
)
sys.exit(0)
if not options.run:
printer.error("No action option specified. Available options:\n"
" - `-l'/`-L' for listing\n"
" - `-r' for running\n"
" - `--list-tags' for listing unique test tags\n"
" - `--ci-generate' for generating a CI pipeline\n"
f"Try `{argparser.prog} -h' for more options.")
sys.exit(1)
# Manipulate ReFrame's environment
if site_config.get('general/0/purge_environment'):
rt.modules_system.unload_all()
else:
for m in site_config.get('general/0/unload_modules'):
rt.modules_system.unload_module(**m)
# Load the environment for the current system
try:
printer.debug(f'Loading environment for current system')
runtime.loadenv(rt.system.preload_environ)
except errors.EnvironError as e:
printer.error("failed to load current system's environment; "
"please check your configuration")
printer.debug(str(e))
raise
def module_use(*paths):
try:
rt.modules_system.searchpath_add(*paths)
except errors.EnvironError as e:
printer.warning(f'could not add module paths correctly')
printer.debug(str(e))
def module_unuse(*paths):
try:
rt.modules_system.searchpath_remove(*paths)
except errors.EnvironError as e:
printer.warning(f'could not remove module paths correctly')
printer.debug(str(e))
printer.debug('(Un)using module paths from command line')
module_paths = {}
for d in options.module_paths:
if d.startswith('-'):
module_paths.setdefault('-', [])
module_paths['-'].append(d[1:])
elif d.startswith('+'):
module_paths.setdefault('+', [])
module_paths['+'].append(d[1:])
else:
module_paths.setdefault('x', [])
module_paths['x'].append(d)
for op, paths in module_paths.items():
if op == '+':
module_use(*paths)
elif op == '-':
module_unuse(*paths)
else:
# First empty the current module path in a portable way
searchpath = [p for p in rt.modules_system.searchpath if p]
if searchpath:
rt.modules_system.searchpath_remove(*searchpath)
# Treat `A:B` syntax as well in this case
paths = itertools.chain(*(p.split(':') for p in paths))
module_use(*paths)
printer.debug('Loading user modules from command line')
for m in site_config.get('general/0/user_modules'):
try:
rt.modules_system.load_module(**m, force=True)
except errors.EnvironError as e:
printer.warning(
f'could not load module {m["name"]!r} correctly; '
f'skipping...'
)
printer.debug(str(e))
options.flex_alloc_nodes = options.flex_alloc_nodes or 'idle'
# Run the tests
# Setup the execution policy
if options.exec_policy == 'serial':
exec_policy = SerialExecutionPolicy()
elif options.exec_policy == 'async':
exec_policy = AsynchronousExecutionPolicy()
else:
# This should not happen, since choices are handled by
# argparser
printer.error("unknown execution policy `%s': Exiting...")
sys.exit(1)
exec_policy.skip_system_check = options.skip_system_check
exec_policy.force_local = options.force_local
exec_policy.strict_check = options.strict
exec_policy.skip_sanity_check = options.skip_sanity_check
exec_policy.skip_performance_check = options.skip_performance_check
exec_policy.keep_stage_files = site_config.get(
'general/0/keep_stage_files'
)
try:
errmsg = "invalid option for --flex-alloc-nodes: '{0}'"
sched_flex_alloc_nodes = int(options.flex_alloc_nodes)
if sched_flex_alloc_nodes <= 0:
raise errors.ConfigError(
errmsg.format(options.flex_alloc_nodes)
)
except ValueError:
sched_flex_alloc_nodes = options.flex_alloc_nodes
exec_policy.sched_flex_alloc_nodes = sched_flex_alloc_nodes
parsed_job_options = []
for opt in options.job_options:
opt_split = opt.split('=', maxsplit=1)
optstr = opt_split[0]
valstr = opt_split[1] if len(opt_split) > 1 else ''
if opt.startswith('-') or opt.startswith('#'):
parsed_job_options.append(opt)
elif len(optstr) == 1:
parsed_job_options.append(f'-{optstr} {valstr}')
else:
parsed_job_options.append(f'--{optstr} {valstr}')
exec_policy.sched_options = parsed_job_options
try:
max_retries = int(options.max_retries)
except ValueError:
raise errors.ConfigError(
                f'--max-retries is not a valid integer: {options.max_retries!r}'
) from None
try:
max_failures = int(options.maxfail)
if max_failures < 0:
raise errors.ConfigError(
f'--maxfail should be a non-negative integer: '
f'{options.maxfail!r}'
)
except ValueError:
raise errors.ConfigError(
f'--maxfail is not a valid integer: {options.maxfail!r}'
) from None
runner = Runner(exec_policy, printer, max_retries, max_failures)
try:
time_start = time.time()
session_info['time_start'] = time.strftime(
'%FT%T%z', time.localtime(time_start),
)
runner.runall(testcases, restored_cases)
finally:
time_end = time.time()
session_info['time_end'] = time.strftime(
'%FT%T%z', time.localtime(time_end)
)
session_info['time_elapsed'] = time_end - time_start
# Print a retry report if we did any retries
if runner.stats.failed(run=0):
printer.info(runner.stats.retry_report())
# Print a failure report if we had failures in the last run
success = True
if runner.stats.failed():
success = False
runner.stats.print_failure_report(printer)
if options.failure_stats:
runner.stats.print_failure_stats(printer)
if options.performance_report:
printer.info(runner.stats.performance_report())
# Generate the report for this session
report_file = os.path.normpath(
osext.expandvars(rt.get_option('general/0/report_file'))
)
basedir = os.path.dirname(report_file)
if basedir:
os.makedirs(basedir, exist_ok=True)
# Build final JSON report
run_stats = runner.stats.json()
session_info.update({
'num_cases': run_stats[0]['num_cases'],
'num_failures': run_stats[-1]['num_failures']
})
json_report = {
'session_info': session_info,
'runs': run_stats,
'restored_cases': []
}
if options.restore_session is not None:
for c in restored_cases:
json_report['restored_cases'].append(report.case(*c))
report_file = runreport.next_report_filename(report_file)
try:
with open(report_file, 'w') as fp:
jsonext.dump(json_report, fp, indent=2)
fp.write('\n')
printer.info(f'Run report saved in {report_file!r}')
except OSError as e:
printer.warning(
f'failed to generate report in {report_file!r}: {e}'
)
# Generate the junit xml report for this session
junit_report_file = rt.get_option('general/0/report_junit')
if junit_report_file:
# Expand variables in filename
junit_report_file = osext.expandvars(junit_report_file)
junit_xml = runreport.junit_xml_report(json_report)
try:
with open(junit_report_file, 'w') as fp:
runreport.junit_dump(junit_xml, fp)
except OSError as e:
printer.warning(
f'failed to generate report in {junit_report_file!r}: '
f'{e}'
)
if not success:
sys.exit(1)
sys.exit(0)
except (Exception, KeyboardInterrupt, errors.ReframeFatalError):
exc_info = sys.exc_info()
tb = ''.join(traceback.format_exception(*exc_info))
printer.error(f'run session stopped: {errors.what(*exc_info)}')
if errors.is_exit_request(*exc_info):
# Print stack traces for exit requests only when TOO verbose
printer.debug2(tb)
elif errors.is_severe(*exc_info):
printer.error(tb)
else:
printer.verbose(tb)
sys.exit(1)
finally:
try:
log_files = logging.log_files()
if site_config.get('general/0/save_log_files'):
log_files = logging.save_log_files(rt.output_prefix)
except OSError as e:
printer.error(f'could not save log file: {e}')
sys.exit(1)
finally:
printer.info(logfiles_message())
|
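The `-J/--job-option` values above are normalized before being handed to the scheduler: options that already start with `-` or `#` pass through unchanged, single-letter names become short options, and anything else becomes a long option. A small standalone sketch of that normalization, with made-up option strings for illustration only (the function name is not part of the code above):

def normalize_job_options(job_options):
    """Mirror the -J/--job-option normalization performed above."""
    parsed = []
    for opt in job_options:
        optstr, _, valstr = opt.partition('=')
        if opt.startswith('-') or opt.startswith('#'):
            parsed.append(opt)
        elif len(optstr) == 1:
            parsed.append(f'-{optstr} {valstr}')
        else:
            parsed.append(f'--{optstr} {valstr}')
    return parsed

# Hypothetical inputs, for illustration only
print(normalize_job_options(['mem=4G', 'p=debug', '--exclusive', '#SBATCH --gres=gpu:1']))
# ['--mem 4G', '-p debug', '--exclusive', '#SBATCH --gres=gpu:1']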
37,041 |
def _text_checker(job, interval, _interval_set=False, quiet=False, to_file=None):
"""A text-based job status checker
Args:
job (BaseJob): The job to check.
interval (int): The interval at which to check.
_interval_set (bool): Was interval time set by user?
quiet (bool): If True, do not print status messages.
        to_file (file): If given, print status messages to it; otherwise print to stdout.
"""
_outstream = to_file if to_file else sys.stdout
status = job.status()
msg = status.value
prev_msg = msg
msg_len = len(msg)
if not quiet:
print('\r%s: %s' % ('Job Status', msg), end='', file=_outstream)
while status.name not in ['DONE', 'CANCELLED', 'ERROR']:
time.sleep(interval)
status = job.status()
msg = status.value
if status.name == 'QUEUED':
msg += ' (%s)' % job.queue_position()
if not _interval_set:
interval = max(job.queue_position(), 2)
else:
if not _interval_set:
interval = 2
# Adjust length of message so there are no artifacts
if len(msg) < msg_len:
msg += ' ' * (msg_len - len(msg))
elif len(msg) > msg_len:
msg_len = len(msg)
if msg != prev_msg and not quiet:
print('\r%s: %s' % ('Job Status', msg), end='', file=_outstream)
prev_msg = msg
if not quiet:
print('', file=_outstream)
|
def _text_checker(job, interval, _interval_set=False, quiet=False, to_file=None):
"""A text-based job status checker
Args:
job (BaseJob): The job to check.
interval (int): The interval at which to check.
_interval_set (bool): Was interval time set by user?
quiet (bool): If True, do not print status messages.
        to_file (file): If given, print status messages to it; otherwise print to stdout.
"""
_outstream = to_file if to_file else sys.stdout
status = job.status()
msg = status.value
prev_msg = msg
msg_len = len(msg)
if not quiet:
        print('\r%s: %s' % ('Job Status', msg), end='', file=_outstream)
while status.name not in ['DONE', 'CANCELLED', 'ERROR']:
time.sleep(interval)
status = job.status()
msg = status.value
if status.name == 'QUEUED':
msg += ' (%s)' % job.queue_position()
if not _interval_set:
interval = max(job.queue_position(), 2)
else:
if not _interval_set:
interval = 2
# Adjust length of message so there are no artifacts
if len(msg) < msg_len:
msg += ' ' * (msg_len - len(msg))
elif len(msg) > msg_len:
msg_len = len(msg)
if msg != prev_msg and not quiet:
print('\r%s: %s' % ('Job Status', msg), end='', file=_outstream)
prev_msg = msg
if not quiet:
print('', file=_outstream)
|
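The checker above only relies on a job exposing `status()` (returning an object with `name` and `value` attributes) and `queue_position()`. A minimal dummy job satisfying that interface can be handy for trying the checker without a real backend; everything below (DummyStatus, DummyJob) is hypothetical and for illustration only:

from enum import Enum

class DummyStatus(Enum):
    QUEUED = 'job is queued'
    RUNNING = 'job is actively running'
    DONE = 'job has successfully run'

class DummyJob:
    """Fake job that walks through QUEUED -> RUNNING -> DONE."""
    def __init__(self):
        self._states = iter([DummyStatus.QUEUED, DummyStatus.RUNNING,
                             DummyStatus.DONE, DummyStatus.DONE])
    def status(self):
        return next(self._states)
    def queue_position(self):
        return 1

# _text_checker(DummyJob(), interval=1)   # prints the evolving status line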
55,745 |
def linear_index(row_range, col_range):
"""Generate a linear index from a set of row and column indices.
    A linear index starts at the top left and proceeds in a raster fashion.
    Parameters
    ----------
row_range : range
Range of rows to be accessed.
col_range : range
Range of columns to be accessed.
Returns
-------
generator
(row, column) tuples in order of a linear index.
"""
for row in row_range:
for col in col_range:
yield (row, col)
|
def linear_index(row_range, col_range):
"""Generate a linear index from a set of row and column indices.
    A linear index starts at the top left and proceeds in a raster fashion.
    Parameters
    ----------
row_range : range
Range of rows to be accessed.
col_range : range
Range of columns to be accessed.
Returns
-------
generator
(row, column) tuples in order of a linear index.
"""
from itertools import product
yield from product(row_range, col_range)
|
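To make the "raster fashion" ordering concrete, a quick usage check (assuming either linear_index definition above is in scope):

print(list(linear_index(range(2), range(3))))
# [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2)]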
27,763 |
def test_show_multi_test_fixture_setup_and_teardown_same_as_setup_show(pytester: Pytester):
"""Verify that SETUP/TEARDOWN messages match what comes out of --setup-show."""
pytester.makepyfile(
"""
import pytest
@pytest.fixture(scope = 'session')
def sess():
return True
@pytest.fixture(scope = 'module')
def mod():
return True
@pytest.fixture(scope = 'class')
def cls():
return True
@pytest.fixture(scope = 'function')
def func():
return True
def test_outside(sess, mod, cls, func):
assert True
class TestCls:
def test_one(self, sess, mod, cls, func):
assert True
def test_two(self, sess, mod, cls, func):
assert True
"""
)
plan_result = pytester.runpytest("--setup-plan")
show_result = pytester.runpytest("--setup-show")
# the number and text of these lines should be identical
plan_lines = [
line
for line in plan_result.stdout.lines
if "SETUP" in line or "TEARDOWN" in line
]
show_lines = [
line
for line in show_result.stdout.lines
if "SETUP" in line or "TEARDOWN" in line
]
assert plan_lines == show_lines
|
def test_show_multi_test_fixture_setup_and_teardown_same_as_setup_show(pytester: Pytester) -> None:
"""Verify that SETUP/TEARDOWN messages match what comes out of --setup-show."""
pytester.makepyfile(
"""
import pytest
@pytest.fixture(scope = 'session')
def sess():
return True
@pytest.fixture(scope = 'module')
def mod():
return True
@pytest.fixture(scope = 'class')
def cls():
return True
@pytest.fixture(scope = 'function')
def func():
return True
def test_outside(sess, mod, cls, func):
assert True
class TestCls:
def test_one(self, sess, mod, cls, func):
assert True
def test_two(self, sess, mod, cls, func):
assert True
"""
)
plan_result = pytester.runpytest("--setup-plan")
show_result = pytester.runpytest("--setup-show")
# the number and text of these lines should be identical
plan_lines = [
line
for line in plan_result.stdout.lines
if "SETUP" in line or "TEARDOWN" in line
]
show_lines = [
line
for line in show_result.stdout.lines
if "SETUP" in line or "TEARDOWN" in line
]
assert plan_lines == show_lines
|
25,176 |
def infer_typing_typevar(node, context_itton=None):
"""Infer a typing.TypeVar(...) or typing.NewType(...) call"""
try:
func = next(node.func.infer(context=context_itton))
except (InferenceError, StopIteration) as exc:
raise UseInferenceDefault from exc
if func.qname() != "typing.TypeVar":
raise UseInferenceDefault
if not node.args:
raise UseInferenceDefault
typename = node.args[0].as_string().strip("'")
node = extract_node(TYPING_TYPE_TEMPLATE.format(typename))
return node.infer(context=context_itton)
|
def infer_typing_typevar(node, context_itton=None):
"""Infer a typing.TypeVar(...) call"""
try:
func = next(node.func.infer(context=context_itton))
except (InferenceError, StopIteration) as exc:
raise UseInferenceDefault from exc
if func.qname() != "typing.TypeVar":
raise UseInferenceDefault
if not node.args:
raise UseInferenceDefault
typename = node.args[0].as_string().strip("'")
node = extract_node(TYPING_TYPE_TEMPLATE.format(typename))
return node.infer(context=context_itton)
|
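Inference helpers like the one above are typically wired into astroid as an inference tip on `Call` nodes. A hedged sketch of that registration, assuming astroid's usual brain-plugin pattern; the predicate `_looks_like_typing_typevar` below is hypothetical and shown only to illustrate the shape:

import astroid
from astroid import MANAGER, nodes

def _looks_like_typing_typevar(node):
    # Cheap syntactic check before attempting the (more expensive) inference
    func = node.func
    if isinstance(func, nodes.Attribute):
        return func.attrname == 'TypeVar'
    if isinstance(func, nodes.Name):
        return func.name == 'TypeVar'
    return False

MANAGER.register_transform(
    nodes.Call,
    astroid.inference_tip(infer_typing_typevar),
    _looks_like_typing_typevar,
)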
45,690 |
def ManhattanPlot(
dataframe,
chrm="CHR",
bp="BP",
p="P",
snp="SNP",
gene="GENE",
annotation=None,
logp=True,
title="Manhattan Plot",
showgrid=True,
xlabel=None,
ylabel='-log10(p)',
point_size=5,
showlegend=True,
col=None,
suggestiveline_value=-np.log10(1e-8),
suggestiveline_color='#636efa',
suggestiveline_width=1,
genomewideline_value=-np.log10(5e-8),
genomewideline_color='#EF553B',
genomewideline_width=1,
highlight=True,
highlight_color="red",
):
"""Returns a figure for a manhattan plot.
Keyword arguments:
- dataframe (dataframe; required): A pandas dataframe which must contain at
least the following three columns:
- the chromosome number
- genomic base-pair position
- a numeric quantity to plot such as a p-value or zscore
- chrm (string; default 'CHR'): A string denoting the column name for
the chromosome. This column must be float or integer. Minimum
number of chromosomes required is 1. If you have X, Y, or MT
chromosomes, be sure to renumber these 23, 24, 25, etc.
- bp (string; default 'BP'): A string denoting the column name for the
chromosomal position.
- p (string; default 'P'): A string denoting the column name for the
float quantity to be plotted on the y-axis. This column must be
numeric. This does not have to be a p-value. It can be any numeric
quantity such as peak heights, bayes factors, test statistics. If
it is not a p-value, make sure to set logp = FALSE.
- snp (string; default 'SNP'): A string denoting the column name for
the SNP names (e.g. rs number). More generally, this column could
be anything that identifies each point being plotted. For example,
in an Epigenomewide association study (EWAS) this could be the
probe name or cg number. This column should be a character. This
argument is optional, however it is necessary to specify if you
want to highlight points on the plot using the highlight argument
in the figure method.
- gene (string; default 'GENE'): A string denoting the column name for
the GENE names. This column could be a string or a float. More
generally this could be any annotation information that you want
to include in the plot.
- annotation (string; optional): A string denoting the column name for
an annotation. This column could be a string or a float. This
could be any annotation information that you want to include in
the plot (e.g. zscore, effect size, minor allele frequency).
- logp (bool; optional): If True, the -log10 of the p-value is
plotted. It isn't very useful to plot raw p-values; however,
plotting the raw value could be useful for other genome-wide plots
(e.g., peak heights, bayes factors, test statistics, other
"scores", etc.)
- title (string; default 'Manhattan Plot') The title of the graph.
- showgrid (bool; default true): Boolean indicating whether gridlines
should be shown.
- xlabel (string; optional): Label of the x axis.
- ylabel: (string; default '-log10(p)'): Label of the y axis.
- point_size (number; default 5): Size of the points of the Scatter
plot.
- showlegend (bool; default true): Boolean indicating whether legends
should be shown.
- col (string; optional): A string representing the color of the
points of the Scatter plot. Can be in any color format accepted by
plotly_js graph_objs.
- suggestiveline_value (bool | float; default 8): A value which must
be False to deactivate the option, or a numerical value
corresponding to the p-value at which the line should be drawn.
The line has no influence on the data points.
- suggestiveline_color (string; default 'grey'): Color of the suggestive
line.
- suggestiveline_width (number; default 2): Width of the suggestive
line.
- genomewideline_value (bool | float; default -log10(5e-8)): A boolean
which must be False to deactivate the option, or a numerical value
corresponding to the p-value above which the data points are
considered significant.
- genomewideline_color (string; default 'red'): Color of the genome wide
line. Can be in any color format accepted by plotly_js
graph_objs.
- genomewideline_width (number; default 1): Width of the genome wide
line.
- highlight (bool; default true): turning on/off the highlighting of
data points considered significant.
- highlight_color (string; default 'red'): Color of the data points
highlighted because they are significant Can be in any color
format accepted by plotly_js graph_objs.
# ...
Example 1: Random Manhattan Plot
'''
dataframe = pd.DataFrame(
np.random.randint(0,100,size=(100, 3)),
columns=['P', 'CHR', 'BP'])
fig = create_manhattan(dataframe, title='XYZ Manhattan plot')
plotly.offline.plot(fig, image='png')
'''
"""
mh = _ManhattanPlot(
dataframe,
chrm=chrm,
bp=bp,
p=p,
snp=snp,
gene=gene,
annotation=annotation,
logp=logp
)
return mh.figure(
title=title,
showgrid=showgrid,
xlabel=xlabel,
ylabel=ylabel,
point_size=point_size,
showlegend=showlegend,
col=col,
suggestiveline_value=suggestiveline_value,
suggestiveline_color=suggestiveline_color,
suggestiveline_width=suggestiveline_width,
genomewideline_value=genomewideline_value,
genomewideline_color=genomewideline_color,
genomewideline_width=genomewideline_width,
highlight=highlight,
highlight_color=highlight_color
)
|
def ManhattanPlot(
dataframe,
chrm="CHR",
bp="BP",
p="P",
snp="SNP",
gene="GENE",
annotation=None,
logp=True,
title="Manhattan Plot",
showgrid=True,
xlabel=None,
ylabel='-log10(p)',
point_size=5,
showlegend=True,
col=None,
suggestiveline_value=-np.log10(1e-8),
suggestiveline_color='#636efa',
suggestiveline_width=1,
genomewideline_value=-np.log10(5e-8),
genomewideline_color='#EF553B',
genomewideline_width=1,
highlight=True,
highlight_color="red",
):
"""Returns a figure for a manhattan plot.
Keyword arguments:
- dataframe (dataframe; required): A pandas dataframe which must contain at
least the following three columns:
- the chromosome number
- genomic base-pair position
- a numeric quantity to plot such as a p-value or zscore
- chrm (string; default 'CHR'): A string denoting the column name for
the chromosome. This column must be float or integer. Minimum
number of chromosomes required is 1. If you have X, Y, or MT
chromosomes, be sure to renumber these 23, 24, 25, etc.
- bp (string; default 'BP'): A string denoting the column name for the
chromosomal position.
- p (string; default 'P'): A string denoting the column name for the
float quantity to be plotted on the y-axis. This column must be
numeric. This does not have to be a p-value. It can be any numeric
quantity such as peak heights, Bayes factors, test statistics. If
it is not a p-value, make sure to set logp = FALSE.
- snp (string; default 'SNP'): A string denoting the column name for
the SNP names (e.g. rs number). More generally, this column could
be anything that identifies each point being plotted. For example,
in an Epigenomewide association study (EWAS) this could be the
probe name or cg number. This column should be a character. This
argument is optional, however it is necessary to specify if you
want to highlight points on the plot using the highlight argument
in the figure method.
- gene (string; default 'GENE'): A string denoting the column name for
the GENE names. This column could be a string or a float. More
generally this could be any annotation information that you want
to include in the plot.
- annotation (string; optional): A string denoting the column name for
an annotation. This column could be a string or a float. This
could be any annotation information that you want to include in
the plot (e.g. zscore, effect size, minor allele frequency).
- logp (bool; optional): If True, the -log10 of the p-value is
plotted. It isn't very useful to plot raw p-values; however,
plotting the raw value could be useful for other genome-wide plots
(e.g., peak heights, bayes factors, test statistics, other
"scores", etc.)
- title (string; default 'Manhattan Plot') The title of the graph.
- showgrid (bool; default true): Boolean indicating whether gridlines
should be shown.
- xlabel (string; optional): Label of the x axis.
- ylabel: (string; default '-log10(p)'): Label of the y axis.
- point_size (number; default 5): Size of the points of the Scatter
plot.
- showlegend (bool; default true): Boolean indicating whether legends
should be shown.
- col (string; optional): A string representing the color of the
points of the Scatter plot. Can be in any color format accepted by
plotly_js graph_objs.
- suggestiveline_value (bool | float; default 8): A value which must
be False to deactivate the option, or a numerical value
corresponding to the p-value at which the line should be drawn.
The line has no influence on the data points.
- suggestiveline_color (string; default 'grey'): Color of the suggestive
line.
- suggestiveline_width (number; default 2): Width of the suggestive
line.
- genomewideline_value (bool | float; default -log10(5e-8)): A boolean
which must be False to deactivate the option, or a numerical value
corresponding to the p-value above which the data points are
considered significant.
- genomewideline_color (string; default 'red'): Color of the genome wide
line. Can be in any color format accepted by plotly_js
graph_objs.
- genomewideline_width (number; default 1): Width of the genome wide
line.
- highlight (bool; default true): turning on/off the highlighting of
data points considered significant.
- highlight_color (string; default 'red'): Color of the data points
highlighted because they are significant Can be in any color
format accepted by plotly_js graph_objs.
# ...
Example 1: Random Manhattan Plot
'''
dataframe = pd.DataFrame(
np.random.randint(0,100,size=(100, 3)),
columns=['P', 'CHR', 'BP'])
fig = create_manhattan(dataframe, title='XYZ Manhattan plot')
plotly.offline.plot(fig, image='png')
'''
"""
mh = _ManhattanPlot(
dataframe,
chrm=chrm,
bp=bp,
p=p,
snp=snp,
gene=gene,
annotation=annotation,
logp=logp
)
return mh.figure(
title=title,
showgrid=showgrid,
xlabel=xlabel,
ylabel=ylabel,
point_size=point_size,
showlegend=showlegend,
col=col,
suggestiveline_value=suggestiveline_value,
suggestiveline_color=suggestiveline_color,
suggestiveline_width=suggestiveline_width,
genomewideline_value=genomewideline_value,
genomewideline_color=genomewideline_color,
genomewideline_width=genomewideline_width,
highlight=highlight,
highlight_color=highlight_color
)
|
1,463 |
def plot_roc_curve(estimator, X, y, pos_label=None, sample_weight=None,
drop_intermediate=True, response_method="auto",
name=None, ax=None, **kwargs):
"""Plot Receiver operating characteristic (ROC) curve.
Extra keyword arguments will be passed to matplotlib's `plot`.
Read more in the :ref:`User Guide <visualizations>`.
Parameters
----------
estimator : estimator instance
Trained classifier.
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Input values.
y : array-like of shape (n_samples,)
Target values.
pos_label : int or str, default=None
The label of the positive class.
When `pos_label=None`, if y_true is in {-1, 1} or {0, 1},
`pos_label` is set to 1, otherwise an error will be raised.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
drop_intermediate : boolean, default=True
Whether to drop some suboptimal thresholds which would not appear
on a plotted ROC curve. This is useful in order to create lighter
ROC curves.
response_method : {'predict_proba', 'decision_function', 'auto'} \
default='auto'
Specifies whether to use :term:`predict_proba` or
:term:`decision_function` as the target response. If set to 'auto',
:term:`predict_proba` is tried first and if it does not exist
:term:`decision_function` is tried next.
name : str, default=None
Name of ROC Curve for labeling. If `None`, use the name of the
estimator.
ax : matplotlib axes, default=None
Axes object to plot on. If `None`, a new figure and axes is created.
Returns
-------
display : :class:`~sklearn.metrics.RocCurveDisplay`
Object that stores computed values.
Examples
--------
>>> import matplotlib.pyplot as plt # doctest: +SKIP
>>> from sklearn import datasets, metrics, model_selection, svm
>>> X, y = datasets.make_classification(random_state=0)
>>> X_train, X_test, y_train, y_test = model_selection.train_test_split(\
X, y, random_state=0)
>>> clf = svm.SVC(random_state=0)
>>> clf.fit(X_train, y_train)
SVC(random_state=0)
>>> metrics.plot_roc_curve(clf, X_test, y_test) # doctest: +SKIP
>>> plt.show() # doctest: +SKIP
"""
check_matplotlib_support('plot_roc_curve')
if response_method not in ("predict_proba", "decision_function", "auto"):
raise ValueError("response_method must be 'predict_proba', "
"'decision_function' or 'auto'")
if response_method != "auto":
prediction_method = getattr(estimator, response_method, None)
if prediction_method is None:
raise ValueError(
"response method {} is not defined".format(response_method))
else:
predict_proba = getattr(estimator, 'predict_proba', None)
decision_function = getattr(estimator, 'decision_function', None)
prediction_method = predict_proba or decision_function
if prediction_method is None:
raise ValueError('response methods not defined')
if (pos_label is not None
and hasattr(estimator, "classes_")
and pos_label not in set(estimator.classes_)):
estimator_name = estimator.__class__.__name__
expected_classes = set(estimator.classes_)
raise ValueError("pos_label={} is not a valid class label for {}. "
"Expected one of {}."
.format(repr(pos_label),
estimator_name,
expected_classes))
y_pred = prediction_method(X)
if y_pred.ndim != 1:
if y_pred.shape[1] > 2:
raise ValueError("Estimator should solve a "
"binary classification problem")
y_pred = y_pred[:, 1]
fpr, tpr, _ = roc_curve(y, y_pred, pos_label=pos_label,
sample_weight=sample_weight,
drop_intermediate=drop_intermediate)
roc_auc = auc(fpr, tpr)
viz = RocCurveDisplay(fpr, tpr, roc_auc, estimator.__class__.__name__)
return viz.plot(ax=ax, name=name, **kwargs)
|
def plot_roc_curve(estimator, X, y, pos_label=None, sample_weight=None,
drop_intermediate=True, response_method="auto",
name=None, ax=None, **kwargs):
"""Plot Receiver operating characteristic (ROC) curve.
Extra keyword arguments will be passed to matplotlib's `plot`.
Read more in the :ref:`User Guide <visualizations>`.
Parameters
----------
estimator : estimator instance
Trained classifier.
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Input values.
y : array-like of shape (n_samples,)
Target values.
pos_label : int or str, default=None
The label of the positive class.
When `pos_label=None`, if y_true is in {-1, 1} or {0, 1},
`pos_label` is set to 1, otherwise an error will be raised.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
drop_intermediate : boolean, default=True
Whether to drop some suboptimal thresholds which would not appear
on a plotted ROC curve. This is useful in order to create lighter
ROC curves.
response_method : {'predict_proba', 'decision_function', 'auto'} \
default='auto'
Specifies whether to use :term:`predict_proba` or
:term:`decision_function` as the target response. If set to 'auto',
:term:`predict_proba` is tried first and if it does not exist
:term:`decision_function` is tried next.
name : str, default=None
Name of ROC Curve for labeling. If `None`, use the name of the
estimator.
ax : matplotlib axes, default=None
Axes object to plot on. If `None`, a new figure and axes is created.
Returns
-------
display : :class:`~sklearn.metrics.RocCurveDisplay`
Object that stores computed values.
Examples
--------
>>> import matplotlib.pyplot as plt # doctest: +SKIP
>>> from sklearn import datasets, metrics, model_selection, svm
>>> X, y = datasets.make_classification(random_state=0)
>>> X_train, X_test, y_train, y_test = model_selection.train_test_split(\
X, y, random_state=0)
>>> clf = svm.SVC(random_state=0)
>>> clf.fit(X_train, y_train)
SVC(random_state=0)
>>> metrics.plot_roc_curve(clf, X_test, y_test) # doctest: +SKIP
>>> plt.show() # doctest: +SKIP
"""
check_matplotlib_support('plot_roc_curve')
if response_method not in ("predict_proba", "decision_function", "auto"):
raise ValueError("response_method must be 'predict_proba', "
"'decision_function' or 'auto'")
if response_method != "auto":
prediction_method = getattr(estimator, response_method, None)
if prediction_method is None:
raise ValueError(
"response method {} is not defined".format(response_method))
else:
predict_proba = getattr(estimator, 'predict_proba', None)
decision_function = getattr(estimator, 'decision_function', None)
prediction_method = predict_proba or decision_function
if prediction_method is None:
raise ValueError('response methods not defined')
if (pos_label is not None
and hasattr(estimator, "classes_")
and pos_label not in estimator.classes_):
estimator_name = estimator.__class__.__name__
expected_classes = set(estimator.classes_)
raise ValueError("pos_label={} is not a valid class label for {}. "
"Expected one of {}."
.format(repr(pos_label),
estimator_name,
expected_classes))
y_pred = prediction_method(X)
if y_pred.ndim != 1:
if y_pred.shape[1] > 2:
raise ValueError("Estimator should solve a "
"binary classification problem")
y_pred = y_pred[:, 1]
fpr, tpr, _ = roc_curve(y, y_pred, pos_label=pos_label,
sample_weight=sample_weight,
drop_intermediate=drop_intermediate)
roc_auc = auc(fpr, tpr)
viz = RocCurveDisplay(fpr, tpr, roc_auc, estimator.__class__.__name__)
return viz.plot(ax=ax, name=name, **kwargs)
|
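A minimal, standalone sketch of the response_method="auto" fallback implemented in plot_roc_curve above: predict_proba is preferred and decision_function is used otherwise. ToyClassifier and resolve_prediction_method are illustrative names, not part of scikit-learn.

class ToyClassifier:
    """Hypothetical estimator exposing only decision_function."""
    def decision_function(self, X):
        return [0.0 for _ in X]

def resolve_prediction_method(estimator, response_method="auto"):
    # Mirror the fallback order described above: predict_proba first,
    # then decision_function.
    if response_method != "auto":
        method = getattr(estimator, response_method, None)
        if method is None:
            raise ValueError("response method {} is not defined".format(response_method))
        return method
    method = (getattr(estimator, "predict_proba", None)
              or getattr(estimator, "decision_function", None))
    if method is None:
        raise ValueError("response methods not defined")
    return method

print(resolve_prediction_method(ToyClassifier()).__name__)  # decision_function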
54,290 |
def qmu(mu, data, pdf, init_pars, par_bounds):
r"""
The test statistic, :math:`q_{\mu}`, for establishing an upper
    limit on the strength parameter, :math:`\mu`, as defined in
Equation (14) in :xref:`arXiv:1007.1727`.
.. math::
:nowrap:
\begin{equation}
q_{\mu} = \left\{\begin{array}{ll}
-2\ln\lambda\left(\mu\right), &\hat{\mu} < \mu,\\
0, & \hat{\mu} > \mu
\end{array}\right.
\end{equation}
Example:
>>> import pyhf
>>> pyhf.set_backend("numpy")
>>> model = pyhf.simplemodels.hepdata_like(
... signal_data=[12.0, 11.0], bkg_data=[50.0, 52.0], bkg_uncerts=[3.0, 7.0]
... )
>>> observations = [51, 48]
>>> data = pyhf.tensorlib.astensor(observations + model.config.auxdata)
>>> test_mu = 1.0
>>> init_pars = model.config.suggested_init()
>>> par_bounds = model.config.suggested_bounds()
>>> pyhf.infer.test_statistics.qmu(test_mu, data, model, init_pars, par_bounds)
array(3.93824492)
Args:
mu (Number or Tensor): The signal strength parameter
data (Tensor): The data to be considered
pdf (~pyhf.pdf.Model): The HistFactory statistical model used in the likelihood ratio calculation
init_pars (`list`): Values to initialize the model parameters at for the fit
par_bounds (`list` of `list`\s or `tuple`\s): The extrema of values the model parameters are allowed to reach in the fit
Returns:
Float: The calculated test statistic, :math:`q_{\mu}`
"""
if pdf.config.poi_index is None:
raise UnspecifiedPOI(
'No POI is defined. A POI is required for profile likelihood based test statistics.'
)
if par_bounds[pdf.config.poi_index][0] == 0:
log.warning(
'qmu test statistic used for fit configuration with POI bounded at zero. Use qmutilde.'
)
return _qmu_like(mu, data, pdf, init_pars, par_bounds)
|
def qmu(mu, data, pdf, init_pars, par_bounds):
r"""
The test statistic, :math:`q_{\mu}`, for establishing an upper
    limit on the strength parameter, :math:`\mu`, as defined in
Equation (14) in :xref:`arXiv:1007.1727`.
.. math::
:nowrap:
\begin{equation}
q_{\mu} = \left\{\begin{array}{ll}
-2\ln\lambda\left(\mu\right), &\hat{\mu} < \mu,\\
0, & \hat{\mu} > \mu
\end{array}\right.
\end{equation}
Example:
>>> import pyhf
>>> pyhf.set_backend("numpy")
>>> model = pyhf.simplemodels.hepdata_like(
... signal_data=[12.0, 11.0], bkg_data=[50.0, 52.0], bkg_uncerts=[3.0, 7.0]
... )
>>> observations = [51, 48]
>>> data = pyhf.tensorlib.astensor(observations + model.config.auxdata)
>>> test_mu = 1.0
>>> init_pars = model.config.suggested_init()
>>> par_bounds = model.config.suggested_bounds()
>>> pyhf.infer.test_statistics.qmu(test_mu, data, model, init_pars, par_bounds)
array(3.93824492)
Args:
mu (Number or Tensor): The signal strength parameter
data (Tensor): The data to be considered
pdf (~pyhf.pdf.Model): The HistFactory statistical model used in the likelihood ratio calculation
init_pars (`list`): Values to initialize the model parameters at for the fit
par_bounds (`list` of `list`\s or `tuple`\s): The extrema of values the model parameters are allowed to reach in the fit
Returns:
Float: The calculated test statistic, :math:`q_{\mu}`
"""
if pdf.config.poi_index is None:
raise UnspecifiedPOI(
'No POI is defined. A POI is required for profile likelihood based test statistics.'
)
if par_bounds[pdf.config.poi_index][0] == 0:
log.warning(
'qmu test statistic used for fit configuration with POI bounded at zero.\n'
+ 'Use the qmu_tilde test statistic (pyhf.infer.test_statistics.qmu_tilde) instead.'
)
return _qmu_like(mu, data, pdf, init_pars, par_bounds)
|
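A small numeric illustration of the piecewise definition in the docstring above; the -2 ln L values are made up and the helper name is illustrative, not part of pyhf.

def piecewise_qmu(mu, mu_hat, twice_nll_at_mu, twice_nll_at_muhat):
    # -2 ln lambda(mu) is the difference of the two -2 ln L values;
    # the statistic is clipped to zero when the best fit exceeds mu.
    if mu_hat > mu:
        return 0.0
    return twice_nll_at_mu - twice_nll_at_muhat

print(piecewise_qmu(1.0, mu_hat=0.4, twice_nll_at_mu=12.3, twice_nll_at_muhat=8.4))  # ~3.9
print(piecewise_qmu(1.0, mu_hat=1.6, twice_nll_at_mu=12.3, twice_nll_at_muhat=8.4))  # 0.0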
7,188 |
def regionprops(label_image, intensity_image=None, cache=True,
coordinates=None):
r"""Measure properties of labeled image regions.
Parameters
----------
label_image : (N, M) ndarray
Labeled input image. Labels with value 0 are ignored.
.. versionchanged:: 0.14.1
Previously, ``label_image`` was processed by ``numpy.squeeze`` and
so any number of singleton dimensions was allowed. This resulted in
inconsistent handling of images with singleton dimensions. To
recover the old behaviour, use
``regionprops(np.squeeze(label_image), ...)``.
intensity_image : (N, M) ndarray, optional
Intensity (i.e., input) image with same size as labeled image.
Default is None.
cache : bool, optional
Determine whether to cache calculated properties. The computation is
much faster for cached properties, whereas the memory consumption
increases.
coordinates : DEPRECATED
This argument is deprecated and will be removed in a future version
of scikit-image.
See :ref:`Coordinate conventions <numpy-images-coordinate-conventions>`
for more details.
.. deprecated:: 0.16.0
Use "rc" coordinates everywhere. It may be sufficient to call
``numpy.transpose`` on your label image to get the same values as
0.15 and earlier. However, for some properties, the transformation
will be less trivial. For example, the new orientation is
:math:`\frac{\pi}{2}` plus the old orientation.
Returns
-------
properties : list of RegionProperties
Each item describes one labeled region, and can be accessed using the
attributes listed below.
Notes
-----
The following properties can be accessed as attributes or keys:
**area** : int
Number of pixels of the region.
**bbox** : tuple
Bounding box ``(min_row, min_col, max_row, max_col)``.
Pixels belonging to the bounding box are in the half-open interval
``[min_row; max_row)`` and ``[min_col; max_col)``.
**bbox_area** : int
Number of pixels of bounding box.
**centroid** : array
Centroid coordinate tuple ``(row, col)``.
**convex_area** : int
Number of pixels of convex hull image, which is the smallest convex
polygon that encloses the region.
**convex_image** : (H, J) ndarray
Binary convex hull image which has the same size as bounding box.
**coords** : (N, 2) ndarray
Coordinate list ``(row, col)`` of the region.
**eccentricity** : float
Eccentricity of the ellipse that has the same second-moments as the
region. The eccentricity is the ratio of the focal distance
(distance between focal points) over the major axis length.
The value is in the interval [0, 1).
When it is 0, the ellipse becomes a circle.
**equivalent_diameter** : float
The diameter of a circle with the same area as the region.
**euler_number** : int
        Euler characteristic of region. Computed as the number of objects (= 1)
        minus the number of holes (8-connectivity).
**extent** : float
Ratio of pixels in the region to pixels in the total bounding box.
Computed as ``area / (rows * cols)``
**filled_area** : int
        Number of pixels of the region with all the holes filled in. Describes
the area of the filled_image.
**filled_image** : (H, J) ndarray
Binary region image with filled holes which has the same size as
bounding box.
**image** : (H, J) ndarray
Sliced binary region image which has the same size as bounding box.
**inertia_tensor** : ndarray
Inertia tensor of the region for the rotation around its mass.
**inertia_tensor_eigvals** : tuple
The eigenvalues of the inertia tensor in decreasing order.
**intensity_image** : ndarray
Image inside region bounding box.
**label** : int
The label in the labeled input image.
**local_centroid** : array
Centroid coordinate tuple ``(row, col)``, relative to region bounding
box.
**major_axis_length** : float
The length of the major axis of the ellipse that has the same
normalized second central moments as the region.
**max_intensity** : float
Value with the greatest intensity in the region.
**mean_intensity** : float
Value with the mean intensity in the region.
**min_intensity** : float
Value with the least intensity in the region.
**minor_axis_length** : float
The length of the minor axis of the ellipse that has the same
normalized second central moments as the region.
**moments** : (3, 3) ndarray
Spatial moments up to 3rd order::
m_ij = sum{ array(row, col) * row^i * col^j }
where the sum is over the `row`, `col` coordinates of the region.
**moments_central** : (3, 3) ndarray
Central moments (translation invariant) up to 3rd order::
mu_ij = sum{ array(row, col) * (row - row_c)^i * (col - col_c)^j }
where the sum is over the `row`, `col` coordinates of the region,
and `row_c` and `col_c` are the coordinates of the region's centroid.
**moments_hu** : tuple
Hu moments (translation, scale and rotation invariant).
**moments_normalized** : (3, 3) ndarray
Normalized moments (translation and scale invariant) up to 3rd order::
nu_ij = mu_ij / m_00^[(i+j)/2 + 1]
where `m_00` is the zeroth spatial moment.
**orientation** : float
Angle between the 0th axis (rows) and the major
axis of the ellipse that has the same second moments as the region,
ranging from `-pi/2` to `pi/2` counter-clockwise.
**perimeter** : float
Perimeter of object which approximates the contour as a line
through the centers of border pixels using a 4-connectivity.
**slice** : tuple of slices
A slice to extract the object from the source image.
**solidity** : float
Ratio of pixels in the region to pixels of the convex hull image.
**weighted_centroid** : array
Centroid coordinate tuple ``(row, col)`` weighted with intensity
image.
**weighted_local_centroid** : array
Centroid coordinate tuple ``(row, col)``, relative to region bounding
box, weighted with intensity image.
**weighted_moments** : (3, 3) ndarray
Spatial moments of intensity image up to 3rd order::
wm_ij = sum{ array(row, col) * row^i * col^j }
where the sum is over the `row`, `col` coordinates of the region.
**weighted_moments_central** : (3, 3) ndarray
Central moments (translation invariant) of intensity image up to
3rd order::
wmu_ij = sum{ array(row, col) * (row - row_c)^i * (col - col_c)^j }
where the sum is over the `row`, `col` coordinates of the region,
and `row_c` and `col_c` are the coordinates of the region's weighted
centroid.
**weighted_moments_hu** : tuple
Hu moments (translation, scale and rotation invariant) of intensity
image.
**weighted_moments_normalized** : (3, 3) ndarray
Normalized moments (translation and scale invariant) of intensity
image up to 3rd order::
wnu_ij = wmu_ij / wm_00^[(i+j)/2 + 1]
where ``wm_00`` is the zeroth spatial moment (intensity-weighted area).
Each region also supports iteration, so that you can do::
for prop in region:
print(prop, region[prop])
See Also
--------
label
References
----------
.. [1] Wilhelm Burger, Mark Burge. Principles of Digital Image Processing:
Core Algorithms. Springer-Verlag, London, 2009.
.. [2] B. Jähne. Digital Image Processing. Springer-Verlag,
Berlin-Heidelberg, 6. edition, 2005.
.. [3] T. H. Reiss. Recognizing Planar Objects Using Invariant Image
Features, from Lecture notes in computer science, p. 676. Springer,
Berlin, 1993.
.. [4] https://en.wikipedia.org/wiki/Image_moment
Examples
--------
>>> from skimage import data, util
>>> from skimage.measure import label
>>> img = util.img_as_ubyte(data.coins()) > 110
>>> label_img = label(img, connectivity=img.ndim)
>>> props = regionprops(label_img)
>>> # centroid of first labeled object
>>> props[0].centroid
(22.729879860483141, 81.912285234465827)
>>> # centroid of first labeled object
>>> props[0]['centroid']
(22.729879860483141, 81.912285234465827)
"""
if label_image.ndim not in (2, 3):
raise TypeError('Only 2-D and 3-D images supported.')
if not np.issubdtype(label_image.dtype, np.integer):
        raise TypeError('Non-integer image types are ambiguous: '
                        'use ndimage.label to label the connected '
                        'components of the image, '
                        'or label_image.astype(np.uint8) to interpret '
                        'the True values as a single label')
if coordinates is not None:
if coordinates == 'rc':
msg = ('The coordinates keyword argument to skimage.measure.'
'regionprops is deprecated. All features are now computed '
'in rc (row-column) coordinates. Please remove '
'`coordinates="rc"` from all calls to regionprops before '
'updating scikit-image.')
warn(msg, stacklevel=2, category=FutureWarning)
else:
msg = ('Values other than "rc" for the "coordinates" argument '
'to skimage.measure.regionprops are no longer supported. '
'You should update your code to use "rc" coordinates and '
'stop using the "coordinates" argument, or use skimage '
'version 0.15.x or earlier.')
raise ValueError(msg)
regions = []
objects = ndi.find_objects(label_image)
for i, sl in enumerate(objects):
if sl is None:
continue
label = i + 1
props = RegionProperties(sl, label, label_image, intensity_image,
cache)
regions.append(props)
return regions
|
def regionprops(label_image, intensity_image=None, cache=True,
coordinates=None):
r"""Measure properties of labeled image regions.
Parameters
----------
label_image : (N, M) ndarray
Labeled input image. Labels with value 0 are ignored.
.. versionchanged:: 0.14.1
Previously, ``label_image`` was processed by ``numpy.squeeze`` and
so any number of singleton dimensions was allowed. This resulted in
inconsistent handling of images with singleton dimensions. To
recover the old behaviour, use
``regionprops(np.squeeze(label_image), ...)``.
intensity_image : (N, M) ndarray, optional
Intensity (i.e., input) image with same size as labeled image.
Default is None.
cache : bool, optional
Determine whether to cache calculated properties. The computation is
much faster for cached properties, whereas the memory consumption
increases.
coordinates : DEPRECATED
This argument is deprecated and will be removed in a future version
of scikit-image.
See :ref:`Coordinate conventions <numpy-images-coordinate-conventions>`
for more details.
.. deprecated:: 0.16.0
Use "rc" coordinates everywhere. It may be sufficient to call
``numpy.transpose`` on your label image to get the same values as
0.15 and earlier. However, for some properties, the transformation
will be less trivial. For example, the new orientation is
:math:`\frac{\pi}{2}` plus the old orientation.
Returns
-------
properties : list of RegionProperties
Each item describes one labeled region, and can be accessed using the
attributes listed below.
Notes
-----
The following properties can be accessed as attributes or keys:
**area** : int
Number of pixels of the region.
**bbox** : tuple
Bounding box ``(min_row, min_col, max_row, max_col)``.
Pixels belonging to the bounding box are in the half-open interval
``[min_row; max_row)`` and ``[min_col; max_col)``.
**bbox_area** : int
Number of pixels of bounding box.
**centroid** : array
Centroid coordinate tuple ``(row, col)``.
**convex_area** : int
Number of pixels of convex hull image, which is the smallest convex
polygon that encloses the region.
**convex_image** : (H, J) ndarray
Binary convex hull image which has the same size as bounding box.
**coords** : (N, 2) ndarray
Coordinate list ``(row, col)`` of the region.
**eccentricity** : float
Eccentricity of the ellipse that has the same second-moments as the
region. The eccentricity is the ratio of the focal distance
(distance between focal points) over the major axis length.
The value is in the interval [0, 1).
When it is 0, the ellipse becomes a circle.
**equivalent_diameter** : float
The diameter of a circle with the same area as the region.
**euler_number** : int
        Euler characteristic of region. Computed as the number of objects (= 1)
        minus the number of holes (8-connectivity).
**extent** : float
Ratio of pixels in the region to pixels in the total bounding box.
Computed as ``area / (rows * cols)``
**filled_area** : int
        Number of pixels of the region with all the holes filled in. Describes
the area of the filled_image.
**filled_image** : (H, J) ndarray
Binary region image with filled holes which has the same size as
bounding box.
**image** : (H, J) ndarray
Sliced binary region image which has the same size as bounding box.
**inertia_tensor** : ndarray
Inertia tensor of the region for the rotation around its mass.
**inertia_tensor_eigvals** : tuple
The eigenvalues of the inertia tensor in decreasing order.
**intensity_image** : ndarray
Image inside region bounding box.
**label** : int
The label in the labeled input image.
**local_centroid** : array
Centroid coordinate tuple ``(row, col)``, relative to region bounding
box.
**major_axis_length** : float
The length of the major axis of the ellipse that has the same
normalized second central moments as the region.
**max_intensity** : float
Value with the greatest intensity in the region.
**mean_intensity** : float
Value with the mean intensity in the region.
**min_intensity** : float
Value with the least intensity in the region.
**minor_axis_length** : float
The length of the minor axis of the ellipse that has the same
normalized second central moments as the region.
**moments** : (3, 3) ndarray
Spatial moments up to 3rd order::
m_ij = sum{ array(row, col) * row^i * col^j }
where the sum is over the `row`, `col` coordinates of the region.
**moments_central** : (3, 3) ndarray
Central moments (translation invariant) up to 3rd order::
mu_ij = sum{ array(row, col) * (row - row_c)^i * (col - col_c)^j }
where the sum is over the `row`, `col` coordinates of the region,
and `row_c` and `col_c` are the coordinates of the region's centroid.
**moments_hu** : tuple
Hu moments (translation, scale and rotation invariant).
**moments_normalized** : (3, 3) ndarray
Normalized moments (translation and scale invariant) up to 3rd order::
nu_ij = mu_ij / m_00^[(i+j)/2 + 1]
where `m_00` is the zeroth spatial moment.
**orientation** : float
Angle between the 0th axis (rows) and the major
axis of the ellipse that has the same second moments as the region,
ranging from `-pi/2` to `pi/2` counter-clockwise.
**perimeter** : float
Perimeter of object which approximates the contour as a line
through the centers of border pixels using a 4-connectivity.
**slice** : tuple of slices
A slice to extract the object from the source image.
**solidity** : float
Ratio of pixels in the region to pixels of the convex hull image.
**weighted_centroid** : array
Centroid coordinate tuple ``(row, col)`` weighted with intensity
image.
**weighted_local_centroid** : array
Centroid coordinate tuple ``(row, col)``, relative to region bounding
box, weighted with intensity image.
**weighted_moments** : (3, 3) ndarray
Spatial moments of intensity image up to 3rd order::
wm_ij = sum{ array(row, col) * row^i * col^j }
where the sum is over the `row`, `col` coordinates of the region.
**weighted_moments_central** : (3, 3) ndarray
Central moments (translation invariant) of intensity image up to
3rd order::
wmu_ij = sum{ array(row, col) * (row - row_c)^i * (col - col_c)^j }
where the sum is over the `row`, `col` coordinates of the region,
and `row_c` and `col_c` are the coordinates of the region's weighted
centroid.
**weighted_moments_hu** : tuple
Hu moments (translation, scale and rotation invariant) of intensity
image.
**weighted_moments_normalized** : (3, 3) ndarray
Normalized moments (translation and scale invariant) of intensity
image up to 3rd order::
wnu_ij = wmu_ij / wm_00^[(i+j)/2 + 1]
where ``wm_00`` is the zeroth spatial moment (intensity-weighted area).
Each region also supports iteration, so that you can do::
for prop in region:
print(prop, region[prop])
See Also
--------
label
References
----------
.. [1] Wilhelm Burger, Mark Burge. Principles of Digital Image Processing:
Core Algorithms. Springer-Verlag, London, 2009.
.. [2] B. Jähne. Digital Image Processing. Springer-Verlag,
Berlin-Heidelberg, 6. edition, 2005.
.. [3] T. H. Reiss. Recognizing Planar Objects Using Invariant Image
Features, from Lecture notes in computer science, p. 676. Springer,
Berlin, 1993.
.. [4] https://en.wikipedia.org/wiki/Image_moment
Examples
--------
>>> from skimage import data, util
>>> from skimage.measure import label
>>> img = util.img_as_ubyte(data.coins()) > 110
>>> label_img = label(img, connectivity=img.ndim)
>>> props = regionprops(label_img)
>>> # centroid of first labeled object
>>> props[0].centroid
(22.729879860483141, 81.912285234465827)
>>> # centroid of first labeled object
>>> props[0]['centroid']
(22.729879860483141, 81.912285234465827)
"""
if label_image.ndim not in (2, 3):
raise TypeError('Only 2-D and 3-D images supported.')
if not np.issubdtype(label_image.dtype, np.integer):
        raise TypeError('Non-integer image types are ambiguous: '
                        'use ndimage.label to label the connected '
                        'components of the image, '
                        'or label_image.astype(np.uint8) to interpret '
                        'the True values as a single label.')
if coordinates is not None:
if coordinates == 'rc':
msg = ('The coordinates keyword argument to skimage.measure.'
'regionprops is deprecated. All features are now computed '
'in rc (row-column) coordinates. Please remove '
'`coordinates="rc"` from all calls to regionprops before '
'updating scikit-image.')
warn(msg, stacklevel=2, category=FutureWarning)
else:
msg = ('Values other than "rc" for the "coordinates" argument '
'to skimage.measure.regionprops are no longer supported. '
'You should update your code to use "rc" coordinates and '
'stop using the "coordinates" argument, or use skimage '
'version 0.15.x or earlier.')
raise ValueError(msg)
regions = []
objects = ndi.find_objects(label_image)
for i, sl in enumerate(objects):
if sl is None:
continue
label = i + 1
props = RegionProperties(sl, label, label_image, intensity_image,
cache)
regions.append(props)
return regions
|
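A numpy-only check of the spatial-moment formula quoted in the docstring above (m_ij = sum{ array(row, col) * row^i * col^j }); the toy image is arbitrary.

import numpy as np

def raw_moment(image, i, j):
    rows, cols = np.mgrid[:image.shape[0], :image.shape[1]]
    return float((image * rows ** i * cols ** j).sum())

img = np.zeros((5, 5))
img[1:4, 2:4] = 1  # a 3x2 block of ones
m00 = raw_moment(img, 0, 0)                # area = 6.0
centroid = (raw_moment(img, 1, 0) / m00,   # row centroid = 2.0
            raw_moment(img, 0, 1) / m00)   # col centroid = 2.5
print(m00, centroid)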
30,942 |
def check_in_domain(domain_name: str, domain_to_check: list) -> CommandResults:
"""
Args:
domain_name: main domain
domain_to_check: list of domains or sub domains that should be checked
Returns:
for each domain for the list an entry with True / False if it is in the domain or not
"""
context_entry = []
for element in domain_to_check:
is_in_domain = False
# split by domain name
domain_to_check_prefix = element.split(domain_name)[0]
if domain_to_check_prefix + domain_name == element:
is_in_domain = True
context_entry.append({
'Domain.Name': element,
'Domain.IsInternal': True if is_in_domain else False
})
return CommandResults(outputs=context_entry)
|
def check_in_domain(domain_name: str, domain_to_check: list) -> CommandResults:
"""
Args:
domain_name: main domain
domain_to_check: list of domains or sub domains that should be checked
Returns:
for each domain for the list an entry with True / False if it is in the domain or not
"""
context_entry = []
for element in domain_to_check:
is_in_domain = False
# split by domain name
domain_to_check_prefix = element.split(domain_name)[0]
is_in_domain = (domain_to_check_prefix + domain_name == element)
context_entry.append({
'Domain.Name': element,
'Domain.IsInternal': True if is_in_domain else False
})
return CommandResults(outputs=context_entry)
|
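The prefix-splitting test above also counts a host such as "evil-example.com" as part of "example.com"; a stricter standalone variant (not part of the original command) requires an exact match or a "." boundary.

def is_in_domain_strict(domain_name: str, element: str) -> bool:
    # Either the domain itself or a true subdomain (dot boundary required).
    return element == domain_name or element.endswith("." + domain_name)

print(is_in_domain_strict("example.com", "mail.example.com"))  # True
print(is_in_domain_strict("example.com", "evil-example.com"))  # False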
55,809 |
def is_s3_url(path):
if type(path) != str:
return False
if (path[:2] == 's3') and ('://' in path[:6]):
return True
return False
|
def is_s3_url(path):
return isinstance(path, str) and path.lower().startswith("s3://")
|
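A quick check of the simplified predicate above (standalone copy); note that, unlike the original slicing check, it does not match "s3a://" or "s3n://" style schemes.

def is_s3_url(path):
    return isinstance(path, str) and path.lower().startswith("s3://")

print(is_s3_url("s3://bucket/key.parquet"))   # True
print(is_s3_url("S3://bucket/key.parquet"))   # True (scheme compared case-insensitively)
print(is_s3_url("s3a://bucket/key.parquet"))  # False (was matched by the old check)
print(is_s3_url(["s3://bucket"]))             # False (non-string input)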
10,538 |
def read_file(path):
try:
with open(path, 'r') as f:
return f.read()
except Exception as ex: # pylint: disable=broad-except
print('%s:%d:%d: Reading file failed: %s' % (path, 0, 0, re.sub(r'\s+', ' ', str(ex))))
return None
|
def read_file(path):
try:
with open(path, 'r') as f:
return f.read()
except Exception as ex: # pylint: disable=broad-except
print('%s:%d:%d: unable to read required file %s' % (path, 0, 0, re.sub(r'\s+', ' ', str(ex))))
return None
|
1,904 |
def _sparse_random_matrix(n_components, n_features, density='auto',
random_state=None):
"""Generalized Achlioptas random sparse matrix for random projection
Setting density to 1 / 3 will yield the original matrix by Dimitris
Achlioptas while setting a lower value will yield the generalization
by Ping Li et al.
If we note :math:`s = 1 / density`, the components of the random matrix are
drawn from:
- -sqrt(s) / sqrt(n_components) with probability 1 / 2s
- 0 with probability 1 - 1 / s
- +sqrt(s) / sqrt(n_components) with probability 1 / 2s
Read more in the :ref:`User Guide <sparse_random_matrix>`.
Parameters
----------
n_components : int,
Dimensionality of the target projection space.
n_features : int,
Dimensionality of the original source space.
density : float in range (0, 1] or 'auto', default='auto'
Ratio of non-zero component in the random projection matrix.
If density = 'auto', the value is set to the minimum density
as recommended by Ping Li et al.: 1 / sqrt(n_features).
Use density = 1 / 3.0 if you want to reproduce the results from
Achlioptas, 2001.
random_state : int, RandomState instance or None, default=None
Controls the pseudo random number generator used to generate the matrix
at fit time.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
components : array or CSR matrix with shape [n_components, n_features]
The generated Gaussian random matrix.
See Also
--------
SparseRandomProjection
References
----------
.. [1] Ping Li, T. Hastie and K. W. Church, 2006,
"Very Sparse Random Projections".
https://web.stanford.edu/~hastie/Papers/Ping/KDD06_rp.pdf
.. [2] D. Achlioptas, 2001, "Database-friendly random projections",
http://www.cs.ucsc.edu/~optas/papers/jl.pdf
"""
_check_input_size(n_components, n_features)
density = _check_density(density, n_features)
rng = check_random_state(random_state)
if density == 1:
# skip index generation if totally dense
components = rng.binomial(1, 0.5, (n_components, n_features)) * 2 - 1
return 1 / np.sqrt(n_components) * components
else:
# Generate location of non zero elements
indices = []
offset = 0
indptr = [offset]
for _ in range(n_components):
# find the indices of the non-zero components for row i
n_nonzero_i = rng.binomial(n_features, density)
indices_i = sample_without_replacement(n_features, n_nonzero_i,
random_state=rng)
indices.append(indices_i)
offset += n_nonzero_i
indptr.append(offset)
indices = np.concatenate(indices)
# Among non zero components the probability of the sign is 50%/50%
data = rng.binomial(1, 0.5, size=np.size(indices)) * 2 - 1
# build the CSR structure by concatenating the rows
components = sp.csr_matrix((data, indices, indptr),
shape=(n_components, n_features))
return np.sqrt(1 / density) / np.sqrt(n_components) * components
|
def _sparse_random_matrix(n_components, n_features, density='auto',
random_state=None):
"""Generalized Achlioptas random sparse matrix for random projection
Setting density to 1 / 3 will yield the original matrix by Dimitris
Achlioptas while setting a lower value will yield the generalization
by Ping Li et al.
If we note :math:`s = 1 / density`, the components of the random matrix are
drawn from:
- -sqrt(s) / sqrt(n_components) with probability 1 / 2s
- 0 with probability 1 - 1 / s
- +sqrt(s) / sqrt(n_components) with probability 1 / 2s
Read more in the :ref:`User Guide <sparse_random_matrix>`.
Parameters
----------
n_components : int,
Dimensionality of the target projection space.
n_features : int,
Dimensionality of the original source space.
density : float in range (0, 1] or 'auto', default='auto'
Ratio of non-zero component in the random projection matrix.
If density = 'auto', the value is set to the minimum density
as recommended by Ping Li et al.: 1 / sqrt(n_features).
Use density = 1 / 3.0 if you want to reproduce the results from
Achlioptas, 2001.
random_state : int or RandomState instance, default=None
Controls the pseudo random number generator used to generate the matrix
at fit time.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
components : array or CSR matrix with shape [n_components, n_features]
The generated Gaussian random matrix.
See Also
--------
SparseRandomProjection
References
----------
.. [1] Ping Li, T. Hastie and K. W. Church, 2006,
"Very Sparse Random Projections".
https://web.stanford.edu/~hastie/Papers/Ping/KDD06_rp.pdf
.. [2] D. Achlioptas, 2001, "Database-friendly random projections",
http://www.cs.ucsc.edu/~optas/papers/jl.pdf
"""
_check_input_size(n_components, n_features)
density = _check_density(density, n_features)
rng = check_random_state(random_state)
if density == 1:
# skip index generation if totally dense
components = rng.binomial(1, 0.5, (n_components, n_features)) * 2 - 1
return 1 / np.sqrt(n_components) * components
else:
# Generate location of non zero elements
indices = []
offset = 0
indptr = [offset]
for _ in range(n_components):
# find the indices of the non-zero components for row i
n_nonzero_i = rng.binomial(n_features, density)
indices_i = sample_without_replacement(n_features, n_nonzero_i,
random_state=rng)
indices.append(indices_i)
offset += n_nonzero_i
indptr.append(offset)
indices = np.concatenate(indices)
# Among non zero components the probability of the sign is 50%/50%
data = rng.binomial(1, 0.5, size=np.size(indices)) * 2 - 1
# build the CSR structure by concatenating the rows
components = sp.csr_matrix((data, indices, indptr),
shape=(n_components, n_features))
return np.sqrt(1 / density) / np.sqrt(n_components) * components
|
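A numpy-only simulation of the element distribution stated in the docstring above (dense, not the CSR construction used by the function): values are +/- sqrt(s)/sqrt(n_components) with probability 1/(2s) each and 0 otherwise. Sizes and seed are arbitrary.

import numpy as np

rng = np.random.default_rng(0)
n_components, n_features, density = 50, 200, 1 / 3.0
s = 1.0 / density

signs = rng.binomial(1, 0.5, (n_components, n_features)) * 2 - 1
nonzero = rng.binomial(1, density, (n_components, n_features))
components = signs * nonzero * np.sqrt(s) / np.sqrt(n_components)

print(np.unique(np.round(components, 6)))              # -0.244949, 0.0, 0.244949
print(abs((components != 0).mean() - density) < 0.02)  # empirical density close to 1/s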
13,071 |
def list_client_sources(
config: GatewayConfig, customer_id: str
) -> List[CustomerSource]:
merchant_auth = _get_merchant_auth(config.connection_params)
get_customer_profile = apicontractsv1.getCustomerProfileRequest()
get_customer_profile.merchantAuthentication = merchant_auth
get_customer_profile.customerProfileId = customer_id
get_customer_profile.unmaskExpirationDate = True
controller = getCustomerProfileController(get_customer_profile)
if config.connection_params.get("use_sandbox") is False:
controller.setenvironment(constants.PRODUCTION)
controller.execute()
response = controller.getresponse()
results = []
if hasattr(response, "profile") and hasattr(response.profile, "paymentProfiles"):
for payment_profile in response.profile.paymentProfiles:
if hasattr(payment_profile, "payment") and hasattr(
payment_profile.payment, "creditCard"
):
name = None
if hasattr(payment_profile, "billTo"):
first = payment_profile.billTo.firstName.pyval
last = payment_profile.billTo.lastName.pyval
if first:
name = first + " " + last
else:
name = last
card = payment_profile.payment.creditCard
expiration_year, expiration_month = _normalize_card_expiration(
card.expirationDate.pyval
)
results.append(
CustomerSource(
id=payment_profile.customerPaymentProfileId.pyval,
gateway="authorize.net",
credit_card_info=PaymentMethodInfo(
exp_year=expiration_year,
exp_month=expiration_month,
last_4=_normalize_last_4(card.cardNumber.pyval),
brand=card.cardType.pyval,
name=name,
),
)
)
return results
|
def list_client_sources(
config: GatewayConfig, customer_id: str
) -> List[CustomerSource]:
merchant_auth = _get_merchant_auth(config.connection_params)
get_customer_profile = apicontractsv1.getCustomerProfileRequest()
get_customer_profile.merchantAuthentication = merchant_auth
get_customer_profile.customerProfileId = customer_id
get_customer_profile.unmaskExpirationDate = True
controller = getCustomerProfileController(get_customer_profile)
if not config.connection_params.get("use_sandbox"):
controller.setenvironment(constants.PRODUCTION)
controller.execute()
response = controller.getresponse()
results = []
if hasattr(response, "profile") and hasattr(response.profile, "paymentProfiles"):
for payment_profile in response.profile.paymentProfiles:
if hasattr(payment_profile, "payment") and hasattr(
payment_profile.payment, "creditCard"
):
name = None
if hasattr(payment_profile, "billTo"):
first = payment_profile.billTo.firstName.pyval
last = payment_profile.billTo.lastName.pyval
if first:
name = first + " " + last
else:
name = last
card = payment_profile.payment.creditCard
expiration_year, expiration_month = _normalize_card_expiration(
card.expirationDate.pyval
)
results.append(
CustomerSource(
id=payment_profile.customerPaymentProfileId.pyval,
gateway="authorize.net",
credit_card_info=PaymentMethodInfo(
exp_year=expiration_year,
exp_month=expiration_month,
last_4=_normalize_last_4(card.cardNumber.pyval),
brand=card.cardType.pyval,
name=name,
),
)
)
return results
|
7,727 |
def test_failure(pin_mats, good_radii):
"""Check for various failure modes"""
# Bad material type
with pytest.raises(TypeError):
Pin.from_radii(good_radii, [mat.name for mat in pin_mats])
# Incorrect lengths
with pytest.raises(ValueError, match="length") as exec_info:
Pin.from_radii(good_radii[: len(pin_mats) - 2], pin_mats)
# Non-positive radii
rad = (-0.1,) + good_radii[1:]
with pytest.raises(ValueError, match="index 0") as exec_info:
Pin.from_radii(rad, pin_mats)
# Non-increasing radii
rad = tuple(reversed(good_radii))
with pytest.raises(ValueError, match="index 1") as exec_info:
Pin.from_radii(rad, pin_mats)
# Bad orientation
with pytest.raises(ValueError, match="Orientation") as exec_info:
Pin.from_radii(good_radii, pin_mats, orientation="fail")
|
def test_failure(pin_mats, good_radii):
"""Check for various failure modes"""
# Bad material type
with pytest.raises(TypeError):
Pin.from_radii(good_radii, [mat.name for mat in pin_mats])
# Incorrect lengths
with pytest.raises(ValueError, match="length"):
Pin.from_radii(good_radii[: len(pin_mats) - 2], pin_mats)
# Non-positive radii
rad = (-0.1,) + good_radii[1:]
with pytest.raises(ValueError, match="index 0") as exec_info:
Pin.from_radii(rad, pin_mats)
# Non-increasing radii
rad = tuple(reversed(good_radii))
with pytest.raises(ValueError, match="index 1") as exec_info:
Pin.from_radii(rad, pin_mats)
# Bad orientation
with pytest.raises(ValueError, match="Orientation") as exec_info:
Pin.from_radii(good_radii, pin_mats, orientation="fail")
|
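A standalone illustration of the pytest.raises pattern exercised above: the "as" capture is only needed when the raised exception is inspected afterwards. The divide helper is made up for the example.

import pytest

def divide(a, b):
    if b == 0:
        raise ValueError("division by zero at index 1")
    return a / b

def test_divide_errors():
    with pytest.raises(ValueError, match="index 1"):
        divide(1, 0)
    with pytest.raises(ValueError) as exc_info:
        divide(1, 0)
    assert "zero" in str(exc_info.value)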
32,010 |
def get_cves(client: PrismaCloudComputeClient, args: dict) -> List[CommandResults]:
"""
Get cves information, implement the command 'cve'.
Args:
client (PrismaCloudComputeClient): prisma-cloud-compute client.
args (dict): cve command arguments.
Returns:
CommandResults: command-results object.
"""
cve_ids = argToList(arg=args.get("cve_id", []))
all_cves_information, results, unique_cve_ids = [], [], set()
for _id in cve_ids:
if cves_info := client.get_cve_info(cve_id=_id):
all_cves_information.extend(cves_info)
if filtered_cves_information := filter_api_response(api_response=all_cves_information, limit=MAX_API_LIMIT):
for cve_info in filtered_cves_information:
cve_id, cvss = cve_info.get("cve"), cve_info.get("cvss")
modified, description = epochs_to_timestamp(epochs=cve_info.get("modified")), cve_info.get("description")
if cve_id not in unique_cve_ids:
unique_cve_ids.add(cve_id)
cve_data = {
"ID": cve_id, "CVSS": cvss, "Modified": modified, "Description": description
}
results.append(
CommandResults(
outputs_prefix="CVE",
outputs_key_field=["ID"],
outputs=cve_data,
indicator=Common.CVE(
id=cve_id, cvss=cvss, published="", modified=modified, description=description
),
raw_response=filtered_cves_information,
readable_output=tableToMarkdown(name=cve_id, t=cve_data)
)
)
return results
return [CommandResults(readable_output="No results found")]
|
def get_cves(client: PrismaCloudComputeClient, args: dict) -> List[CommandResults]:
"""
Get cves information, implement the command 'cve'.
Args:
client (PrismaCloudComputeClient): prisma-cloud-compute client.
args (dict): cve command arguments.
Returns:
CommandResults: command-results object.
"""
cve_ids = argToList(arg=args.get("cve_id", []))
all_cves_information, results, unique_cve_ids = [], [], set()
for _id in cve_ids:
if cves_info := client.get_cve_info(cve_id=_id):
all_cves_information.extend(cves_info)
if filtered_cves_information := filter_api_response(api_response=all_cves_information, limit=MAX_API_LIMIT):
for cve_info in filtered_cves_information:
cve_id, cvss = cve_info.get("cve"), cve_info.get("cvss")
modified, description = epochs_to_timestamp(epochs=cve_info.get("modified")), cve_info.get("description")
if cve_id not in unique_cve_ids:
unique_cve_ids.add(cve_id)
cve_data = {
"ID": cve_id, "CVSS": cvss, "Modified": modified, "Description": description
}
results.append(
CommandResults(
outputs_prefix="CVE",
outputs_key_field=["ID"],
outputs=cve_data,
indicator=Common.CVE(
id=cve_id, cvss=cvss, published="", modified=modified, description=description
),
raw_response=filtered_cves_information,
readable_output=tableToMarkdown(name=cve_id, t=cve_data)
)
)
return results
return [CommandResults(readable_output="No results found.")]
|
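A standalone sketch of the ordered de-duplication pattern used above (a set of seen IDs plus an ordered result list); the records are made up.

records = [{"cve": "CVE-2021-0001"}, {"cve": "CVE-2021-0002"}, {"cve": "CVE-2021-0001"}]
seen, unique = set(), []
for record in records:
    if (cve_id := record.get("cve")) not in seen:
        seen.add(cve_id)
        unique.append(record)
print([r["cve"] for r in unique])  # ['CVE-2021-0001', 'CVE-2021-0002']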
21,881 |
def _merge_into(repo: Repo, source: str, target: str) -> None:
"""
Merges branch `source` into branch `target`.
Pulls both before merging and pushes the result.
"""
# Update our branches and switch to the target branch
for branch in [source, target]:
click.echo(f"Switching to {branch} and pulling...")
repo.heads[branch].checkout()
# Pull so we're up to date
repo.remote().pull()
assert repo.active_branch.name == target
try:
# TODO This seemed easier than using GitPython directly
click.echo(f"Merging {source}")
repo.git.merge(source)
except GitCommandError as exc:
# If a merge conflict occurs, give some context and try to
# make it easy to abort if necessary.
click.echo(exc)
if not click.confirm(
f"Likely merge conflict whilst merging ({source} → {target}). "
f"Have you resolved it?"
):
repo.git.merge("--abort")
return
# Push result.
click.echo("Pushing")
repo.remote().push()
|
def _merge_into(repo: Repo, source: str, target: str) -> None:
"""
Merges branch `source` into branch `target`.
Pulls both before merging and pushes the result.
"""
# Update our branches and switch to the target branch
for branch in [source, target]:
click.echo(f"Switching to {branch} and pulling...")
repo.heads[branch].checkout()
# Pull so we're up to date
repo.remote().pull()
assert repo.active_branch.name == target
try:
# TODO This seemed easier than using GitPython directly
click.echo(f"Merging {source}...")
repo.git.merge(source)
except GitCommandError as exc:
# If a merge conflict occurs, give some context and try to
# make it easy to abort if necessary.
click.echo(exc)
if not click.confirm(
f"Likely merge conflict whilst merging ({source} → {target}). "
f"Have you resolved it?"
):
repo.git.merge("--abort")
return
# Push result.
click.echo("Pushing")
repo.remote().push()
|
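A hypothetical invocation of the helper above, assuming a local clone with "develop" and "main" branches and a configured remote; the path is a placeholder, not a real repository.

from git import Repo

repo = Repo("/path/to/local/clone")  # placeholder path to an existing clone
_merge_into(repo, source="develop", target="main")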
17,378 |
def concat(
objs,
dim,
data_vars="all",
coords="different",
compat="equals",
positions=None,
fill_value=dtypes.NA,
join="outer",
combine_attrs="override",
):
"""Concatenate xarray objects along a new or existing dimension.
Parameters
----------
objs : sequence of Dataset and DataArray objects
xarray objects to concatenate together. Each object is expected to
consist of variables and coordinates with matching shapes except for
along the concatenated dimension.
dim : str or DataArray or pandas.Index
Name of the dimension to concatenate along. This can either be a new
dimension name, in which case it is added along axis=0, or an existing
dimension name, in which case the location of the dimension is
unchanged. If dimension is provided as a DataArray or Index, its name
is used as the dimension to concatenate along and the values are added
as a coordinate.
data_vars : {'minimal', 'different', 'all' or list of str}, optional
These data variables will be concatenated together:
* 'minimal': Only data variables in which the dimension already
appears are included.
* 'different': Data variables which are not equal (ignoring
attributes) across all datasets are also concatenated (as well as
all for which dimension already appears). Beware: this option may
load the data payload of data variables into memory if they are not
already loaded.
* 'all': All data variables will be concatenated.
* list of str: The listed data variables will be concatenated, in
addition to the 'minimal' data variables.
If objects are DataArrays, data_vars must be 'all'.
coords : {'minimal', 'different', 'all' or list of str}, optional
These coordinate variables will be concatenated together:
* 'minimal': Only coordinates in which the dimension already appears
are included.
* 'different': Coordinates which are not equal (ignoring attributes)
across all datasets are also concatenated (as well as all for which
dimension already appears). Beware: this option may load the data
payload of coordinate variables into memory if they are not already
loaded.
* 'all': All coordinate variables will be concatenated, except
those corresponding to other dimensions.
* list of str: The listed coordinate variables will be concatenated,
in addition to the 'minimal' coordinates.
compat : {'identical', 'equals', 'broadcast_equals', 'no_conflicts', 'override'}, optional
String indicating how to compare non-concatenated variables of the same name for
potential conflicts. This is passed down to merge.
- 'broadcast_equals': all values must be equal when variables are
broadcast against each other to ensure common dimensions.
- 'equals': all values and dimensions must be the same.
- 'identical': all values, dimensions and attributes must be the
same.
- 'no_conflicts': only values which are not null in both datasets
must be equal. The returned dataset then contains the combination
of all non-null values.
- 'override': skip comparing and pick variable from first dataset
positions : None or list of integer arrays, optional
List of integer arrays which specifies the integer positions to which
to assign each dataset along the concatenated dimension. If not
supplied, objects are concatenated in the provided order.
fill_value : scalar, optional
Value to use for newly missing values
    join : {'outer', 'inner', 'left', 'right', 'exact', 'override'}, optional
String indicating how to combine differing indexes
(excluding dim) in objects
- 'outer': use the union of object indexes
- 'inner': use the intersection of object indexes
- 'left': use indexes from the first object with each dimension
- 'right': use indexes from the last object with each dimension
- 'exact': instead of aligning, raise `ValueError` when indexes to be
aligned are not equal
- 'override': if indexes are of same size, rewrite indexes to be
those of the first object with that dimension. Indexes for the same
dimension must have the same size in all objects.
combine_attrs : {'drop', 'identical', 'no_conflicts', 'override'}, optional
String indicating how to combine attrs of the objects being merged:
- 'drop': empty attrs on returned Dataset.
- 'identical': all attrs must be the same on every object.
- 'no_conflicts': attrs from all objects are combined, any that have
the same name must also have the same value.
- 'override': skip comparing and copy attrs from the first dataset to
the result.
Returns
-------
concatenated : type of objs
See also
--------
merge
auto_combine
"""
# TODO: add ignore_index arguments copied from pandas.concat
# TODO: support concatenating scalar coordinates even if the concatenated
# dimension already exists
from .dataset import Dataset
from .dataarray import DataArray
try:
first_obj, objs = utils.peek_at(objs)
except StopIteration:
raise ValueError("must supply at least one object to concatenate")
if compat not in _VALID_COMPAT:
raise ValueError(
"compat=%r invalid: must be 'broadcast_equals', 'equals', 'identical', 'no_conflicts' or 'override'"
% compat
)
if isinstance(first_obj, DataArray):
f = _dataarray_concat
elif isinstance(first_obj, Dataset):
f = _dataset_concat
else:
raise TypeError(
"can only concatenate xarray Dataset and DataArray "
"objects, got %s" % type(first_obj)
)
return f(
objs, dim, data_vars, coords, compat, positions, fill_value, join, combine_attrs
)
|
def concat(
objs,
dim,
data_vars="all",
coords="different",
compat="equals",
positions=None,
fill_value=dtypes.NA,
join="outer",
combine_attrs="override",
):
"""Concatenate xarray objects along a new or existing dimension.
Parameters
----------
objs : sequence of Dataset and DataArray objects
xarray objects to concatenate together. Each object is expected to
consist of variables and coordinates with matching shapes except for
along the concatenated dimension.
dim : str or DataArray or pandas.Index
Name of the dimension to concatenate along. This can either be a new
dimension name, in which case it is added along axis=0, or an existing
dimension name, in which case the location of the dimension is
unchanged. If dimension is provided as a DataArray or Index, its name
is used as the dimension to concatenate along and the values are added
as a coordinate.
data_vars : {'minimal', 'different', 'all' or list of str}, optional
These data variables will be concatenated together:
* 'minimal': Only data variables in which the dimension already
appears are included.
* 'different': Data variables which are not equal (ignoring
attributes) across all datasets are also concatenated (as well as
all for which dimension already appears). Beware: this option may
load the data payload of data variables into memory if they are not
already loaded.
* 'all': All data variables will be concatenated.
* list of str: The listed data variables will be concatenated, in
addition to the 'minimal' data variables.
If objects are DataArrays, data_vars must be 'all'.
coords : {'minimal', 'different', 'all' or list of str}, optional
These coordinate variables will be concatenated together:
* 'minimal': Only coordinates in which the dimension already appears
are included.
* 'different': Coordinates which are not equal (ignoring attributes)
across all datasets are also concatenated (as well as all for which
dimension already appears). Beware: this option may load the data
payload of coordinate variables into memory if they are not already
loaded.
* 'all': All coordinate variables will be concatenated, except
those corresponding to other dimensions.
* list of str: The listed coordinate variables will be concatenated,
in addition to the 'minimal' coordinates.
compat : {'identical', 'equals', 'broadcast_equals', 'no_conflicts', 'override'}, optional
String indicating how to compare non-concatenated variables of the same name for
potential conflicts. This is passed down to merge.
- 'broadcast_equals': all values must be equal when variables are
broadcast against each other to ensure common dimensions.
- 'equals': all values and dimensions must be the same.
- 'identical': all values, dimensions and attributes must be the
same.
- 'no_conflicts': only values which are not null in both datasets
must be equal. The returned dataset then contains the combination
of all non-null values.
- 'override': skip comparing and pick variable from first dataset
positions : None or list of integer arrays, optional
List of integer arrays which specifies the integer positions to which
to assign each dataset along the concatenated dimension. If not
supplied, objects are concatenated in the provided order.
fill_value : scalar, optional
Value to use for newly missing values
    join : {'outer', 'inner', 'left', 'right', 'exact', 'override'}, optional
String indicating how to combine differing indexes
(excluding dim) in objects
- 'outer': use the union of object indexes
- 'inner': use the intersection of object indexes
- 'left': use indexes from the first object with each dimension
- 'right': use indexes from the last object with each dimension
- 'exact': instead of aligning, raise `ValueError` when indexes to be
aligned are not equal
- 'override': if indexes are of same size, rewrite indexes to be
those of the first object with that dimension. Indexes for the same
dimension must have the same size in all objects.
combine_attrs : {'drop', 'identical', 'no_conflicts', 'override'}, default 'override'
String indicating how to combine attrs of the objects being merged:
- 'drop': empty attrs on returned Dataset.
- 'identical': all attrs must be the same on every object.
- 'no_conflicts': attrs from all objects are combined, any that have
the same name must also have the same value.
- 'override': skip comparing and copy attrs from the first dataset to
the result.
Returns
-------
concatenated : type of objs
See also
--------
merge
auto_combine
"""
# TODO: add ignore_index arguments copied from pandas.concat
# TODO: support concatenating scalar coordinates even if the concatenated
# dimension already exists
from .dataset import Dataset
from .dataarray import DataArray
try:
first_obj, objs = utils.peek_at(objs)
except StopIteration:
raise ValueError("must supply at least one object to concatenate")
if compat not in _VALID_COMPAT:
raise ValueError(
"compat=%r invalid: must be 'broadcast_equals', 'equals', 'identical', 'no_conflicts' or 'override'"
% compat
)
if isinstance(first_obj, DataArray):
f = _dataarray_concat
elif isinstance(first_obj, Dataset):
f = _dataset_concat
else:
raise TypeError(
"can only concatenate xarray Dataset and DataArray "
"objects, got %s" % type(first_obj)
)
return f(
objs, dim, data_vars, coords, compat, positions, fill_value, join, combine_attrs
)
|
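A short usage example of concat along a dimension, assuming xarray and numpy are installed; the toy DataArray is arbitrary.

import numpy as np
import xarray as xr

da = xr.DataArray(np.arange(6).reshape(2, 3), dims=("x", "y"))
pieces = [da.isel(x=0), da.isel(x=1)]  # each piece drops the "x" dim
combined = xr.concat(pieces, dim="x")  # re-stacked along "x"
print(combined.shape)                  # (2, 3)
print(bool((combined == da).all()))    # True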
33,914 |
def single_node_benchmark():
import ray
ray.init("auto")
assert NUM_NODES * NUM_GPUS_PER_NODE == NUM_GPUS_PER_NODE
# Time torch.distributed using torch.multiprocessing.
address = "127.0.0.1"
port = find_free_port()
torch_time = torch_run_single_node(
train_func=torch_train,
world_size=NUM_NODES * NUM_GPUS_PER_NODE,
num_workers_per_node=NUM_GPUS_PER_NODE,
address=address,
port=port)
# Time using Ray Train.
ray_time = ray_train_run(train_func=train_func, world_size=4)
# Make sure that the torch.distributed time and Ray Train time are
# within 5% of each other.
assert abs(torch_time - ray_time) <= min(0.05 * torch_time,
0.05 * ray_time), \
f"torch.distributed time: {torch_time}, Ray Train time: {ray_time}"
with open(os.environ["TEST_OUTPUT_JSON"], "w") as f:
f.write(
json.dumps({
"torch_time": torch_time,
"ray_train_time": ray_time
}))
|
def single_node_benchmark():
import ray
ray.init("auto")
assert NUM_NODES == 1
# Time torch.distributed using torch.multiprocessing.
address = "127.0.0.1"
port = find_free_port()
torch_time = torch_run_single_node(
train_func=torch_train,
world_size=NUM_NODES * NUM_GPUS_PER_NODE,
num_workers_per_node=NUM_GPUS_PER_NODE,
address=address,
port=port)
# Time using Ray Train.
ray_time = ray_train_run(train_func=train_func, world_size=4)
# Make sure that the torch.distributed time and Ray Train time are
# within 5% of each other.
assert abs(torch_time - ray_time) <= min(0.05 * torch_time,
0.05 * ray_time), \
f"torch.distributed time: {torch_time}, Ray Train time: {ray_time}"
with open(os.environ["TEST_OUTPUT_JSON"], "w") as f:
f.write(
json.dumps({
"torch_time": torch_time,
"ray_train_time": ray_time
}))
|
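find_free_port() is referenced above but not shown; below is a common socket-based sketch (an assumption about the helper, not necessarily the implementation used in the benchmark).

import socket

def find_free_port() -> int:
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.bind(("", 0))  # port 0 lets the OS pick a free ephemeral port
        return s.getsockname()[1]

print(find_free_port())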
46,022 |
def edge_aware_blur_pool2d(
input: torch.Tensor,
kernel_size: int,
edge_threshold: float = 2.0,
edge_dilatation_kernel_size: int = 3,
epsilon: float = 1e-6,
) -> torch.Tensor:
r"""Blur the input tensor while maintaining its edges.
Edge detection is done with the sobel filter, and blurring is done with a pool2d.
Args:
input: the input image to blur with shape :math:`(B, C, H, W)`.
kernel_size: the kernel size for max pooling.
edge_threshold: threshold for the edge decision rule; edge/non-edge.
edge_dilatation_kernel_size: the kernel size for dilating the edges.
epsilon: for numerical stability.
Returns:
The blurred tensor of shape :math:`(B, C, H, W)`.
"""
input = F.pad(input, (2, 2, 2, 2), mode="reflect") # pad to avoid artifacts near physical edges
blurred_input = blur_pool2d(input, kernel_size=kernel_size, stride=1) # blurry version of the input
# calculate the edges (add epsilon to avoid taking the log of 0)
log_input, log_thresh = torch.log2(input + epsilon), torch.log2(torch.tensor(edge_threshold))
edges_x = log_input[..., :, 4:] - log_input[..., :, :-4]
edges_y = log_input[..., 4:, :] - log_input[..., :-4, :]
edges_x, edges_y = torch.mean(edges_x, dim=-3, keepdim=True), torch.mean(edges_y, dim=-3, keepdim=True)
edges_x_mask, edges_y_mask = edges_x.abs() > log_thresh.to(edges_x), edges_y.abs() > log_thresh.to(edges_y)
edges_xy_mask = (edges_x_mask[..., 2:-2, :] + edges_y_mask[..., :, 2:-2]).type_as(input)
# dilate the content edges to have a soft mask of edges
dilated_edges = F.max_pool3d(edges_xy_mask, edge_dilatation_kernel_size, 1, edge_dilatation_kernel_size // 2)
# slice the padded regions
input = input[..., 2:-2, 2:-2]
blurred_input = blurred_input[..., 2:-2, 2:-2]
# fuse the input image on edges and blurry input everywhere else
blurred = dilated_edges * input + (1.0 - dilated_edges) * blurred_input
return blurred
|
def edge_aware_blur_pool2d(
input: Tensor,
kernel_size: int,
edge_threshold: float = 2.0,
edge_dilatation_kernel_size: int = 3,
epsilon: float = 1e-6,
) -> torch.Tensor:
r"""Blur the input tensor while maintaining its edges.
Edge detection is done with the sobel filter, and blurring is done with a pool2d.
Args:
input: the input image to blur with shape :math:`(B, C, H, W)`.
kernel_size: the kernel size for max pooling.
edge_threshold: threshold for the edge decision rule; edge/non-edge.
edge_dilatation_kernel_size: the kernel size for dilating the edges.
epsilon: for numerical stability.
Returns:
The blurred tensor of shape :math:`(B, C, H, W)`.
"""
input = F.pad(input, (2, 2, 2, 2), mode="reflect") # pad to avoid artifacts near physical edges
blurred_input = blur_pool2d(input, kernel_size=kernel_size, stride=1) # blurry version of the input
# calculate the edges (add epsilon to avoid taking the log of 0)
log_input, log_thresh = torch.log2(input + epsilon), torch.log2(torch.tensor(edge_threshold))
edges_x = log_input[..., :, 4:] - log_input[..., :, :-4]
edges_y = log_input[..., 4:, :] - log_input[..., :-4, :]
edges_x, edges_y = torch.mean(edges_x, dim=-3, keepdim=True), torch.mean(edges_y, dim=-3, keepdim=True)
edges_x_mask, edges_y_mask = edges_x.abs() > log_thresh.to(edges_x), edges_y.abs() > log_thresh.to(edges_y)
edges_xy_mask = (edges_x_mask[..., 2:-2, :] + edges_y_mask[..., :, 2:-2]).type_as(input)
# dilate the content edges to have a soft mask of edges
dilated_edges = F.max_pool3d(edges_xy_mask, edge_dilatation_kernel_size, 1, edge_dilatation_kernel_size // 2)
# slice the padded regions
input = input[..., 2:-2, 2:-2]
blurred_input = blurred_input[..., 2:-2, 2:-2]
# fuse the input image on edges and blurry input everywhere else
blurred = dilated_edges * input + (1.0 - dilated_edges) * blurred_input
return blurred
|
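A hedged usage sketch of the edge-aware blur above, assuming torch and kornia are installed and that the function is importable from kornia.filters (the import path is an assumption); the random image stands in for real data.

import torch
from kornia.filters import edge_aware_blur_pool2d

image = torch.rand(1, 3, 32, 32)
blurred = edge_aware_blur_pool2d(image, kernel_size=3)
print(blurred.shape)  # torch.Size([1, 3, 32, 32])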
54,506 |
def create_study(n_objectives, seed):
directions = ["minimize" for _ in range(n_objectives)]
sampler_name = sys.argv[1]
# Sampler.
sampler_cls = getattr(
optuna.multi_objective.samplers,
sampler_name,
getattr(optuna.integration, sampler_name, None),
)
if sampler_cls is None:
raise ValueError("Unknown sampler: {}.".format(sampler_name))
# TODO(drumehiron): sampler_kwargs
# sampler_kwargs = json.loads(sys.argv[2])
# try:
# sampler_kwargs["seed"] = seed
# sampler = sampler_cls(**sampler_kwargs)
# except:
# del sampler_kwargs["seed"]
# sampler = sampler_cls(**sampler_kwargs)
sampler = sampler_cls()
return optuna.multi_objective.create_study(directions=directions, sampler=sampler)
|
def create_study(n_objectives, seed):
directions = ["minimize" for _ in range(n_objectives)]
sampler_name = sys.argv[1]
# Sampler.
sampler_cls = getattr(
optuna.multi_objective.samplers,
sampler_name,
getattr(optuna.integration, sampler_name, None),
)
if sampler_cls is None:
raise ValueError("Unknown sampler: {}.".format(sampler_name))
# TODO(drumehiron): sampler_kwargs
# sampler_kwargs = json.loads(sys.argv[2])
# try:
# sampler_kwargs["seed"] = seed
# sampler = sampler_cls(**sampler_kwargs)
# except:
# del sampler_kwargs["seed"]
# sampler = sampler_cls(**sampler_kwargs)
sampler = sampler_cls()
return optuna.create_study(
directions=directions,
sampler=sampler,
pruner=optuna.pruners.NopPruner(),
)
|
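A hedged multi-objective usage example with the default sampler, mirroring the "minimize" directions above; it assumes optuna (2.4 or newer) is installed and uses a toy objective.

import optuna

def objective(trial):
    x = trial.suggest_float("x", 0.0, 1.0)
    return x, (1.0 - x) ** 2  # two objectives, both minimized

study = optuna.create_study(directions=["minimize", "minimize"])
study.optimize(objective, n_trials=5)
print(len(study.best_trials) >= 1)  # Pareto-optimal trials are tracked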
33,001 |
def load_config():
config_dict = {}
if os.path.isfile( CONFIG_FILEPATH ):
try:
with open( CONFIG_FILEPATH, 'rb' ) as f:
config_dict = pickle.load( f )
except:
#print('[ERROR]: Can not read config from %s' %CONFIG_FILEPATH)
logger.error(' Can not read config from %s' % CONFIG_FILEPATH)
for tag in _CONFIG_DEFAULTS_ALL:
if tag not in config_dict:
config_dict[ tag ] = _CONFIG_DEFAULTS_ALL[ tag ]
for tag in _CONFIG_TAGS_:
if tag not in config_dict:
if sys.platform.startswith('win'):
config_dict[ tag ] = _CONFIG_DEFAULTS_WINDOWS[ tag ]
elif sys.platform.startswith('linux') or sys.platform.startswith('darwin') or sys.platform.startswith('freebsd'):
config_dict[ tag ] = _CONFIG_DEFAULTS_UNIX[ tag ]
else:
#print( 'ERROR: unknown platform' )
logger.error( ' Unknown platform: %s' % sys.platform)
assert 0
try:
if sys.platform.startswith('win'):
import winreg
# Find the blender2ogre install path from windows registry
registry_key = winreg.OpenKey(winreg.HKEY_CLASSES_ROOT, r'Software\blender2ogre', 0, winreg.KEY_READ)
exe_install_dir = winreg.QueryValueEx(registry_key, "Path")[0]
if exe_install_dir != "":
# OgreXmlConverter
if os.path.isfile(exe_install_dir + "OgreXmlConverter.exe"):
#print ("Using OgreXmlConverter from install path:", exe_install_dir + "OgreXmlConverter.exe")
logger.info (" Using OgreXmlConverter from install path: %sOgreXmlConverter.exe" % exe_install_dir)
config_dict['OGRETOOLS_XML_CONVERTER'] = exe_install_dir + "OgreXmlConverter.exe"
# Run auto updater as silent. Notifies user if there is a new version out.
# This will not show any UI if there are no update and will go to network
# only once per 2 days so it wont be spending much resources either.
# todo: Move this to a more appropriate place than load_config()
if os.path.isfile(exe_install_dir + "check-for-updates.exe"):
subprocess.Popen([exe_install_dir + "check-for-updates.exe", "/silent"])
except Exception as e:
#print("Exception while reading windows registry:", e)
logger.error(" Exception while reading windows registry: %s" % e)
# Setup temp hidden RNA to expose the file paths
for tag in _CONFIG_TAGS_:
default = config_dict[ tag ]
#func = eval( 'lambda self,con: config_dict.update( {"%s" : self.%s} )' %(tag,tag) )
func = lambda self,con: config_dict.update( {tag : getattr(self,tag,default)} )
if type(default) is bool:
prop = BoolProperty( name=tag,
description='updates bool setting',
default=default,
options={'SKIP_SAVE'},
update=func)
else:
prop = StringProperty( name=tag,
description='updates path setting',
maxlen=128,
default=default,
options={'SKIP_SAVE'},
update=func)
setattr( bpy.types.WindowManager, tag, prop )
return config_dict
|
def load_config():
config_dict = {}
if os.path.isfile( CONFIG_FILEPATH ):
try:
with open( CONFIG_FILEPATH, 'rb' ) as f:
config_dict = pickle.load( f )
except:
#print('[ERROR]: Can not read config from %s' %CONFIG_FILEPATH)
logger.error(' Can not read config from %s', CONFIG_FILEPATH)
for tag in _CONFIG_DEFAULTS_ALL:
if tag not in config_dict:
config_dict[ tag ] = _CONFIG_DEFAULTS_ALL[ tag ]
for tag in _CONFIG_TAGS_:
if tag not in config_dict:
if sys.platform.startswith('win'):
config_dict[ tag ] = _CONFIG_DEFAULTS_WINDOWS[ tag ]
elif sys.platform.startswith('linux') or sys.platform.startswith('darwin') or sys.platform.startswith('freebsd'):
config_dict[ tag ] = _CONFIG_DEFAULTS_UNIX[ tag ]
else:
#print( 'ERROR: unknown platform' )
logger.error( ' Unknown platform: %s' % sys.platform)
assert 0
try:
if sys.platform.startswith('win'):
import winreg
# Find the blender2ogre install path from windows registry
registry_key = winreg.OpenKey(winreg.HKEY_CLASSES_ROOT, r'Software\blender2ogre', 0, winreg.KEY_READ)
exe_install_dir = winreg.QueryValueEx(registry_key, "Path")[0]
if exe_install_dir != "":
# OgreXmlConverter
if os.path.isfile(exe_install_dir + "OgreXmlConverter.exe"):
#print ("Using OgreXmlConverter from install path:", exe_install_dir + "OgreXmlConverter.exe")
logger.info (" Using OgreXmlConverter from install path: %sOgreXmlConverter.exe" % exe_install_dir)
config_dict['OGRETOOLS_XML_CONVERTER'] = exe_install_dir + "OgreXmlConverter.exe"
# Run auto updater as silent. Notifies user if there is a new version out.
# This will not show any UI if there are no update and will go to network
# only once per 2 days so it wont be spending much resources either.
# todo: Move this to a more appropriate place than load_config()
if os.path.isfile(exe_install_dir + "check-for-updates.exe"):
subprocess.Popen([exe_install_dir + "check-for-updates.exe", "/silent"])
except Exception as e:
#print("Exception while reading windows registry:", e)
logger.error(" Exception while reading windows registry: %s" % e)
# Setup temp hidden RNA to expose the file paths
for tag in _CONFIG_TAGS_:
default = config_dict[ tag ]
#func = eval( 'lambda self,con: config_dict.update( {"%s" : self.%s} )' %(tag,tag) )
func = lambda self,con: config_dict.update( {tag : getattr(self,tag,default)} )
if type(default) is bool:
prop = BoolProperty( name=tag,
description='updates bool setting',
default=default,
options={'SKIP_SAVE'},
update=func)
else:
prop = StringProperty( name=tag,
description='updates path setting',
maxlen=128,
default=default,
options={'SKIP_SAVE'},
update=func)
setattr( bpy.types.WindowManager, tag, prop )
return config_dict
|
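The platform-dependent defaults handling in load_config can be reduced to a small self-contained sketch; the tag names and paths below are illustrative stand-ins rather than the real _CONFIG_TAGS_ entries:

import sys

_DEFAULTS_ALL = {"SWAP_AXIS": "xz-y"}  # hypothetical shared default
_DEFAULTS_UNIX = {"OGRETOOLS_XML_CONVERTER": "/usr/bin/OgreXMLConverter"}
_DEFAULTS_WINDOWS = {"OGRETOOLS_XML_CONVERTER": "C:\\OgreCommandLineTools\\OgreXmlConverter.exe"}

def merge_defaults(config_dict):
    """Fill in missing keys: shared defaults first, then platform-specific ones."""
    merged = dict(config_dict)
    for tag, value in _DEFAULTS_ALL.items():
        merged.setdefault(tag, value)
    platform_defaults = _DEFAULTS_WINDOWS if sys.platform.startswith("win") else _DEFAULTS_UNIX
    for tag, value in platform_defaults.items():
        merged.setdefault(tag, value)
    return merged

print(merge_defaults({}))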
3,298 |
def get_progress(group_id):
pending = _get_sync_redis_client().get(_get_sync_counter_key(group_id))
info = _get_sync_redis_client().get(_get_info_reprocessed_key(group_id))
if pending is None:
logger.error("reprocessing2.missing_counter")
return 0, None
if info is None:
logger.error("reprocessing2.missing_info")
return 0, None
return int(pending), six.binary_type(info)
|
def get_progress(group_id):
pending = _get_sync_redis_client().get(_get_sync_counter_key(group_id))
info = _get_sync_redis_client().get(_get_info_reprocessed_key(group_id))
if pending is None:
logger.error("reprocessing2.missing_counter")
return 0, None
if info is None:
logger.error("reprocessing2.missing_info")
return 0, None
return int(pending), json.loads(info)
|
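The change above returns the info blob as parsed JSON instead of raw bytes. The decode step in isolation, with a plain dict standing in for the Redis client and made-up key names:

import json

fake_store = {
    "re2:pending:123": "7",
    "re2:info:123": json.dumps({"dateCreated": "2021-01-01", "totalEvents": 42}),
}

pending = fake_store.get("re2:pending:123")
info = fake_store.get("re2:info:123")
if pending is not None and info is not None:
    print(int(pending), json.loads(info))  # -> 7 {'dateCreated': '2021-01-01', 'totalEvents': 42}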
31,422 |
def test_module(client: Client, *args) -> str:
"""Tests API connectivity and authentication'
A simple call to list_livehunt_rules
Returns:
'ok' if test passed, anything else will fail the test.
"""
client.list_livehunt_rules(limit=1)
return 'ok'
|
def test_module(client: Client) -> str:
"""Tests API connectivity and authentication'
A simple call to list_livehunt_rules
Returns:
'ok' if test passed, anything else will fail the test.
"""
client.list_livehunt_rules(limit=1)
return 'ok'
|
1,674 |
def _path_residuals(X, y, train, test, path, path_params, alphas=None,
l1_ratio=1, X_order=None, dtype=None):
"""Returns the MSE for the models computed by 'path'
Parameters
----------
X : array-like or sparse matrix of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,) or (n_samples, n_targets)
Target values
train : list of indices
The indices of the train set
test : list of indices
The indices of the test set
path : callable
function returning a list of models on the path. See
enet_path for an example of signature
path_params : dictionary
Parameters passed to the path function
alphas : array-like, default=None
Array of float that is used for cross-validation. If not
provided, computed using 'path'.
l1_ratio : float, default=1
float between 0 and 1 passed to ElasticNet (scaling between
l1 and l2 penalties). For ``l1_ratio = 0`` the penalty is an
L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty. For ``0
< l1_ratio < 1``, the penalty is a combination of L1 and L2.
X_order : {'F', 'C'}, default=None
The order of the arrays expected by the path function to
avoid memory copies
dtype : a numpy dtype, default=None
The dtype of the arrays expected by the path function to
avoid memory copies
"""
X_train = X[train]
y_train = y[train]
X_test = X[test]
y_test = y[test]
fit_intercept = path_params['fit_intercept']
normalize = path_params['normalize']
if y.ndim == 1:
precompute = path_params['precompute']
else:
# No Gram variant of multi-task exists right now.
# Fall back to default enet_multitask
precompute = False
X_train, y_train, X_offset, y_offset, X_scale, precompute, Xy = \
_pre_fit(X_train, y_train, None, precompute, normalize, fit_intercept,
copy=False)
path_params = path_params.copy()
path_params['Xy'] = Xy
path_params['X_offset'] = X_offset
path_params['X_scale'] = X_scale
path_params['precompute'] = precompute
path_params['copy_X'] = False
path_params['alphas'] = alphas
if 'l1_ratio' in path_params:
path_params['l1_ratio'] = l1_ratio
# Do the ordering and type casting here, as if it is done in the path,
# X is copied and a reference is kept here
X_train = check_array(X_train, 'csc', dtype=dtype, order=X_order)
alphas, coefs, _ = path(X_train, y_train, **path_params)
del X_train, y_train
if y.ndim == 1:
# Doing this so that it becomes coherent with multioutput.
coefs = coefs[np.newaxis, :, :]
y_offset = np.atleast_1d(y_offset)
y_test = y_test[:, np.newaxis]
if normalize:
nonzeros = np.flatnonzero(X_scale)
coefs[:, nonzeros] /= X_scale[nonzeros][:, np.newaxis]
intercepts = y_offset[:, np.newaxis] - np.dot(X_offset, coefs)
X_test_coefs = safe_sparse_dot(X_test, coefs)
residues = X_test_coefs - y_test[:, :, np.newaxis]
residues += intercepts
this_mses = ((residues ** 2).mean(axis=0)).mean(axis=0)
return this_mses
|
def _path_residuals(X, y, train, test, path, path_params, alphas=None,
l1_ratio=1, X_order=None, dtype=None):
"""Returns the MSE for the models computed by 'path'
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,) or (n_samples, n_targets)
Target values
train : list of indices
The indices of the train set
test : list of indices
The indices of the test set
path : callable
function returning a list of models on the path. See
enet_path for an example of signature
path_params : dictionary
Parameters passed to the path function
alphas : array-like, default=None
Array of float that is used for cross-validation. If not
provided, computed using 'path'.
l1_ratio : float, default=1
float between 0 and 1 passed to ElasticNet (scaling between
l1 and l2 penalties). For ``l1_ratio = 0`` the penalty is an
L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty. For ``0
< l1_ratio < 1``, the penalty is a combination of L1 and L2.
X_order : {'F', 'C'}, default=None
The order of the arrays expected by the path function to
avoid memory copies
dtype : a numpy dtype, default=None
The dtype of the arrays expected by the path function to
avoid memory copies
"""
X_train = X[train]
y_train = y[train]
X_test = X[test]
y_test = y[test]
fit_intercept = path_params['fit_intercept']
normalize = path_params['normalize']
if y.ndim == 1:
precompute = path_params['precompute']
else:
# No Gram variant of multi-task exists right now.
# Fall back to default enet_multitask
precompute = False
X_train, y_train, X_offset, y_offset, X_scale, precompute, Xy = \
_pre_fit(X_train, y_train, None, precompute, normalize, fit_intercept,
copy=False)
path_params = path_params.copy()
path_params['Xy'] = Xy
path_params['X_offset'] = X_offset
path_params['X_scale'] = X_scale
path_params['precompute'] = precompute
path_params['copy_X'] = False
path_params['alphas'] = alphas
if 'l1_ratio' in path_params:
path_params['l1_ratio'] = l1_ratio
# Do the ordering and type casting here, as if it is done in the path,
# X is copied and a reference is kept here
X_train = check_array(X_train, 'csc', dtype=dtype, order=X_order)
alphas, coefs, _ = path(X_train, y_train, **path_params)
del X_train, y_train
if y.ndim == 1:
# Doing this so that it becomes coherent with multioutput.
coefs = coefs[np.newaxis, :, :]
y_offset = np.atleast_1d(y_offset)
y_test = y_test[:, np.newaxis]
if normalize:
nonzeros = np.flatnonzero(X_scale)
coefs[:, nonzeros] /= X_scale[nonzeros][:, np.newaxis]
intercepts = y_offset[:, np.newaxis] - np.dot(X_offset, coefs)
X_test_coefs = safe_sparse_dot(X_test, coefs)
residues = X_test_coefs - y_test[:, :, np.newaxis]
residues += intercepts
this_mses = ((residues ** 2).mean(axis=0)).mean(axis=0)
return this_mses
|
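_path_residuals is a private helper that the cross-validated estimators call once per fold and l1_ratio to score candidate alphas. A minimal sketch of the public API that exercises it, assuming scikit-learn is installed:

import numpy as np
from sklearn.linear_model import ElasticNetCV

rng = np.random.RandomState(0)
X = rng.randn(60, 8)
y = X @ rng.randn(8) + 0.1 * rng.randn(60)

# ElasticNetCV evaluates the residuals path for every (fold, l1_ratio) pair to pick alpha.
model = ElasticNetCV(l1_ratio=[0.2, 0.5, 0.9], cv=3, n_alphas=20).fit(X, y)
print(model.alpha_, model.l1_ratio_)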
33,784 |
def get_project_package_name(working_dir: str, py_modules: List[str],
excludes: List[str]) -> str:
"""Get the name of the package by working dir and modules.
This function will generate the name of the package by the working
directory and modules. It'll go through all the files in working_dir
and modules and hash the contents of these files to get the hash value
of this package. The final package name is: _ray_pkg_<HASH_VAL>.zip
Right now, only the modules given will be included. The dependencies
are not included automatically.
Examples:
.. code-block:: python
>>> import any_module
>>> get_project_package_name("/working_dir", [any_module])
.... _ray_pkg_af2734982a741.zip
e.g., _ray_pkg_029f88d5ecc55e1e4d64fc6e388fd103.zip
Args:
working_dir (str): The working directory.
py_modules (list[str]): The python module.
excludes (set[str]): The dir or files that should be excluded
Returns:
Package name as a string.
"""
RAY_PKG_PREFIX = "_ray_pkg_"
hash_val = None
excludes = {Path(p).absolute() for p in excludes}
if working_dir:
assert isinstance(working_dir, str), f"`working_dir` is not a string."
working_dir = Path(working_dir).absolute()
if not working_dir.exists() or not working_dir.is_dir():
raise IOError(f"Invalid working dir {working_dir}.")
hash_val = _xor_bytes(
hash_val, _hash_modules(working_dir, working_dir, excludes))
for py_module in py_modules or []:
assert isinstance(py_module, str), f"`py_module` is not a string."
module_dir = Path(py_module).absolute()
if not module_dir.exists() or not module_dir.is_dir():
raise IOError(f"Invalid py_module {py_module}.")
hash_val = _xor_bytes(
hash_val, _hash_modules(module_dir, module_dir.parent, excludes))
return RAY_PKG_PREFIX + hash_val.hex() + ".zip" if hash_val else None
|
def get_project_package_name(working_dir: str, py_modules: List[str],
excludes: List[str]) -> str:
"""Get the name of the package by working dir and modules.
This function will generate the name of the package by the working
directory and modules. It'll go through all the files in working_dir
and modules and hash the contents of these files to get the hash value
of this package. The final package name is: _ray_pkg_<HASH_VAL>.zip
Right now, only the modules given will be included. The dependencies
are not included automatically.
Examples:
.. code-block:: python
>>> import any_module
>>> get_project_package_name("/working_dir", [any_module])
.... _ray_pkg_af2734982a741.zip
e.g., _ray_pkg_029f88d5ecc55e1e4d64fc6e388fd103.zip
Args:
working_dir (str): The working directory.
py_modules (list[str]): The python module.
excludes (set[str]): The dir or files that should be excluded
Returns:
Package name as a string.
"""
RAY_PKG_PREFIX = "_ray_pkg_"
hash_val = None
excludes = {Path(p).absolute() for p in excludes}
if working_dir:
assert isinstance(working_dir, str), f"`working_dir` is not a string."
working_dir = Path(working_dir).absolute()
if not working_dir.exists() or not working_dir.is_dir():
raise ValueError(f"working_dir {working_dir} must be an existing directory.")
hash_val = _xor_bytes(
hash_val, _hash_modules(working_dir, working_dir, excludes))
for py_module in py_modules or []:
assert isinstance(py_module, str), f"`py_module` is not a string."
module_dir = Path(py_module).absolute()
if not module_dir.exists() or not module_dir.is_dir():
raise IOError(f"Invalid py_module {py_module}.")
hash_val = _xor_bytes(
hash_val, _hash_modules(module_dir, module_dir.parent, excludes))
return RAY_PKG_PREFIX + hash_val.hex() + ".zip" if hash_val else None
|
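A simplified, self-contained sketch of deriving a content-addressed package name from a directory tree; unlike the helper above it hashes files directly instead of XOR-combining per-module digests, and it ignores excludes:

import hashlib
from pathlib import Path

def directory_package_name(root: str) -> str:
    """Hash relative paths and file contents under `root` into a package name."""
    digest = hashlib.md5()
    root_path = Path(root).absolute()
    for path in sorted(root_path.rglob("*")):
        if path.is_file():
            digest.update(str(path.relative_to(root_path)).encode())
            digest.update(path.read_bytes())
    return "_ray_pkg_" + digest.hexdigest() + ".zip"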
5,435 |
def test_profile():
env = Environment(extensions=[SerializerExtension])
source = (
"{%- profile as 'profile test' %}"
+ "{% set var = 'val' %}"
+ "{%- endprofile %}"
+ "{{ var }}"
)
rendered = env.from_string(source).render()
assert rendered == "val"
|
def test_profile():
env = Environment(extensions=[SerializerExtension])
source = (
"{%- profile as 'profile test' %}"
"{% set var = 'val' %}"
+ "{%- endprofile %}"
+ "{{ var }}"
)
rendered = env.from_string(source).render()
assert rendered == "val"
|
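The only change in this pair drops one `+`: adjacent Python string literals are concatenated by the parser, so mixing implicit concatenation with `+` builds the same template string. For instance:

source = (
    "{%- profile as 'demo' %}"
    "{% set var = 'val' %}"  # no '+' needed: adjacent literals are joined at parse time
    + "{%- endprofile %}"
    + "{{ var }}"
)
assert source == "{%- profile as 'demo' %}{% set var = 'val' %}{%- endprofile %}{{ var }}"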
1,641 |
def empirical_covariance(X, assume_centered=False):
"""Computes the Maximum likelihood covariance estimator
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Data from which to compute the covariance estimate
assume_centered : bool, default=False
If True, data will not be centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
If False, data will be centered before computation.
Returns
-------
covariance : ndarray of shape (n_features, n_features)
Empirical covariance (Maximum Likelihood Estimator).
Examples
--------
>>> from sklearn.covariance import empirical_covariance
>>> X = [[1,1,1],[1,1,1],[1,1,1],
... [0,0,0],[0,0,0],[0,0,0]]
>>> empirical_covariance(X)
array([[0.25, 0.25, 0.25],
[0.25, 0.25, 0.25],
[0.25, 0.25, 0.25]])
"""
X = np.asarray(X)
if X.ndim == 1:
X = np.reshape(X, (1, -1))
if X.shape[0] == 1:
warnings.warn("Only one sample available. "
"You may want to reshape your data array")
if assume_centered:
covariance = np.dot(X.T, X) / X.shape[0]
else:
covariance = np.cov(X.T, bias=1)
if covariance.ndim == 0:
covariance = np.array([[covariance]])
return covariance
|
def empirical_covariance(X, assume_centered=False):
"""Computes the Maximum likelihood covariance estimator
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Data from which to compute the covariance estimate
assume_centered : bool, default=False
If True, data will not be centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
If False, data will be centered before computation.
Returns
-------
covariance : ndarray of shape (n_features, n_features)
Empirical covariance (Maximum Likelihood Estimator).
Examples
--------
>>> from sklearn.covariance import empirical_covariance
>>> X = [[1,1,1],[1,1,1],[1,1,1],
... [0,0,0],[0,0,0],[0,0,0]]
>>> empirical_covariance(X)
array([[0.25, 0.25, 0.25],
[0.25, 0.25, 0.25],
[0.25, 0.25, 0.25]])
"""
X = np.asarray(X)
if X.ndim == 1:
X = np.reshape(X, (1, -1))
if X.shape[0] == 1:
warnings.warn("Only one sample available. "
"You may want to reshape your data array")
if assume_centered:
covariance = np.dot(X.T, X) / X.shape[0]
else:
covariance = np.cov(X.T, bias=1)
if covariance.ndim == 0:
covariance = np.array([[covariance]])
return covariance
|
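The docstring example can be checked by hand: with three rows of ones and three rows of zeros the per-feature mean is 0.5, every centered entry is +/-0.5, and each covariance entry is the mean of pairwise products, 0.25. The same computation in plain NumPy:

import numpy as np

X = np.array([[1, 1, 1]] * 3 + [[0, 0, 0]] * 3, dtype=float)
centered = X - X.mean(axis=0)              # every entry becomes +/- 0.5
cov = centered.T @ centered / X.shape[0]   # biased estimator, same as np.cov(X.T, bias=1)
print(cov)                                 # 3x3 matrix filled with 0.25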
23,194 |
def remove_completer(name: str):
"""removes a completer from xonsh
Parameters
----------
name:
NAME is a unique name of a completer (run "completer list" to see the current
completers in order)
"""
err = None
if name not in xsh_session.completers:
err = "The name %s is not a registered " "completer function." % name
if err is None:
del xsh_session.completers[name]
return
else:
return None, err + "\n", 1
|
def remove_completer(name: str):
"""removes a completer from xonsh
Parameters
----------
name:
NAME is a unique name of a completer (run "completer list" to see the current
completers in order)
"""
err = None
if name not in xsh_session.completers:
err = f"The name {name} is not a registered completer function."
if err is None:
del xsh_session.completers[name]
return
else:
return None, err + "\n", 1
|
8,491 |
def _pyi_machine(machine, system):
# type: (str, str) -> str
"""
Choose an intentionally simplified architecture identifier to be used in the bootloader's directory name.
Args:
machine:
The output of ``platform.machine()`` or any known architecture alias or shorthand that may be used by a
C compiler.
system:
The output of ``platform.system()`` on the target machine.
Returns:
Either a string tag or, on platforms that don't need an architecture tag, ``None``.
Ideally, we would just use ``platform.machine()`` directly, but that makes cross-compiling the bootloader almost
impossible, because you need to know at compile time exactly what ``platform.machine()`` will be at run time, based
only on the machine name alias or shorthand reported by the C compiler at the build time. Rather, use a loose
differentiation, and trust that anyone mixing armv6l with armv6h knows what they are doing.
"""
# See the corresponding tests in tests/unit/test_compat.py for examples.
if platform.machine() == "sw_64" or platform.machine() == "loongarch64":
# This explicitly inhibits cross compiling the bootloader for or on SunWay and LoongArch machine.
return platform.machine()
if system != "Linux":
        # No architecture specifier for anything bar Linux.
# - Windows only has one 32 and one 64 bit architecture, but lots of aliases for each so it is both pointless
# and painful to give Windows an architecture specifier.
# - macOS is on two 64 bit architectures, but they are merged into one "universal2" bootloader.
# - BSD supports a wide range of architectures, but according to PyPI's download statistics, every one of our
# BSD users are on x86_64. This may change in the distant future.
return
if machine.startswith(("arm", "aarch")):
# ARM has a huge number of similar and aliased sub-versions, such as armv5, armv6l armv8h, aarch64.
return "arm"
if machine in ("x86_64", "x64", "x86"):
return "intel"
if re.fullmatch("i[1-6]86", machine):
return "intel"
if machine.startswith(("ppc", "powerpc")):
# PowerPC comes in 64 vs 32 bit and little vs big endian variants.
return "ppc"
if machine in ("mips64", "mips"):
return "mips"
# Machines with no known aliases :)
else:
return machine
# Unknown architectures are allowed by default, but will all be placed under one directory. In theory, trying to
# have multiple unknown architectures in one copy of PyInstaller will not work, but that should be sufficiently
# unlikely to ever happen.
return "unknown"
|
def _pyi_machine(machine, system):
# type: (str, str) -> str
"""
Choose an intentionally simplified architecture identifier to be used in the bootloader's directory name.
Args:
machine:
The output of ``platform.machine()`` or any known architecture alias or shorthand that may be used by a
C compiler.
system:
The output of ``platform.system()`` on the target machine.
Returns:
Either a string tag or, on platforms that don't need an architecture tag, ``None``.
Ideally, we would just use ``platform.machine()`` directly, but that makes cross-compiling the bootloader almost
impossible, because you need to know at compile time exactly what ``platform.machine()`` will be at run time, based
only on the machine name alias or shorthand reported by the C compiler at the build time. Rather, use a loose
differentiation, and trust that anyone mixing armv6l with armv6h knows what they are doing.
"""
# See the corresponding tests in tests/unit/test_compat.py for examples.
if platform.machine() == "sw_64" or platform.machine() == "loongarch64":
# This explicitly inhibits cross compiling the bootloader for or on SunWay and LoongArch machine.
return platform.machine()
if system != "Linux":
        # No architecture specifier for anything bar Linux.
# - Windows only has one 32 and one 64 bit architecture, but lots of aliases for each so it is both pointless
# and painful to give Windows an architecture specifier.
# - macOS is on two 64 bit architectures, but they are merged into one "universal2" bootloader.
# - BSD supports a wide range of architectures, but according to PyPI's download statistics, every one of our
# BSD users are on x86_64. This may change in the distant future.
return
if machine.startswith(("arm", "aarch")):
# ARM has a huge number of similar and aliased sub-versions, such as armv5, armv6l armv8h, aarch64.
return "arm"
if machine in ("x86_64", "x64", "x86"):
return "intel"
if re.fullmatch("i[1-6]86", machine):
return "intel"
if machine.startswith(("ppc", "powerpc")):
# PowerPC comes in 64 vs 32 bit and little vs big endian variants.
return "ppc"
if machine in ("mips64", "mips"):
return "mips"
# Machines with no known aliases :)
if machine in ("s390x",):
return machine
# Unknown architectures are allowed by default, but will all be placed under one directory. In theory, trying to
# have multiple unknown architectures in one copy of PyInstaller will not work, but that should be sufficiently
# unlikely to ever happen.
return "unknown"
|
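Assuming the _pyi_machine definition above is in scope (it needs the platform and re modules), its mapping can be spot-checked as below; the guard mirrors the function's own sw_64/loongarch64 early return, which keys off the host machine rather than the argument:

import platform
import re  # required by _pyi_machine above

if platform.machine() not in ("sw_64", "loongarch64"):
    assert _pyi_machine("x86_64", "Windows") is None   # non-Linux systems get no tag
    assert _pyi_machine("i686", "Linux") == "intel"
    assert _pyi_machine("armv7l", "Linux") == "arm"
    assert _pyi_machine("ppc64le", "Linux") == "ppc"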
24,623 |
def test_trilinear_approx():
vspace1_args = {
"x_range": [0, 10],
"y_range": [0, 10],
"z_range": [0, 10],
"precision": [10 / 46, 10 / 46, 10 / 46],
"func": vspace_func_1,
}
vspace1 = vector_space(**vspace1_args)
vspace2_args = {
"x_range": [0, 10],
"y_range": [0, 10],
"z_range": [0, 10],
"precision": [10 / 46, 10 / 46, 10 / 46],
"func": vspace_func_2,
}
vspace2 = vector_space(**vspace2_args)
dx, dy, dz = vspace2[2]
dx = dx[0]
dy = dy[0]
dz = dz[0]
f000 = [0, 0, 0]
f001 = [0, 0, dz]
f010 = [0, dy, 0]
f011 = [0, dy, dz]
f100 = [dx, 0, 0]
f101 = [dx, 0, dz]
f110 = [dx, dy, 0]
f111 = [dx, dy, dz]
mid = [dx / 2, dy / 2, dz / 2]
corners = [f000, f001, f010, f011, f100, f101, f110, f111]
tlApprox = trilinear_approx(vspace2, [0, 0, 0])
# Testing Trilinear Approx function on the corners
for p in corners:
approx = tlApprox(p[0], p[1], p[2])
exact = vspace_func_2(p[0], p[1], p[2])
approx = approx.reshape(1, 3)
arr = np.isclose(approx, exact, atol=ATOL)
assert arr.all()
# Testing Trilinear Approx function on a midpoint
approx = tlApprox(mid[0], mid[1], mid[2])
approx = approx.reshape(1, 3)
arr = np.isclose(approx, [-5.39130435, -21.5652174, 23.68667299], atol=ATOL)
assert arr.all()
|
def test_trilinear_approx():
vspace1_args = {
"x_range": [0, 10],
"y_range": [0, 10],
"z_range": [0, 10],
"precision": [10 / 46, 10 / 46, 10 / 46],
"func": vspace_func_1,
}
vspace1 = vector_space(**vspace1_args)
vspace2_args = {
"x_range": [0, 10],
"y_range": [0, 10],
"z_range": [0, 10],
"precision": [10 / 46, 10 / 46, 10 / 46],
"func": vspace_func_2,
}
vspace2 = vector_space(**vspace2_args)
dx, dy, dz = vspace2[2]
dx = dx[0]
dy = dy[0]
dz = dz[0]
f000 = [0, 0, 0]
f001 = [0, 0, dz]
f010 = [0, dy, 0]
f011 = [0, dy, dz]
f100 = [dx, 0, 0]
f101 = [dx, 0, dz]
f110 = [dx, dy, 0]
f111 = [dx, dy, dz]
mid = [dx / 2, dy / 2, dz / 2]
corners = [f000, f001, f010, f011, f100, f101, f110, f111]
tlApprox = trilinear_approx(vspace2, [0, 0, 0])
# Testing Trilinear Approx function on the corners
for p in corners:
approx = tlApprox(p[0], p[1], p[2])
exact = vspace_func_2(p[0], p[1], p[2])
approx = approx.reshape(1, 3)
assert np.allclose(approx, exact, atol=ATOL)
# Testing Trilinear Approx function on a midpoint
approx = tlApprox(mid[0], mid[1], mid[2])
approx = approx.reshape(1, 3)
arr = np.isclose(approx, [-5.39130435, -21.5652174, 23.68667299], atol=ATOL)
assert arr.all()
|
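The corner check above simply swaps `np.isclose(...).all()` for `np.allclose(...)`; for finite inputs the two are interchangeable:

import numpy as np

a = np.array([[1.0, 2.0, 3.0]])
b = a + 1e-9
assert np.allclose(a, b, atol=1e-6) == np.isclose(a, b, atol=1e-6).all()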
14,501 |
def check_support(vevent: icalendar.cal.Event, href: str, calendar: str):
"""test if all icalendar features used in this event are supported,
raise `UpdateFailed` otherwise.
:param vevent: event to test
:param href: href of this event, only used for logging
"""
rec_id = vevent.get(RECURRENCE_ID)
if rec_id is not None and rec_id.params.get('RANGE') == THISANDPRIOR:
raise UpdateFailed(
'The parameter `THISANDPRIOR` is not (and will not be) '
'supported by khal (as applications supporting the latest '
'standard MUST NOT create those. Therefore event {} from '
'calendar {} will not be shown in khal'
.format(href, calendar)
)
rdate = vevent.get('RDATE')
if rdate is not None and hasattr(rdate, 'params') and rdate.params.get('VALUE') == 'PERIOD':
raise UpdateFailed(
'`RDATE;VALUE=PERIOD` is currently not supported by khal. '
'Therefore event {} from calendar {} will not be shown in khal.\n'
'Please post exemplary events (please remove any private data) '
'to https://github.com/pimutils/khal/issues/152 .'
.format(href, calendar)
)
|
def check_support(vevent: icalendar.cal.Event, href: str, calendar: str):
"""test if all icalendar features used in this event are supported,
raise `UpdateFailed` otherwise.
:param vevent: event to test
:param href: href of this event, only used for logging
"""
rec_id = vevent.get(RECURRENCE_ID)
if rec_id is not None and rec_id.params.get('RANGE') == THISANDPRIOR:
raise UpdateFailed(
'The parameter `THISANDPRIOR` is not (and will not be) '
'supported by khal (as applications supporting the latest '
'standard MUST NOT create those. Therefore event {} from '
'calendar {} will not be shown in khal'
.format(href, calendar)
)
rdate = vevent.get('RDATE')
if rdate is not None and hasattr(rdate, 'params') and rdate.params.get('VALUE') == 'PERIOD':
raise UpdateFailed(
'`RDATE;VALUE=PERIOD` is currently not supported by khal. '
f'Therefore event {href} from calendar {calendar} will not be shown in khal.\n'
'Please post exemplary events (please remove any private data) '
'to https://github.com/pimutils/khal/issues/152 .'
        )
|
376 |
def make_initial_point_fn(
*,
model,
overrides: Optional[StartDict] = None,
jitter_rvs: Optional[Set[TensorVariable]] = None,
default_strategy: str = "moment",
return_transformed: bool = True,
) -> Callable:
"""Create seeded function that computes initial values for all free model variables.
Parameters
----------
jitter_rvs : set
The set (or list or tuple) of random variables for which a U(-1, +1) jitter should be
added to the initial value. Only available for variables that have a transform or real-valued support.
default_strategy : str
Which of { "moment", "prior" } to prefer if the initval setting for an RV is None.
overrides : dict
Initial value (strategies) to use instead of what's specified in `Model.initial_values`.
return_transformed : bool
If `True` the returned variables will correspond to transformed initial values.
"""
def find_rng_nodes(variables):
return [
node
for node in graph_inputs(variables)
if isinstance(
node,
(
at.random.var.RandomStateSharedVariable,
at.random.var.RandomGeneratorSharedVariable,
),
)
]
sdict_overrides = convert_str_to_rv_dict(model, overrides or {})
initval_srtats = {
**model.initial_values,
**sdict_overrides,
}
initial_values = make_initial_point_expression(
free_rvs=model.free_RVs,
rvs_to_values=model.rvs_to_values,
initval_strategies=initval_srtats,
jitter_rvs=jitter_rvs,
default_strategy=default_strategy,
return_transformed=return_transformed,
)
# Replace original rng shared variables so that we don't mess with them
# when calling the final seeded function
graph = FunctionGraph(outputs=initial_values, clone=False)
rng_nodes = find_rng_nodes(graph.outputs)
new_rng_nodes: List[Union[np.random.RandomState, np.random.Generator]] = []
for rng_node in rng_nodes:
rng_cls: type
if isinstance(rng_node, at.random.var.RandomStateSharedVariable):
rng_cls = np.random.RandomState
else:
rng_cls = np.random.Generator
new_rng_nodes.append(aesara.shared(rng_cls(np.random.PCG64())))
graph.replace_all(zip(rng_nodes, new_rng_nodes), import_missing=True)
func = compile_pymc(inputs=[], outputs=graph.outputs, mode=aesara.compile.mode.FAST_COMPILE)
varnames = []
for var in model.free_RVs:
transform = getattr(model.rvs_to_values[var].tag, "transform", None)
if transform is not None and return_transformed:
name = get_transformed_name(var.name, transform)
else:
name = var.name
varnames.append(name)
def make_seeded_function(func):
rngs = find_rng_nodes(func.maker.fgraph.outputs)
@functools.wraps(func)
def inner(seed, *args, **kwargs):
seeds = [
np.random.PCG64(sub_seed)
for sub_seed in np.random.SeedSequence(seed).spawn(len(rngs))
]
for rng, seed in zip(rngs, seeds):
if isinstance(rng, at.random.var.RandomStateSharedVariable):
new_rng = np.random.RandomState(seed)
else:
new_rng = np.random.Generator(seed)
rng.set_value(new_rng, True)
values = func(*args, **kwargs)
return dict(zip(varnames, values))
return inner
return make_seeded_function(func)
|
def make_initial_point_fn(
*,
model,
overrides: Optional[StartDict] = None,
jitter_rvs: Optional[Set[TensorVariable]] = None,
default_strategy: str = "moment",
return_transformed: bool = True,
) -> Callable:
"""Create seeded function that computes initial values for all free model variables.
Parameters
----------
jitter_rvs : set
The set (or list or tuple) of random variables for which a U(-1, +1) jitter should be
added to the initial value. Only available for variables that have a transform or real-valued support.
default_strategy : str
Which of { "moment", "prior" } to prefer if the initval setting for an RV is None.
overrides : dict
Initial value (strategies) to use instead of what's specified in `Model.initial_values`.
return_transformed : bool
If `True` the returned variables will correspond to transformed initial values.
"""
def find_rng_nodes(variables):
return [
node
for node in graph_inputs(variables)
if isinstance(
node,
(
at.random.var.RandomStateSharedVariable,
at.random.var.RandomGeneratorSharedVariable,
),
)
]
sdict_overrides = convert_str_to_rv_dict(model, overrides or {})
    initval_strats = {
**model.initial_values,
**sdict_overrides,
}
initial_values = make_initial_point_expression(
free_rvs=model.free_RVs,
rvs_to_values=model.rvs_to_values,
initval_strategies=initval_strats,
jitter_rvs=jitter_rvs,
default_strategy=default_strategy,
return_transformed=return_transformed,
)
# Replace original rng shared variables so that we don't mess with them
# when calling the final seeded function
graph = FunctionGraph(outputs=initial_values, clone=False)
rng_nodes = find_rng_nodes(graph.outputs)
new_rng_nodes: List[Union[np.random.RandomState, np.random.Generator]] = []
for rng_node in rng_nodes:
rng_cls: type
if isinstance(rng_node, at.random.var.RandomStateSharedVariable):
rng_cls = np.random.RandomState
else:
rng_cls = np.random.Generator
new_rng_nodes.append(aesara.shared(rng_cls(np.random.PCG64())))
graph.replace_all(zip(rng_nodes, new_rng_nodes), import_missing=True)
func = compile_pymc(inputs=[], outputs=graph.outputs, mode=aesara.compile.mode.FAST_COMPILE)
varnames = []
for var in model.free_RVs:
transform = getattr(model.rvs_to_values[var].tag, "transform", None)
if transform is not None and return_transformed:
name = get_transformed_name(var.name, transform)
else:
name = var.name
varnames.append(name)
def make_seeded_function(func):
rngs = find_rng_nodes(func.maker.fgraph.outputs)
@functools.wraps(func)
def inner(seed, *args, **kwargs):
seeds = [
np.random.PCG64(sub_seed)
for sub_seed in np.random.SeedSequence(seed).spawn(len(rngs))
]
for rng, seed in zip(rngs, seeds):
if isinstance(rng, at.random.var.RandomStateSharedVariable):
new_rng = np.random.RandomState(seed)
else:
new_rng = np.random.Generator(seed)
rng.set_value(new_rng, True)
values = func(*args, **kwargs)
return dict(zip(varnames, values))
return inner
return make_seeded_function(func)
|
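The seeding logic in make_seeded_function spawns one child seed per RNG shared variable so every stream is independent yet reproducible from a single seed. The NumPy part of that pattern in isolation:

import numpy as np

seed = 42
children = np.random.SeedSequence(seed).spawn(3)  # one child sequence per RNG to reseed
rngs = [np.random.Generator(np.random.PCG64(child)) for child in children]
print([rng.standard_normal() for rng in rngs])    # independent streams, identical on every run with seed 42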
36,906 |
def add_parser(subparsers, parent_parser):
FREEZE_HELP = "Freeze stages or .dvc files."
freeze_parser = subparsers.add_parser(
"freeze",
parents=[parent_parser],
description=append_doc_link(FREEZE_HELP, "freeze"),
help=FREEZE_HELP,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
freeze_parser.add_argument(
"targets",
nargs="+",
help="Stages or .dvc files to freeze.",
metavar="targets",
choices=completion.Required.DVC_FILE,
)
freeze_parser.set_defaults(func=CmdFreeze)
UNFREEZE_HELP = "Unfreeze stages or .dvc files."
unfreeze_parser = subparsers.add_parser(
"unfreeze",
parents=[parent_parser],
description=append_doc_link(UNFREEZE_HELP, "unfreeze"),
help=UNFREEZE_HELP,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
unfreeze_parser.add_argument(
"targets",
nargs="+",
help="Stages or .dvc files to unfreeze.",
metavar="targets",
choices=completion.Required.DVC_FILE,
)
unfreeze_parser.set_defaults(func=CmdUnfreeze)
|
def add_parser(subparsers, parent_parser):
FREEZE_HELP = "Freeze stages or .dvc files."
freeze_parser = subparsers.add_parser(
"freeze",
parents=[parent_parser],
description=append_doc_link(FREEZE_HELP, "freeze"),
help=FREEZE_HELP,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
freeze_parser.add_argument(
"targets",
nargs="+",
help="Stages or .dvc files to freeze",
metavar="targets",
choices=completion.Required.DVC_FILE,
)
freeze_parser.set_defaults(func=CmdFreeze)
UNFREEZE_HELP = "Unfreeze stages or .dvc files."
unfreeze_parser = subparsers.add_parser(
"unfreeze",
parents=[parent_parser],
description=append_doc_link(UNFREEZE_HELP, "unfreeze"),
help=UNFREEZE_HELP,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
unfreeze_parser.add_argument(
"targets",
nargs="+",
help="Stages or .dvc files to unfreeze.",
metavar="targets",
choices=completion.Required.DVC_FILE,
)
unfreeze_parser.set_defaults(func=CmdUnfreeze)
|
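A stripped-down, self-contained sketch of the same argparse pattern (a sub-command plus set_defaults(func=...)); the command object here is a plain string placeholder rather than dvc's CmdFreeze class:

import argparse

parser = argparse.ArgumentParser(prog="demo")
subparsers = parser.add_subparsers(dest="command")

freeze = subparsers.add_parser("freeze", help="Freeze stages or .dvc files.")
freeze.add_argument("targets", nargs="+", metavar="targets")
freeze.set_defaults(func="CmdFreeze")  # the real code stores a command class here

args = parser.parse_args(["freeze", "train.dvc", "eval.dvc"])
print(args.func, args.targets)         # -> CmdFreeze ['train.dvc', 'eval.dvc']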
26,458 |
def get_new_command(command):
# because composer lets you install many packages at once, must look at output to determine the erroneous package name
wrong_package_name = re.search(r"Could not find package (.*)\.", command.output).group(1)
offending_script_param = wrong_package_name if (wrong_package_name in command.script_parts) else re.findall(
r"{}:[^ ]+".format(wrong_package_name), command.script)[0]
version_constraint = offending_script_param[len(wrong_package_name):]
one_suggestion_only = 'did you mean this?' in command.output.lower()
if one_suggestion_only:
# wrong regex??
new_cmd = re.findall(r'Did you mean this\?[^\n]*\n\s*([^\n]*)', command.output)
return replace_argument(command.script, offending_script_param, new_cmd[0].strip() + version_constraint)
else:
# there are multiple suggestions
        # trim output text to make it more digestible by regex
trim_start_index = command.output.find("Did you mean one of these?")
short_output = command.output[trim_start_index:]
stripped_lines = [line.strip() for line in short_output.split("\n")]
        # each of the suggested commands can be found from index 1 to the first occurrence of blank string
try:
end_index = stripped_lines.index('')
except ValueError:
end_index = None
suggested_commands = stripped_lines[1:end_index]
return [
replace_argument(command.script, offending_script_param, cmd + version_constraint)
for cmd in suggested_commands
]
|
def get_new_command(command):
# because composer lets you install many packages at once, must look at output to determine the erroneous package name
wrong_package_name = re.search(r"Could not find package (.*)\.", command.output).group(1)
offending_script_param = wrong_package_name if (wrong_package_name in command.script_parts) else re.findall(
r"{}:[^ ]+".format(wrong_package_name), command.script)[0]
version_constraint = offending_script_param[len(wrong_package_name):]
one_suggestion_only = 'did you mean this?' in command.output.lower()
if one_suggestion_only:
# wrong regex??
new_cmd = re.findall(r'Did you mean this\?[^\n]*\n\s*([^\n]*)', command.output)
return replace_argument(command.script, offending_script_param, new_cmd[0].strip() + version_constraint)
else:
# there are multiple suggestions
        # trim output text to make it more digestible by regex
trim_start_index = command.output.find("Did you mean one of these?")
short_output = command.output[trim_start_index:]
stripped_lines = [line.strip() for line in short_output.split("\n")]
        # each of the suggested packages can be found from index 1 to the first occurrence of blank string
try:
end_index = stripped_lines.index('')
except ValueError:
end_index = None
suggested_packages = stripped_lines[1:end_index]
return [
replace_argument(command.script, offending_script_param, cmd + version_constraint)
            for cmd in suggested_packages
]
|
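The parsing steps above (a regex for the offending package, then slicing the "Did you mean one of these?" block) can be exercised on a made-up composer error message:

import re

output = (
    "Could not find package foo/barr.\n\n"
    "Did you mean one of these?\n"
    "    foo/bar\n"
    "    foo/baz\n"
    "\n"
)
wrong = re.search(r"Could not find package (.*)\.", output).group(1)
lines = [line.strip() for line in output[output.find("Did you mean one of these?"):].split("\n")]
print(wrong, lines[1:lines.index("")])  # -> foo/barr ['foo/bar', 'foo/baz']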
9,825 |
def add_initiator(module, array):
""""Add a host FC initiator."""
changed = False
wwn = validate_wwn(module, 'wwn is required for adding initiator.')
if module.check_mode:
module.exit_json(changed=changed)
try:
ini = array.add_initiator(
module.params['name'],
'Ansible FC initiator',
wwn)
if ini:
module.log(msg='Added initiator {0}'.format(ini['id']))
changed = True
else:
raise Exception
except Exception:
module.fail_json(msg='Initiator {0} add failed.'.format(wwn))
module.exit_json(changed=changed)
|
def add_initiator(module, array):
""""Add a host FC initiator."""
changed = False
wwn = validate_wwn(module)
if module.check_mode:
module.exit_json(changed=changed)
try:
ini = array.add_initiator(
module.params['name'],
'Ansible FC initiator',
wwn)
if ini:
module.log(msg='Added initiator {0}'.format(ini['id']))
changed = True
else:
raise Exception
except Exception:
module.fail_json(msg='Initiator {0} add failed.'.format(wwn))
module.exit_json(changed=changed)
|
22,605 |
def with_debug(
layer: Model,
name: Optional[str] = None,
*,
on_init: Optional[Callable[[Model, Any, Any], None]] = None,
on_forward: Optional[Callable[[Model, Any, bool], None]] = None,
on_backprop: Optional[Callable[[Any], None]] = None,
):
"""Debugging layer that wraps any layer and allows executing callbacks
during the forward pass, backward pass and initialization. The callbacks
will receive the same arguments as the functions they're called in.
"""
name = layer.name if name is None else name
def forward(model: Model, X: Any, is_train: bool) -> Tuple[Any, Callable]:
if on_forward:
on_forward(model, X, is_train)
layer_Y, layer_callback = layer(X, is_train=is_train)
def backprop(dY: Any) -> Any:
if on_backprop:
on_backprop(dY)
return layer_callback(dY)
return layer_Y, backprop
def init(model: Model, X: Any, Y: Any) -> Model:
if on_init:
on_init(model, X, Y)
return layer.initialize(X, Y)
return Model(f"debug:{name}", forward, init=init)
|
def with_debug(
layer: Model,
name: Optional[str] = None,
*,
on_init: Callable[[Model, Any, Any], None] = do_nothing,
on_forward: Optional[Callable[[Model, Any, bool], None]] = None,
on_backprop: Optional[Callable[[Any], None]] = None,
):
"""Debugging layer that wraps any layer and allows executing callbacks
during the forward pass, backward pass and initialization. The callbacks
will receive the same arguments as the functions they're called in.
"""
name = layer.name if name is None else name
def forward(model: Model, X: Any, is_train: bool) -> Tuple[Any, Callable]:
if on_forward:
on_forward(model, X, is_train)
layer_Y, layer_callback = layer(X, is_train=is_train)
def backprop(dY: Any) -> Any:
if on_backprop:
on_backprop(dY)
return layer_callback(dY)
return layer_Y, backprop
def init(model: Model, X: Any, Y: Any) -> Model:
if on_init:
on_init(model, X, Y)
return layer.initialize(X, Y)
return Model(f"debug:{name}", forward, init=init)
|
8,775 |
def is_triggerable(obj):
"""Check if ``obj`` can handle the bot's triggers.
:param obj: any :term:`function` to check.
:return: ``True`` if ``obj`` can handle the bot's triggers.
A triggerable is a callable that will be used by the bot to handle a
particular trigger (i.e. an IRC message): it can be a regex rule, an event,
an intent, a command or nickname command. However, it must not be a job
or an URL callback.
.. seealso::
The :mod:`sopel.module` defines decorators to make a function a
triggerable object.
"""
forbidden = (
'interval',
'url_regex',
)
must_not = not any(hasattr(obj, attr) for attr in forbidden)
allowed = (
'rule',
'event',
'intents',
'commands',
'nickname_commands',
)
return must_not and any(hasattr(obj, attr) for attr in allowed)
|
def is_triggerable(obj):
"""Check if ``obj`` can handle the bot's triggers.
:param obj: any :term:`function` to check.
:return: ``True`` if ``obj`` can handle the bot's triggers.
A triggerable is a callable that will be used by the bot to handle a
particular trigger (i.e. an IRC message): it can be a regex rule, an event,
an intent, a command or nickname command. However, it must not be a job
or an URL callback.
.. seealso::
The :mod:`sopel.module` defines decorators to make a function a
triggerable object.
"""
forbidden = (
'interval',
'url_regex',
)
must_not = not any(hasattr(obj, attr) for attr in forbidden)
allowed = (
'rule',
'event',
'intents',
'commands',
'nickname_commands',
)
    return must_not and any(hasattr(obj, attr) for attr in allowed)
|
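A hypothetical stand-in for the sopel decorators shows what the attribute checks look for; `command` below only mimics sopel.module.commands by attaching a `commands` attribute, and with the intended logic (no forbidden attribute, at least one allowed one) the call prints True:

def command(*names):
    """Illustrative stand-in for sopel.module.commands."""
    def decorator(func):
        func.commands = list(names)
        return func
    return decorator

@command("hello")
def greet(bot, trigger):
    pass

print(is_triggerable(greet))  # has `commands`, lacks `interval` and `url_regex`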
14,052 |
def _read_parquet(path, columns=None, **kwargs):
"""
Load a Parquet object from the file path, returning a GeoDataFrame.
You can read a subset of columns in the file using the ``columns`` parameter.
However, the structure of the returned GeoDataFrame will depend on which
columns you read:
* if no geometry columns are read, this will raise a ``ValueError`` - you
should use the pandas `read_parquet` method instead.
* if the primary geometry column saved to this file is not included in
columns, the first available geometry column will be set as the geometry
column of the returned GeoDataFrame.
Requires 'pyarrow'.
.. versionadded:: 0.8
Parameters
----------
path : str, path object
columns : list-like of strings, default=None
If not None, only these columns will be read from the file. If
the primary geometry column is not included, the first secondary
geometry read from the file will be set as the geometry column
of the returned GeoDataFrame. If no geometry columns are present,
a ``ValueError`` will be raised.
**kwargs
Any additional kwargs passed to pyarrow.parquet.read_table().
Returns
-------
GeoDataFrame
Examples
--------
>>> df = geopandas.read_parquet("data.parquet) # doctest: +SKIP
Specifying columns to read:
>>> df = geopandas.read_parquet(
... "data.parquet,
... columns=["geometry", "pop_est"]
... ) # doctest: +SKIP
"""
parquet = import_optional_dependency(
"pyarrow.parquet", extra="pyarrow is required for Parquet support."
)
kwargs["use_pandas_metadata"] = True
table = parquet.read_table(path, columns=columns, **kwargs)
return _arrow_to_geopandas(table)
|
def _read_parquet(path, columns=None, **kwargs):
"""
Load a Parquet object from the file path, returning a GeoDataFrame.
You can read a subset of columns in the file using the ``columns`` parameter.
However, the structure of the returned GeoDataFrame will depend on which
columns you read:
* if no geometry columns are read, this will raise a ``ValueError`` - you
should use the pandas `read_parquet` method instead.
* if the primary geometry column saved to this file is not included in
columns, the first available geometry column will be set as the geometry
column of the returned GeoDataFrame.
Requires 'pyarrow'.
.. versionadded:: 0.8
Parameters
----------
path : str, path object
columns : list-like of strings, default=None
If not None, only these columns will be read from the file. If
the primary geometry column is not included, the first secondary
geometry read from the file will be set as the geometry column
of the returned GeoDataFrame. If no geometry columns are present,
a ``ValueError`` will be raised.
**kwargs
Any additional kwargs passed to pyarrow.parquet.read_table().
Returns
-------
GeoDataFrame
Examples
--------
>>> df = geopandas.read_parquet("data.parquet") # doctest: +SKIP
Specifying columns to read:
>>> df = geopandas.read_parquet(
... "data.parquet,
... columns=["geometry", "pop_est"]
... ) # doctest: +SKIP
"""
parquet = import_optional_dependency(
"pyarrow.parquet", extra="pyarrow is required for Parquet support."
)
kwargs["use_pandas_metadata"] = True
table = parquet.read_table(path, columns=columns, **kwargs)
return _arrow_to_geopandas(table)
|
59,175 |
def get_terminal_size(fallback=(80, 24)):
"""Get the size of the terminal window.
For each of the two dimensions, the environment variable, COLUMNS
and LINES respectively, is checked. If the variable is defined and
the value is a positive integer, it is used.
When COLUMNS or LINES is not defined, which is the common case,
the terminal connected to sys.__stdin__, sys.__stderr__, or sys.__stdout__
is queried by invoking os.get_terminal_size.
If the terminal size cannot be successfully queried, either because
the system doesn't support querying, or because we are not
connected to a terminal, the value given in fallback parameter
is used. Fallback defaults to (80, 24) which is the default
size used by many terminal emulators.
The value returned is a named tuple of type os.terminal_size.
"""
# columns, lines are the working values
try:
columns = int(os.environ['COLUMNS'])
except (KeyError, ValueError):
columns = 0
try:
lines = int(os.environ['LINES'])
except (KeyError, ValueError):
lines = 0
# only query if necessary
if columns <= 0 or lines <= 0:
try:
os_get_terminal_size = os.get_terminal_size
except AttributeError:
size = os.terminal_size(fallback)
else:
for check in [sys.__stdin__, sys.__stderr__, sys.__stdout__]:
try:
size = os_get_terminal_size(check.fileno())
except (AttributeError, ValueError, OSError):
# fd is None, closed, detached, or not a terminal.
continue
else:
break
else:
size = os.terminal_size(fallback)
if columns <= 0:
columns = size.columns
if lines <= 0:
lines = size.lines
return os.terminal_size((columns, lines))
|
def get_terminal_size(fallback=(80, 24)):
"""Get the size of the terminal window.
For each of the two dimensions, the environment variable, COLUMNS
and LINES respectively, is checked. If the variable is defined and
the value is a positive integer, it is used.
When COLUMNS or LINES is not defined, which is the common case,
the terminal connected to sys.__stdin__, sys.__stderr__, or sys.__stdout__
is queried by invoking os.get_terminal_size.
If the terminal size cannot be successfully queried, either because
the system doesn't support querying, or because we are not
connected to a terminal, the value given in fallback parameter
is used. Fallback defaults to (80, 24) which is the default
size used by many terminal emulators.
The value returned is a named tuple of type os.terminal_size.
"""
# columns, lines are the working values
try:
columns = int(os.environ['COLUMNS'])
except (KeyError, ValueError):
columns = 0
try:
lines = int(os.environ['LINES'])
except (KeyError, ValueError):
lines = 0
# only query if necessary
if columns <= 0 or lines <= 0:
try:
os_get_terminal_size = os.get_terminal_size
except AttributeError:
size = os.terminal_size(fallback)
else:
for stream in (sys.__stdin__, sys.__stderr__, sys.__stdout__):
try:
size = os_get_terminal_size(stream.fileno())
break
except (AttributeError, ValueError, OSError):
# stream is None, closed, detached, or not a terminal.
pass
else:
size = os.terminal_size(fallback)
if columns <= 0:
columns = size.columns
if lines <= 0:
lines = size.lines
return os.terminal_size((columns, lines))
|
32,303 |
def update_last_run_object(last_run, incidents, fetch_limit, start_fetch_time, end_fetch_time, look_back,
created_time_field, id_field, date_format='%Y-%m-%dT%H:%M:%S', increase_last_run_time=False):
"""
Updates the LastRun object with the next fetch time and limit and with the new fetched incident IDs.
:type last_run: ``dict``
:param last_run: The LastRun object
:type incidents: ``list``
:param incidents: List of the incidents result
:type fetch_limit: ``int``
:param fetch_limit: The fetch limit
:type start_fetch_time: ``str``
:param start_fetch_time: The time the fetch started to fetch from
:type end_fetch_time: ``str``
:param end_fetch_time: The time the fetch ended to fetch
:type look_back: ``int``
:param look_back: The time to look back in fetch in minutes
:type created_time_field: ``str``
:param created_time_field: The incident created time field
:type id_field: ``str``
:param id_field: The incident id field
:type date_format: ``str``
:param date_format: The date format
:type increase_last_run_time: ``bool``
:param increase_last_run_time: Whether to increase the last run time with one millisecond
:return: The updated LastRun object
:rtype: ``Dict``
"""
found_incidents = get_found_incident_ids(last_run, incidents, look_back, id_field)
updated_last_run = create_updated_last_run_object(last_run, incidents, fetch_limit, look_back, start_fetch_time,
end_fetch_time, created_time_field, date_format, increase_last_run_time)
if found_incidents:
updated_last_run.update({'found_incident_ids': found_incidents})
last_run.update(updated_last_run)
return last_run
|
def update_last_run_object(last_run, incidents, fetch_limit, start_fetch_time, end_fetch_time, look_back,
created_time_field, id_field, date_format='%Y-%m-%dT%H:%M:%S', increase_last_run_time=False):
"""
Updates the LastRun object with the next fetch time and limit and with the new fetched incident IDs.
:type last_run: ``dict``
:param last_run: The LastRun object
:type incidents: ``list``
:param incidents: List of the incidents result
:type fetch_limit: ``int``
:param fetch_limit: The fetch limit
:type start_fetch_time: ``str``
:param start_fetch_time: The time the fetch started to fetch from
:type end_fetch_time: ``str``
:param end_fetch_time: The end time in which the fetch incidents ended
:type look_back: ``int``
:param look_back: The time to look back in fetch in minutes
:type created_time_field: ``str``
:param created_time_field: The incident created time field
:type id_field: ``str``
:param id_field: The incident id field
:type date_format: ``str``
:param date_format: The date format
:type increase_last_run_time: ``bool``
:param increase_last_run_time: Whether to increase the last run time with one millisecond
:return: The updated LastRun object
:rtype: ``Dict``
"""
found_incidents = get_found_incident_ids(last_run, incidents, look_back, id_field)
updated_last_run = create_updated_last_run_object(last_run, incidents, fetch_limit, look_back, start_fetch_time,
end_fetch_time, created_time_field, date_format, increase_last_run_time)
if found_incidents:
updated_last_run.update({'found_incident_ids': found_incidents})
last_run.update(updated_last_run)
return last_run
|
35,970 |
def delete_nodes(
pks, verbosity=0, dry_run=False, force=False, create_forward=True, call_calc_forward=False, call_work_forward=False
):
"""
Delete nodes by a list of pks.
This command will delete not only the specified nodes, but also the ones that are
linked to these and should be also deleted in order to keep a consistent provenance
according to the rules explained in the concepts section of the documentation.
In summary:
1. If a DATA node is deleted, any process nodes linked to it will also be deleted.
2. If a CALC node is deleted, any incoming WORK node (callers) will be deleted as
well whereas any incoming DATA node (inputs) will be kept. Outgoing DATA nodes
(outputs) will be deleted by default but this can be disabled.
3. If a WORK node is deleted, any incoming WORK node (callers) will be deleted as
well, but all DATA nodes will be kept. Outgoing WORK or CALC nodes will be kept by
default, but deletion of either of both kind of connected nodes can be enabled.
These rules are 'recursive', so if a CALC node is deleted, then its output DATA
nodes will be deleted as well, and then any CALC node that may have those as
inputs, and so on.
:param pks: a list of the PKs of the nodes to delete
:param bool force: do not ask for confirmation to delete nodes.
:param int verbosity: 0 prints nothing,
1 prints just sums and total,
2 prints individual nodes.
:param bool create_forward:
This will delete all output data created by any deleted calculation.
:param bool call_calc_forward:
This will also delete all calculations called by any workflow that is going to
be deleted. Note that when you delete a workflow, also all parent workflows are
deleted (recursively). Therefore, setting this flag to True may delete
calculations that are 'unrelated' to what has been chosen to be deleted, just
because they are connected at some point in the upwards provenance. Use with
care, and it is advisable to never combine it with force.
:param bool call_work_forward:
        This will also delete all workflows called by any workflow that is going to
be deleted. The same disclaimer as forward_calcs applies here as well.
:param bool dry_run:
Do not delete, a dry run, with statistics printed according to verbosity levels.
:param bool force:
Do not ask for confirmation to delete nodes.
"""
# pylint: disable=too-many-arguments,too-many-branches,too-many-locals,too-many-statements
from aiida.backends.utils import delete_nodes_and_connections
from aiida.common import exceptions
from aiida.common.links import LinkType
from aiida.orm import Node, QueryBuilder, load_node
starting_pks = []
for pk in pks:
try:
load_node(pk)
except exceptions.NotExistent:
echo.echo_warning('warning: node with pk<{}> does not exist, skipping'.format(pk))
else:
starting_pks.append(pk)
# An empty set might be problematic for the queries done below.
if not starting_pks:
if verbosity:
echo.echo('Nothing to delete')
return
follow_upwards = []
follow_upwards.append(LinkType.CREATE.value)
follow_upwards.append(LinkType.RETURN.value)
follow_upwards.append(LinkType.CALL_CALC.value)
follow_upwards.append(LinkType.CALL_WORK.value)
follow_downwards = []
follow_downwards.append(LinkType.INPUT_CALC.value)
follow_downwards.append(LinkType.INPUT_WORK.value)
if create_forward:
follow_downwards.append(LinkType.CREATE.value)
if call_calc_forward:
follow_downwards.append(LinkType.CALL_CALC.value)
if call_work_forward:
follow_downwards.append(LinkType.CALL_WORK.value)
links_upwards = {'type': {'in': follow_upwards}}
links_downwards = {'type': {'in': follow_downwards}}
operational_set = set().union(set(starting_pks))
accumulator_set = set().union(set(starting_pks))
while operational_set:
new_pks_set = set()
query_nodes = QueryBuilder()
query_nodes.append(Node, filters={'id': {'in': operational_set}}, tag='sources')
query_nodes.append(
Node,
filters={'id': {
'!in': accumulator_set
}},
edge_filters=links_downwards,
with_incoming='sources',
project='id'
)
new_pks_set = new_pks_set.union(set(i for i, in query_nodes.iterall()))
query_nodes = QueryBuilder()
query_nodes.append(Node, filters={'id': {'in': operational_set}}, tag='sources')
query_nodes.append(
Node,
filters={'id': {
'!in': accumulator_set
}},
edge_filters=links_upwards,
with_outgoing='sources',
project='id'
)
new_pks_set = new_pks_set.union(set(i for i, in query_nodes.iterall()))
operational_set = new_pks_set.difference(accumulator_set)
accumulator_set = new_pks_set.union(accumulator_set)
pks_set_to_delete = accumulator_set
if verbosity > 0:
echo.echo(
'I {} delete {} node{}'.format(
'would' if dry_run else 'will', len(pks_set_to_delete), 's' if len(pks_set_to_delete) > 1 else ''
)
)
if verbosity > 1:
builder = QueryBuilder().append(
Node, filters={'id': {
'in': pks_set_to_delete
}}, project=('uuid', 'id', 'node_type', 'label')
)
echo.echo('The nodes I {} delete:'.format('would' if dry_run else 'will'))
for uuid, pk, type_string, label in builder.iterall():
try:
short_type_string = type_string.split('.')[-2]
except IndexError:
short_type_string = type_string
echo.echo(' {} {} {} {}'.format(uuid, pk, short_type_string, label))
if dry_run:
if verbosity > 0:
echo.echo('\nThis was a dry run, exiting without deleting anything')
return
# Asking for user confirmation here
if force:
pass
else:
echo.echo_warning('YOU ARE ABOUT TO DELETE {} NODES! THIS CANNOT BE UNDONE!'.format(len(pks_set_to_delete)))
if not click.confirm('Shall I continue?'):
echo.echo('Exiting without deleting')
return
# Recover the list of folders to delete before actually deleting the nodes. I will delete the folders only later,
# so that if there is a problem during the deletion of the nodes in the DB, I don't delete the folders
repositories = [load_node(pk)._repository for pk in pks_set_to_delete] # pylint: disable=protected-access
if verbosity > 0:
echo.echo('I am starting node deletion.')
delete_nodes_and_connections(pks_set_to_delete)
if verbosity > 0:
echo.echo('I have finished node deletion and I am starting folder deletion.')
# If we are here, we managed to delete the entries from the DB.
# I can now delete the folders
for repository in repositories:
repository.erase(force=True)
if verbosity > 0:
echo.echo('I have finished folder deletion. Deletion completed.')
|
def delete_nodes(
pks, verbosity=0, dry_run=False, force=False, create_forward=True, call_calc_forward=False, call_work_forward=False
):
"""
Delete nodes by a list of pks.
This command will delete not only the specified nodes, but also the ones that are
linked to these and should be also deleted in order to keep a consistent provenance
according to the rules explained in the concepts section of the documentation.
In summary:
1. If a DATA node is deleted, any process nodes linked to it will also be deleted.
2. If a CALC node is deleted, any incoming WORK node (callers) will be deleted as
well whereas any incoming DATA node (inputs) will be kept. Outgoing DATA nodes
(outputs) will be deleted by default but this can be disabled.
3. If a WORK node is deleted, any incoming WORK node (callers) will be deleted as
well, but all DATA nodes will be kept. Outgoing WORK or CALC nodes will be kept by
default, but deletion of either of both kind of connected nodes can be enabled.
These rules are 'recursive', so if a CALC node is deleted, then its output DATA
nodes will be deleted as well, and then any CALC node that may have those as
inputs, and so on.
:param pks: a list of the PKs of the nodes to delete
:param bool force: do not ask for confirmation to delete nodes.
:param int verbosity: 0 prints nothing,
1 prints just sums and total,
2 prints individual nodes.
:param bool create_forward:
This will delete all output data created by any deleted calculation.
:param bool call_calc_forward:
This will also delete all calculations called by any workflow that is going to
be deleted. Note that when you delete a workflow, also all parent workflows are
deleted (recursively). Therefore, setting this flag to True may delete
calculations that are 'unrelated' to what has been chosen to be deleted, just
because they are connected at some point in the upwards provenance. Use with
care, and it is advisable to never combine it with force.
:param bool call_work_forward:
This will also delete all calculations called by any workflow that is going to
        be deleted. The same disclaimer as call_calc_forward applies here as well.
:param bool dry_run:
        Do not delete anything; only perform a dry run, with statistics printed according to the verbosity level.
"""
# pylint: disable=too-many-arguments,too-many-branches,too-many-locals,too-many-statements
from aiida.backends.utils import delete_nodes_and_connections
from aiida.common import exceptions
from aiida.common.links import LinkType
from aiida.orm import Node, QueryBuilder, load_node
starting_pks = []
for pk in pks:
try:
load_node(pk)
except exceptions.NotExistent:
echo.echo_warning('warning: node with pk<{}> does not exist, skipping'.format(pk))
else:
starting_pks.append(pk)
# An empty set might be problematic for the queries done below.
if not starting_pks:
if verbosity:
echo.echo('Nothing to delete')
return
follow_upwards = []
follow_upwards.append(LinkType.CREATE.value)
follow_upwards.append(LinkType.RETURN.value)
follow_upwards.append(LinkType.CALL_CALC.value)
follow_upwards.append(LinkType.CALL_WORK.value)
follow_downwards = []
follow_downwards.append(LinkType.INPUT_CALC.value)
follow_downwards.append(LinkType.INPUT_WORK.value)
if create_forward:
follow_downwards.append(LinkType.CREATE.value)
if call_calc_forward:
follow_downwards.append(LinkType.CALL_CALC.value)
if call_work_forward:
follow_downwards.append(LinkType.CALL_WORK.value)
links_upwards = {'type': {'in': follow_upwards}}
links_downwards = {'type': {'in': follow_downwards}}
operational_set = set().union(set(starting_pks))
accumulator_set = set().union(set(starting_pks))
while operational_set:
new_pks_set = set()
query_nodes = QueryBuilder()
query_nodes.append(Node, filters={'id': {'in': operational_set}}, tag='sources')
query_nodes.append(
Node,
filters={'id': {
'!in': accumulator_set
}},
edge_filters=links_downwards,
with_incoming='sources',
project='id'
)
new_pks_set = new_pks_set.union(set(i for i, in query_nodes.iterall()))
query_nodes = QueryBuilder()
query_nodes.append(Node, filters={'id': {'in': operational_set}}, tag='sources')
query_nodes.append(
Node,
filters={'id': {
'!in': accumulator_set
}},
edge_filters=links_upwards,
with_outgoing='sources',
project='id'
)
new_pks_set = new_pks_set.union(set(i for i, in query_nodes.iterall()))
operational_set = new_pks_set.difference(accumulator_set)
accumulator_set = new_pks_set.union(accumulator_set)
pks_set_to_delete = accumulator_set
if verbosity > 0:
echo.echo(
'I {} delete {} node{}'.format(
'would' if dry_run else 'will', len(pks_set_to_delete), 's' if len(pks_set_to_delete) > 1 else ''
)
)
if verbosity > 1:
builder = QueryBuilder().append(
Node, filters={'id': {
'in': pks_set_to_delete
}}, project=('uuid', 'id', 'node_type', 'label')
)
echo.echo('The nodes I {} delete:'.format('would' if dry_run else 'will'))
for uuid, pk, type_string, label in builder.iterall():
try:
short_type_string = type_string.split('.')[-2]
except IndexError:
short_type_string = type_string
echo.echo(' {} {} {} {}'.format(uuid, pk, short_type_string, label))
if dry_run:
if verbosity > 0:
echo.echo('\nThis was a dry run, exiting without deleting anything')
return
# Asking for user confirmation here
    if not force:
echo.echo_warning('YOU ARE ABOUT TO DELETE {} NODES! THIS CANNOT BE UNDONE!'.format(len(pks_set_to_delete)))
if not click.confirm('Shall I continue?'):
echo.echo('Exiting without deleting')
return
# Recover the list of folders to delete before actually deleting the nodes. I will delete the folders only later,
# so that if there is a problem during the deletion of the nodes in the DB, I don't delete the folders
repositories = [load_node(pk)._repository for pk in pks_set_to_delete] # pylint: disable=protected-access
if verbosity > 0:
echo.echo('I am starting node deletion.')
delete_nodes_and_connections(pks_set_to_delete)
if verbosity > 0:
echo.echo('Nodes deleted from the database, deleting files from the repository now...')
# If we are here, we managed to delete the entries from the DB.
# I can now delete the folders
for repository in repositories:
repository.erase(force=True)
if verbosity > 0:
echo.echo('I have finished folder deletion. Deletion completed.')
|
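The delete_nodes function above grows the set of nodes to delete by repeatedly following the selected link types from a frontier of PKs until nothing new is reached. Below is a minimal, self-contained sketch of that frontier-expansion pattern on a toy in-memory graph; the graph dict and the node ids are invented for illustration, and the real code performs the same accumulation with QueryBuilder queries instead of dictionary lookups.
# Minimal sketch of the frontier-expansion loop used by delete_nodes,
# on a toy in-memory graph (the ids and links below are invented).
graph = {
    1: {2, 3},  # node 1 has links that must be followed to nodes 2 and 3
    2: {4},
    3: set(),
    4: set(),
}
starting_pks = {1}
operational_set = set(starting_pks)   # frontier explored in the current pass
accumulator_set = set(starting_pks)   # every pk marked for deletion so far
while operational_set:
    new_pks_set = set()
    for pk in operational_set:
        # Follow links from the frontier, skipping pks already accumulated,
        # mirroring the '!in': accumulator_set filter in the QueryBuilder calls.
        new_pks_set |= {n for n in graph.get(pk, set()) if n not in accumulator_set}
    operational_set = new_pks_set - accumulator_set
    accumulator_set |= new_pks_set
print(sorted(accumulator_set))  # -> [1, 2, 3, 4]
Note also the ordering in the real function: the database entries are deleted first and the repository folders are erased only afterwards, so a failure during the database step leaves the files on disk untouched.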
49,695 |
def solve_plugin_dependencies(plugins):
"""
Return a list of plugins sorted by dependencies.
Notes
-----
* Prune the plugins for which required dependencies are not met
* Prune the optional dependencies from the remaining plugins based on
the remaining plugins available.
* Group the remaining optional dependencies with the required
dependencies.
* Sort with toposort algorithm.
"""
# Back up dependencies
for plugin in plugins:
if plugin.REQUIRES is None:
plugin.REQUIRES = []
if plugin.OPTIONAL is None:
plugin.OPTIONAL = []
plugin._REQUIRES = plugin.REQUIRES.copy()
plugin._OPTIONAL = plugin.OPTIONAL.copy()
plugin_names = {plugin.NAME: plugin for plugin in plugins}
dependencies_dict = {}
# Prune plugins based on required dependencies or populate the dependencies
    # if using a wildcard, i.e. 'Plugins.All', or to add base dependencies, for
# example the Shortcuts plugin to all SpyderDockablePlugin (shortcut for
# the switch to plugin action)
remaining_plugins = []
for plugin in plugins:
if issubclass(plugin, SpyderDockablePlugin):
if Plugins.Shortcuts not in plugin.REQUIRES:
plugin.REQUIRES.append(Plugins.Shortcuts)
plugin._REQUIRES = plugin.REQUIRES.copy()
for required in plugin.REQUIRES[:]:
# Check self references
if plugin.NAME == required:
raise SpyderAPIError("Plugin is self referencing!")
if (required == Plugins.All and len(plugin.REQUIRES) == 1):
all_plugins = plugin_names.copy()
all_plugins.pop(plugin.NAME)
plugin.REQUIRES = list(all_plugins)
plugin._REQUIRES = plugin.REQUIRES.copy()
logger.info("Added all plugins as dependencies to plugin: " +
plugin.NAME)
continue
if required not in plugin_names:
plugin_names.pop(plugin.NAME)
logger.error("Pruned plugin: " + plugin.NAME)
break
else:
remaining_plugins.append(plugin)
# Prune optional dependencies from remaining plugins
for plugin in remaining_plugins:
for optional in plugin.OPTIONAL:
if optional not in plugin_names:
plugin._OPTIONAL.remove(optional)
plugin._REQUIRES += plugin._OPTIONAL
dependencies_dict[plugin.NAME] = set(plugin._REQUIRES)
# Now use toposort with plugin._REQUIRES!
deps = toposort_flatten(dependencies_dict)
plugin_deps = [plugin_names[name] for name in deps]
return plugin_deps
|
def solve_plugin_dependencies(plugins):
"""
Return a list of plugins sorted by dependencies.
Notes
-----
* Prune the plugins for which required dependencies are not met
* Prune the optional dependencies from the remaining plugins based on
the remaining plugins available.
* Group the remaining optional dependencies with the required
dependencies.
* Sort with toposort algorithm.
"""
# Back up dependencies
for plugin in plugins:
if plugin.REQUIRES is None:
plugin.REQUIRES = []
if plugin.OPTIONAL is None:
plugin.OPTIONAL = []
plugin._REQUIRES = plugin.REQUIRES.copy()
plugin._OPTIONAL = plugin.OPTIONAL.copy()
plugin_names = {plugin.NAME: plugin for plugin in plugins}
dependencies_dict = {}
# Prune plugins based on required dependencies or populate the dependencies
    # if using a wildcard, i.e. 'Plugins.All', or to add base dependencies, for
# example the Shortcuts plugin to all SpyderDockablePlugin's (shortcut for
# the "switch to plugin" action).
remaining_plugins = []
for plugin in plugins:
if issubclass(plugin, SpyderDockablePlugin):
if Plugins.Shortcuts not in plugin.REQUIRES:
plugin.REQUIRES.append(Plugins.Shortcuts)
plugin._REQUIRES = plugin.REQUIRES.copy()
for required in plugin.REQUIRES[:]:
# Check self references
if plugin.NAME == required:
raise SpyderAPIError("Plugin is self referencing!")
if (required == Plugins.All and len(plugin.REQUIRES) == 1):
all_plugins = plugin_names.copy()
all_plugins.pop(plugin.NAME)
plugin.REQUIRES = list(all_plugins)
plugin._REQUIRES = plugin.REQUIRES.copy()
logger.info("Added all plugins as dependencies to plugin: " +
plugin.NAME)
continue
if required not in plugin_names:
plugin_names.pop(plugin.NAME)
logger.error("Pruned plugin: " + plugin.NAME)
break
else:
remaining_plugins.append(plugin)
# Prune optional dependencies from remaining plugins
for plugin in remaining_plugins:
for optional in plugin.OPTIONAL:
if optional not in plugin_names:
plugin._OPTIONAL.remove(optional)
plugin._REQUIRES += plugin._OPTIONAL
dependencies_dict[plugin.NAME] = set(plugin._REQUIRES)
# Now use toposort with plugin._REQUIRES!
deps = toposort_flatten(dependencies_dict)
plugin_deps = [plugin_names[name] for name in deps]
return plugin_deps
|
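solve_plugin_dependencies ends by reducing each surviving plugin to the set of names it requires and flattening that mapping with toposort_flatten. The sketch below shows that final step in isolation; it assumes the toposort_flatten helper from the third-party toposort package (which has the same interface), and the plugin names in the example mapping are made up for illustration.
# Minimal sketch of the final sorting step, assuming the third-party
# 'toposort' package is installed. The plugin names below are invented.
from toposort import toposort_flatten

dependencies_dict = {
    'editor': {'shortcuts', 'preferences'},   # editor requires both
    'shortcuts': {'preferences'},
    'preferences': set(),                     # no dependencies
}
# toposort_flatten returns the names ordered so that every entry appears
# after all of its dependencies.
load_order = toposort_flatten(dependencies_dict)
print(load_order)  # -> ['preferences', 'shortcuts', 'editor']
Each name is guaranteed to appear after every name it depends on, which is exactly the load order the plugin registry needs.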