id (int64, 11–59.9k) | original (stringlengths 33–150k) | modified (stringlengths 37–150k) |
---|---|---|
48,303 | def run_module():
# define the available arguments/parameters that a user can pass to
# the module
module_args = dict(
cpm_url=dict(type='str', required=True),
cpm_username=dict(type='str', required=True),
cpm_password=dict(type='str', required=True, no_log=True),
port=dict(type='int', required=True),
portname=dict(type='str', required=False, default=None),
baud=dict(type='int', required=False, default=None, choices=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]),
handshake=dict(type='int', required=False, default=None, choices=[0, 1, 2, 3]),
stopbits=dict(type='int', required=False, default=None, choices=[0, 1]),
parity=dict(type='int', required=False, default=None, choices=[0, 1, 2, 3, 4, 5]),
mode=dict(type='int', required=False, default=None, choices=[0, 1, 2, 3, 4]),
cmd=dict(type='int', required=False, default=None, choices=[0, 1]),
seq=dict(type='int', required=False, default=None, choices=[1, 2, 3]),
tout=dict(type='int', required=False, default=None, choices=[0, 1, 2, 3, 4, 5]),
echo=dict(type='int', required=False, default=None, choices=[0, 1]),
break_allow=dict(type='int', required=False, default=None, choices=[0, 1]),
logoff=dict(type='str', required=False, default=None),
use_https=dict(type='bool', default=True),
validate_certs=dict(type='bool', default=True),
use_proxy=dict(type='bool', default=False)
)
result = dict(
changed=False,
data=''
)
module = AnsibleModule(argument_spec=module_args, supports_check_mode=True)
auth = to_text(base64.b64encode(to_bytes('{0}:{1}'.format(to_native(module.params['cpm_username']), to_native(module.params['cpm_password'])),
errors='surrogate_or_strict')))
if module.params['use_https'] is True:
protocol = "https://"
else:
protocol = "http://"
fullurl = ("%s%s/api/v2/config/serialports?ports=%s" % (protocol, to_native(module.params['cpm_url']), to_native(module.params['port'])))
method = 'GET'
read_data = ""
try:
response = open_url(fullurl, data=None, method=method, validate_certs=module.params['validate_certs'], use_proxy=module.params['use_proxy'],
headers={'Content-Type': 'application/json', 'Authorization': "Basic %s" % auth})
except HTTPError as e:
fail_json = dict(msg='GET: Received HTTP error for {0} : {1}'.format(fullurl, to_native(e)), changed=False)
module.fail_json(**fail_json)
except URLError as e:
fail_json = dict(msg='GET: Failed lookup url for {0} : {1}'.format(fullurl, to_native(e)), changed=False)
module.fail_json(**fail_json)
except SSLValidationError as e:
fail_json = dict(msg='GET: Error validating the server''s certificate for {0} : {1}'.format(fullurl, to_native(e)), changed=False)
module.fail_json(**fail_json)
except ConnectionError as e:
fail_json = dict(msg='GET: Error connecting to {0} : {1}'.format(fullurl, to_native(e)), changed=False)
module.fail_json(**fail_json)
read_data = json.loads(response.read())
payload = assemble_json(module, read_data)
if ((module.check_mode) | (payload is None)):
result['data'] = read_data
else:
fullurl = ("%s%s/api/v2/config/serialports" % (protocol, to_native(module.params['cpm_url'])))
method = 'POST'
try:
response = open_url(fullurl, data=payload, method=method, validate_certs=module.params['validate_certs'], use_proxy=module.params['use_proxy'],
headers={'Content-Type': 'application/json', 'Authorization': "Basic %s" % auth})
except HTTPError as e:
fail_json = dict(msg='POST: Received HTTP error for {0} : {1}'.format(fullurl, to_native(e)), changed=False)
module.fail_json(**fail_json)
except URLError as e:
fail_json = dict(msg='POST: Failed lookup url for {0} : {1}'.format(fullurl, to_native(e)), changed=False)
module.fail_json(**fail_json)
except SSLValidationError as e:
fail_json = dict(msg='POST: Error validating the server''s certificate for {0} : {1}'.format(fullurl, to_native(e)), changed=False)
module.fail_json(**fail_json)
except ConnectionError as e:
fail_json = dict(msg='POST: Error connecting to {0} : {1}'.format(fullurl, to_native(e)), changed=False)
module.fail_json(**fail_json)
result['changed'] = True
result['data'] = json.loads(response.read())
module.exit_json(**result)
| def run_module():
# define the available arguments/parameters that a user can pass to
# the module
module_args = dict(
cpm_url=dict(type='str', required=True),
cpm_username=dict(type='str', required=True),
cpm_password=dict(type='str', required=True, no_log=True),
port=dict(type='int', required=True),
portname=dict(type='str', required=False, default=None),
baud=dict(type='int', required=False, default=None, choices=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]),
handshake=dict(type='int', required=False, default=None, choices=[0, 1, 2, 3]),
stopbits=dict(type='int', required=False, default=None, choices=[0, 1]),
parity=dict(type='int', required=False, default=None, choices=[0, 1, 2, 3, 4, 5]),
mode=dict(type='int', required=False, default=None, choices=[0, 1, 2, 3, 4]),
cmd=dict(type='int', required=False, default=None, choices=[0, 1]),
seq=dict(type='int', required=False, default=None, choices=[1, 2, 3]),
tout=dict(type='int', required=False, default=None, choices=[0, 1, 2, 3, 4, 5]),
echo=dict(type='int', required=False, default=None, choices=[0, 1]),
break_allow=dict(type='int', required=False, default=None, choices=[0, 1]),
logoff=dict(type='str', required=False, default=None),
use_https=dict(type='bool', default=True),
validate_certs=dict(type='bool', default=True),
use_proxy=dict(type='bool', default=False)
)
result = dict(
changed=False,
data=''
)
module = AnsibleModule(argument_spec=module_args, supports_check_mode=True)
auth = to_text(base64.b64encode(to_bytes('{0}:{1}'.format(to_native(module.params['cpm_username']), to_native(module.params['cpm_password'])),
errors='surrogate_or_strict')))
if module.params['use_https'] is True:
protocol = "https://"
else:
protocol = "http://"
fullurl = ("%s%s/api/v2/config/serialports?ports=%s" % (protocol, to_native(module.params['cpm_url']), to_native(module.params['port'])))
method = 'GET'
read_data = ""
try:
response = open_url(fullurl, data=None, method=method, validate_certs=module.params['validate_certs'], use_proxy=module.params['use_proxy'],
headers={'Content-Type': 'application/json', 'Authorization': "Basic %s" % auth})
except HTTPError as e:
fail_json = dict(msg='GET: Received HTTP error for {0} : {1}'.format(fullurl, to_native(e)), changed=False)
module.fail_json(**fail_json)
except URLError as e:
fail_json = dict(msg='GET: Failed lookup url for {0} : {1}'.format(fullurl, to_native(e)), changed=False)
module.fail_json(**fail_json)
except SSLValidationError as e:
fail_json = dict(msg='GET: Error validating the server''s certificate for {0} : {1}'.format(fullurl, to_native(e)), changed=False)
module.fail_json(**fail_json)
except ConnectionError as e:
fail_json = dict(msg='GET: Error connecting to {0} : {1}'.format(fullurl, to_native(e)), changed=False)
module.fail_json(**fail_json)
read_data = json.loads(response.read())
payload = assemble_json(module, read_data)
result['data'] = read_data
if module.check_mode:
if payload is not None:
result['changed'] = True
else:
if payload is not None:
fullurl = ("%s%s/api/v2/config/serialports" % (protocol, to_native(module.params['cpm_url'])))
method = 'POST'
try:
response = open_url(fullurl, data=payload, method=method, validate_certs=module.params['validate_certs'], use_proxy=module.params['use_proxy'],
headers={'Content-Type': 'application/json', 'Authorization': "Basic %s" % auth})
except HTTPError as e:
fail_json = dict(msg='POST: Received HTTP error for {0} : {1}'.format(fullurl, to_native(e)), changed=False)
module.fail_json(**fail_json)
except URLError as e:
fail_json = dict(msg='POST: Failed lookup url for {0} : {1}'.format(fullurl, to_native(e)), changed=False)
module.fail_json(**fail_json)
except SSLValidationError as e:
fail_json = dict(msg='POST: Error validating the server''s certificate for {0} : {1}'.format(fullurl, to_native(e)), changed=False)
module.fail_json(**fail_json)
except ConnectionError as e:
fail_json = dict(msg='POST: Error connecting to {0} : {1}'.format(fullurl, to_native(e)), changed=False)
module.fail_json(**fail_json)
result['changed'] = True
result['data'] = json.loads(response.read())
module.exit_json(**result)
|
6,456 | def prepare_chart_data(item_data):
labels, qty_to_order, ordered_qty, received_qty, pending_qty = [], [], [], [], []
if len(item_data) > 30:
item_data = dict(list(item_data.items())[:30])
for row in item_data:
mr_row = item_data[row]
labels.append(row)
qty_to_order.append(mr_row["qty_to_order"])
ordered_qty.append(mr_row["ordered_qty"])
received_qty.append(mr_row["received_qty"])
pending_qty.append(mr_row["pending_qty"])
chart_data = {
"data" : {
"labels": labels,
"datasets": [
{
'name': _('Qty to Order'),
'values': qty_to_order
},
{
'name': _('Ordered Qty'),
'values': ordered_qty
},
{
'name': _('Received Qty'),
'values': received_qty
},
{
'name': _('Pending Qty'),
'values': pending_qty
}
]
},
"type": "bar",
"barOptions": {
"stacked": 1
},
}
return chart_data
| def prepare_chart_data(item_data):
labels, qty_to_order, ordered_qty, received_qty, pending_qty = [], [], [], [], []
if len(item_data) > 30:
item_data = dict(list(item_data.items())[:30])
for row in item_data:
mr_row = item_data[row]
labels.append(row)
qty_to_order.append(mr_row["qty_to_order"])
ordered_qty.append(mr_row["ordered_qty"])
received_qty.append(mr_row["received_qty"])
qty_to_receive.append(mr_row["pending_qty"])
chart_data = {
"data" : {
"labels": labels,
"datasets": [
{
'name': _('Qty to Order'),
'values': qty_to_order
},
{
'name': _('Ordered Qty'),
'values': ordered_qty
},
{
'name': _('Received Qty'),
'values': received_qty
},
{
'name': _('Pending Qty'),
'values': pending_qty
}
]
},
"type": "bar",
"barOptions": {
"stacked": 1
},
}
return chart_data
|
37,818 | def build(options: BuildOptions) -> None:
try:
# check docker is installed
subprocess.run(["docker", "--version"], check=True, stdout=subprocess.DEVNULL)
except Exception:
print(
"cibuildwheel: Docker not found. Docker is required to run Linux builds. "
"If you're building on Travis CI, add `services: [docker]` to your .travis.yml."
"If you're building on Circle CI in Linux, add a `setup_remote_docker` step to your .circleci/config.yml",
file=sys.stderr,
)
sys.exit(2)
assert options.manylinux_images is not None
python_configurations = get_python_configurations(options.build_selector, options.architectures)
platforms = [
("cp", "manylinux_aarch64", options.manylinux_images["cross_aarch64"]),
]
cwd = Path.cwd()
abs_package_dir = options.package_dir.resolve()
if cwd != abs_package_dir and cwd not in abs_package_dir.parents:
raise Exception("package_dir must be inside the working directory")
container_project_path = PurePath("/project")
container_package_dir = container_project_path / abs_package_dir.relative_to(cwd)
container_output_dir = PurePath("/output")
print("\nRegistering qemu to run ppc64le/AArch64 docker containers...\n")
setup_qemu()
for implementation, platform_tag, docker_image in platforms:
platform_configs = [
c
for c in python_configurations
if c.identifier.startswith(implementation) and c.identifier.endswith(platform_tag)
]
if not platform_configs:
continue
try:
log.step(f"Starting Docker image {docker_image}...")
with DockerContainer(
docker_image,
simulate_32_bit=platform_tag.endswith("i686"),
cwd=container_project_path,
) as docker:
log.step("Copying project into Docker...")
docker.copy_into(Path.cwd(), container_project_path)
target_arch = platform_tag_to_arch(platform_tag)
if options.before_all:
log.step("Running before_all...")
env = docker.get_environment()
env["PATH"] = f'/opt/python/cp38-cp38/bin:{env["PATH"]}'
env["PIP_DISABLE_PIP_VERSION_CHECK"] = "1"
env = options.environment.as_dictionary(
env, executor=docker.environment_executor
)
before_all_prepared = prepare_command(
options.before_all,
project=container_project_path,
package=container_package_dir,
)
execute_cmd(
docker=docker,
cmd_str=before_all_prepared,
before_build=False,
target_arch=target_arch,
env=env,
)
for config in platform_configs:
log.build_start(config.identifier)
dependency_constraint_flags: List[PathOrStr] = []
if options.dependency_constraints:
constraints_file = options.dependency_constraints.get_for_python_version(
config.version
)
container_constraints_file = PurePath("/constraints.txt")
docker.copy_into(constraints_file, container_constraints_file)
dependency_constraint_flags = ["-c", container_constraints_file]
log.step("Setting up build environment...")
env = docker.get_environment()
# put this config's python top of the list
python_bin = config.path / "bin"
env["PATH"] = f'{python_bin}:{env["PATH"]}'
cross_py = str(config.path)
build_py = cross_py[: cross_py.rindex("/")]
build_py_bin = f"{build_py}/bin"
env["PATH"] = f"{build_py_bin}:{env['PATH']}"
env = options.environment.as_dictionary(
env, executor=docker.environment_executor
)
# check config python is still on PATH
which_python = docker.call(
["which", "python"], env=env, capture_output=True
).strip()
if PurePath(which_python) != python_bin / "python":
print(
"cibuildwheel: python available on PATH doesn't match our installed instance. If you have modified PATH, ensure that you don't overwrite cibuildwheel's entry or insert python above it.",
file=sys.stderr,
)
sys.exit(1)
which_pip = docker.call(["which", "pip"], env=env, capture_output=True).strip()
if PurePath(which_pip) != python_bin / "pip":
print(
"cibuildwheel: pip available on PATH doesn't match our installed instance. If you have modified PATH, ensure that you don't overwrite cibuildwheel's entry or insert pip above it.",
file=sys.stderr,
)
sys.exit(1)
if options.before_build:
log.step("Running before_build...")
before_build_prepared = prepare_command(
options.before_build,
project=container_project_path,
package=container_package_dir,
)
execute_cmd(
docker=docker,
cmd_str=before_build_prepared,
before_build=True,
target_arch=target_arch,
env=env,
)
log.step("Building wheel...")
temp_dir = PurePath("/tmp/cibuildwheel")
built_wheel_dir = temp_dir / "built_wheel"
docker.call(["rm", "-rf", built_wheel_dir])
docker.call(["mkdir", "-p", built_wheel_dir])
verbosity_flags = get_build_verbosity_extra_flags(options.build_verbosity)
# pip wheel is not working properly with crossenv, use "bdist_wheel" for now
docker.call(
[
"python",
"setup.py",
"bdist_wheel",
f"--dist-dir={built_wheel_dir}",
*verbosity_flags,
],
env=env,
cwd=container_package_dir,
)
built_wheel = docker.glob(built_wheel_dir, "*.whl")[0]
repaired_wheel_dir = temp_dir / "repaired_wheel"
assert env is not None
target_arch_env = TargetArchEnvUtil(env.get("CROSS_ROOT"), target_arch)
# Because we will repair the wheel in a different container, we need to
# changing the path with respect host machine. We will copy the built
# wheels on host machine before accessing these
built_wheel = PurePath(target_arch_env.host + built_wheel.__str__())
repaired_wheel_dir = PurePath(
target_arch_env.host + repaired_wheel_dir.__str__()
)
docker.call(["rm", "-rf", repaired_wheel_dir])
docker.call(["mkdir", "-p", repaired_wheel_dir])
if built_wheel.name.endswith("none-any.whl"):
raise NonPlatformWheelError()
# We will repair the wheel in a different environment, copy the
# built wheels back on host machine along with the script used to
# repair the wheel
docker.call(
["cp", "-r", temp_dir, target_arch_env.host_machine_tmp_in_container]
)
docker.call(
[
"cp",
target_arch_env.tmp + "/repair_wheel.sh",
target_arch_env.host_machine_tmp_in_container,
]
)
with DockerContainer(
native_docker_images[target_arch],
simulate_32_bit=platform_tag.endswith("i686"),
cwd=container_project_path,
) as native_docker:
if options.repair_command:
log.step("Repairing wheel...")
repair_command_prepared = prepare_command(
options.repair_command,
wheel=built_wheel,
dest_dir=repaired_wheel_dir,
)
# Repair the wheel in a architecture specific container
native_docker.call(
[
target_arch_env.host_machine_tmp_in_container
+ "/repair_wheel.sh",
target_arch_env.host_machine_deps_in_container,
repair_command_prepared,
]
)
else:
native_docker.call(["mv", built_wheel, repaired_wheel_dir])
repaired_wheels = native_docker.glob(repaired_wheel_dir, "*.whl")
if options.test_command and options.test_selector(config.identifier):
log.step("Testing wheel...")
# We are testing in a different container so we need to copy the
# project and constraints file into it.
native_docker.copy_into(Path.cwd(), container_project_path)
native_docker.copy_into(constraints_file, container_constraints_file)
# Setting the path to current python version
envxc = native_docker.get_environment()
path = env["PATH"].replace("-xc", "")
envxc["PATH"] = f"{path}:envxc['PATH']"
# set up a virtual environment to install and test from, to make sure
# there are no dependencies that were pulled in at build time.
native_docker.call(
["pip", "install", "virtualenv", *dependency_constraint_flags],
env=envxc,
)
venv_dir = (
PurePath(
native_docker.call(
["mktemp", "-d"], capture_output=True
).strip()
)
/ "venv"
)
native_docker.call(
["python", "-m", "virtualenv", "--no-download", venv_dir], env=envxc
)
virtualenv_env = envxc.copy()
virtualenv_env["PATH"] = f"{venv_dir / 'bin'}:{virtualenv_env['PATH']}"
if options.before_test:
before_test_prepared = prepare_command(
options.before_test,
project=container_project_path,
package=container_package_dir,
)
native_docker.call(
["sh", "-c", before_test_prepared], env=virtualenv_env
)
# Install the wheel we just built
# Note: If auditwheel produced two wheels, it's because the earlier produced wheel
# conforms to multiple manylinux standards. These multiple versions of the wheel are
# functionally the same, differing only in name, wheel metadata, and possibly include
# different external shared libraries. so it doesn't matter which one we run the tests on.
# Let's just pick the first one.
wheel_to_test = repaired_wheels[0]
native_docker.call(
["pip", "install", str(wheel_to_test) + options.test_extras],
env=virtualenv_env,
)
# Install any requirements to run the tests
if options.test_requires:
native_docker.call(
["pip", "install", *options.test_requires], env=virtualenv_env
)
# Run the tests from a different directory
test_command_prepared = prepare_command(
options.test_command,
project=container_project_path,
package=container_package_dir,
)
native_docker.call(
["sh", "-c", test_command_prepared], cwd="/root", env=virtualenv_env
)
# clean up test environment
native_docker.call(["rm", "-rf", venv_dir])
# move repaired wheels to output
docker.call(["mkdir", "-p", container_output_dir])
docker.call(["mv", *repaired_wheels, container_output_dir])
log.build_end()
log.step("Copying wheels back to host...")
# copy the output back into the host
docker.copy_out(container_output_dir, options.output_dir)
log.step_end()
except subprocess.CalledProcessError as error:
log.step_end_with_error(
f"Command {error.cmd} failed with code {error.returncode}. {error.stdout}"
)
troubleshoot(options.package_dir, error)
sys.exit(1)
| def build(options: BuildOptions) -> None:
try:
# check docker is installed
subprocess.run(["docker", "--version"], check=True, stdout=subprocess.DEVNULL)
except Exception:
print(
"cibuildwheel: Docker not found. Docker is required to run Linux builds. "
"If you're building on Travis CI, add `services: [docker]` to your .travis.yml."
"If you're building on Circle CI in Linux, add a `setup_remote_docker` step to your .circleci/config.yml",
file=sys.stderr,
)
sys.exit(2)
assert options.manylinux_images is not None
python_configurations = get_python_configurations(options.build_selector, options.architectures)
platforms = [
("cp", "manylinux_aarch64", options.manylinux_images["cross_aarch64"]),
]
cwd = Path.cwd()
abs_package_dir = options.package_dir.resolve()
if cwd != abs_package_dir and cwd not in abs_package_dir.parents:
raise Exception("package_dir must be inside the working directory")
container_project_path = PurePath("/project")
container_package_dir = container_project_path / abs_package_dir.relative_to(cwd)
container_output_dir = PurePath("/output")
print("\nRegistering qemu to run ppc64le/AArch64 docker containers...\n")
setup_qemu()
for implementation, platform_tag, docker_image in platforms:
platform_configs = [
c
for c in python_configurations
if c.identifier.startswith(implementation) and c.identifier.endswith(platform_tag)
]
if not platform_configs:
continue
try:
log.step(f"Starting Docker image {docker_image}...")
with DockerContainer(
docker_image,
simulate_32_bit=platform_tag.endswith("i686"),
cwd=container_project_path,
) as docker:
log.step("Copying project into Docker...")
docker.copy_into(Path.cwd(), container_project_path)
target_arch = platform_tag_to_arch(platform_tag)
if options.before_all:
log.step("Running before_all...")
env = docker.get_environment()
env["PATH"] = f'/opt/python/cp38-cp38/bin:{env["PATH"]}'
env["PIP_DISABLE_PIP_VERSION_CHECK"] = "1"
env = options.environment.as_dictionary(
env, executor=docker.environment_executor
)
before_all_prepared = prepare_command(
options.before_all,
project=container_project_path,
package=container_package_dir,
)
execute_cmd(
docker=docker,
cmd_str=before_all_prepared,
before_build=False,
target_arch=target_arch,
env=env,
)
for config in platform_configs:
log.build_start(config.identifier)
dependency_constraint_flags: List[PathOrStr] = []
if options.dependency_constraints:
constraints_file = options.dependency_constraints.get_for_python_version(
config.version
)
container_constraints_file = PurePath("/constraints.txt")
docker.copy_into(constraints_file, container_constraints_file)
dependency_constraint_flags = ["-c", container_constraints_file]
log.step("Setting up build environment...")
env = docker.get_environment()
# put this config's python top of the list
python_bin = config.path / "bin"
env["PATH"] = f'{python_bin}:{env["PATH"]}'
cross_py = str(config.path)
build_py = cross_py[: cross_py.rindex("/")]
build_py_bin = f"{build_py}/bin"
env["PATH"] = f"{build_py_bin}:{env['PATH']}"
env = options.environment.as_dictionary(
env, executor=docker.environment_executor
)
# check config python is still on PATH
which_python = docker.call(
["which", "python"], env=env, capture_output=True
).strip()
if PurePath(which_python) != python_bin / "python":
print(
"cibuildwheel: python available on PATH doesn't match our installed instance. If you have modified PATH, ensure that you don't overwrite cibuildwheel's entry or insert python above it.",
file=sys.stderr,
)
sys.exit(1)
which_pip = docker.call(["which", "pip"], env=env, capture_output=True).strip()
if PurePath(which_pip) != python_bin / "pip":
print(
"cibuildwheel: pip available on PATH doesn't match our installed instance. If you have modified PATH, ensure that you don't overwrite cibuildwheel's entry or insert pip above it.",
file=sys.stderr,
)
sys.exit(1)
if options.before_build:
log.step("Running before_build...")
before_build_prepared = prepare_command(
options.before_build,
project=container_project_path,
package=container_package_dir,
)
execute_cmd(
docker=docker,
cmd_str=before_build_prepared,
before_build=True,
target_arch=target_arch,
env=env,
)
log.step("Building wheel...")
temp_dir = PurePath("/tmp/cibuildwheel")
built_wheel_dir = temp_dir / "built_wheel"
docker.call(["rm", "-rf", built_wheel_dir])
docker.call(["mkdir", "-p", built_wheel_dir])
verbosity_flags = get_build_verbosity_extra_flags(options.build_verbosity)
# pip wheel is not working properly with crossenv, use "bdist_wheel" for now
docker.call(
[
"python",
"setup.py",
"bdist_wheel",
f"--dist-dir={built_wheel_dir}",
*verbosity_flags,
],
env=env,
cwd=container_package_dir,
)
built_wheel = docker.glob(built_wheel_dir, "*.whl")[0]
repaired_wheel_dir = temp_dir / "repaired_wheel"
assert env is not None
target_arch_env = TargetArchEnvUtil(env.get("CROSS_ROOT"), target_arch)
# Because we will repair the wheel in a different container, we need to
# changing the path with respect host machine. We will copy the built
# wheels on host machine before accessing these
built_wheel = PurePath(target_arch_env.host + built_wheel.__str__())
repaired_wheel_dir = PurePath(
target_arch_env.host + repaired_wheel_dir.__str__()
)
docker.call(["rm", "-rf", repaired_wheel_dir])
docker.call(["mkdir", "-p", repaired_wheel_dir])
if built_wheel.name.endswith("none-any.whl"):
raise NonPlatformWheelError()
# We will repair the wheel in a different environment, copy the
# built wheels back on host machine along with the script used to
# repair the wheel
docker.call(
["cp", "-r", temp_dir, target_arch_env.host_machine_tmp_in_container]
)
docker.call(
[
"cp",
f"{target_arch_env.tmp}/repair_wheel.sh",
target_arch_env.host_machine_tmp_in_container,
]
)
with DockerContainer(
native_docker_images[target_arch],
simulate_32_bit=platform_tag.endswith("i686"),
cwd=container_project_path,
) as native_docker:
if options.repair_command:
log.step("Repairing wheel...")
repair_command_prepared = prepare_command(
options.repair_command,
wheel=built_wheel,
dest_dir=repaired_wheel_dir,
)
# Repair the wheel in a architecture specific container
native_docker.call(
[
target_arch_env.host_machine_tmp_in_container
+ "/repair_wheel.sh",
target_arch_env.host_machine_deps_in_container,
repair_command_prepared,
]
)
else:
native_docker.call(["mv", built_wheel, repaired_wheel_dir])
repaired_wheels = native_docker.glob(repaired_wheel_dir, "*.whl")
if options.test_command and options.test_selector(config.identifier):
log.step("Testing wheel...")
# We are testing in a different container so we need to copy the
# project and constraints file into it.
native_docker.copy_into(Path.cwd(), container_project_path)
native_docker.copy_into(constraints_file, container_constraints_file)
# Setting the path to current python version
envxc = native_docker.get_environment()
path = env["PATH"].replace("-xc", "")
envxc["PATH"] = f"{path}:envxc['PATH']"
# set up a virtual environment to install and test from, to make sure
# there are no dependencies that were pulled in at build time.
native_docker.call(
["pip", "install", "virtualenv", *dependency_constraint_flags],
env=envxc,
)
venv_dir = (
PurePath(
native_docker.call(
["mktemp", "-d"], capture_output=True
).strip()
)
/ "venv"
)
native_docker.call(
["python", "-m", "virtualenv", "--no-download", venv_dir], env=envxc
)
virtualenv_env = envxc.copy()
virtualenv_env["PATH"] = f"{venv_dir / 'bin'}:{virtualenv_env['PATH']}"
if options.before_test:
before_test_prepared = prepare_command(
options.before_test,
project=container_project_path,
package=container_package_dir,
)
native_docker.call(
["sh", "-c", before_test_prepared], env=virtualenv_env
)
# Install the wheel we just built
# Note: If auditwheel produced two wheels, it's because the earlier produced wheel
# conforms to multiple manylinux standards. These multiple versions of the wheel are
# functionally the same, differing only in name, wheel metadata, and possibly include
# different external shared libraries. so it doesn't matter which one we run the tests on.
# Let's just pick the first one.
wheel_to_test = repaired_wheels[0]
native_docker.call(
["pip", "install", str(wheel_to_test) + options.test_extras],
env=virtualenv_env,
)
# Install any requirements to run the tests
if options.test_requires:
native_docker.call(
["pip", "install", *options.test_requires], env=virtualenv_env
)
# Run the tests from a different directory
test_command_prepared = prepare_command(
options.test_command,
project=container_project_path,
package=container_package_dir,
)
native_docker.call(
["sh", "-c", test_command_prepared], cwd="/root", env=virtualenv_env
)
# clean up test environment
native_docker.call(["rm", "-rf", venv_dir])
# move repaired wheels to output
docker.call(["mkdir", "-p", container_output_dir])
docker.call(["mv", *repaired_wheels, container_output_dir])
log.build_end()
log.step("Copying wheels back to host...")
# copy the output back into the host
docker.copy_out(container_output_dir, options.output_dir)
log.step_end()
except subprocess.CalledProcessError as error:
log.step_end_with_error(
f"Command {error.cmd} failed with code {error.returncode}. {error.stdout}"
)
troubleshoot(options.package_dir, error)
sys.exit(1)
|
56,215 | def main():
log.basicConfig(format='[ %(levelname)s ] %(message)s', level=log.INFO, stream=sys.stdout)
args = build_argparser().parse_args()
mask_rcnn_model_xml = args.mask_rcnn_model
mask_rcnn_model_bin = os.path.splitext(mask_rcnn_model_xml)[0] + '.bin'
text_enc_model_xml = args.text_enc_model
text_enc_model_bin = os.path.splitext(text_enc_model_xml)[0] + '.bin'
text_dec_model_xml = args.text_dec_model
text_dec_model_bin = os.path.splitext(text_dec_model_xml)[0] + '.bin'
# Plugin initialization for specified device and load extensions library if specified.
log.info('Creating Inference Engine...')
ie = IECore()
if args.cpu_extension and 'CPU' in args.device:
ie.add_extension(args.cpu_extension, 'CPU')
# Read IR
log.info('Loading network files:\n\t{}\n\t{}'.format(mask_rcnn_model_xml, mask_rcnn_model_bin))
mask_rcnn_net = IENetwork(model=mask_rcnn_model_xml, weights=mask_rcnn_model_bin)
log.info('Loading network files:\n\t{}\n\t{}'.format(text_enc_model_xml, text_enc_model_bin))
text_enc_net = IENetwork(model=text_enc_model_xml, weights=text_enc_model_bin)
log.info('Loading network files:\n\t{}\n\t{}'.format(text_dec_model_xml, text_dec_model_bin))
text_dec_net = IENetwork(model=text_dec_model_xml, weights=text_dec_model_bin)
if 'CPU' in args.device:
supported_layers = ie.query_network(mask_rcnn_net, 'CPU')
not_supported_layers = [l for l in mask_rcnn_net.layers.keys() if l not in supported_layers]
if len(not_supported_layers) != 0:
log.error('Following layers are not supported by the plugin for specified device {}:\n {}'.
format(args.device, ', '.join(not_supported_layers)))
log.error("Please try to specify cpu extensions library path in sample's command line parameters using -l "
"or --cpu_extension command line argument")
sys.exit(1)
required_input_keys = {'im_data', 'im_info'}
assert required_input_keys == set(mask_rcnn_net.inputs.keys()), \
'Demo supports only topologies with the following input keys: {}'.format(', '.join(required_input_keys))
required_output_keys = {'boxes', 'scores', 'classes', 'raw_masks', 'text_features'}
assert required_output_keys.issubset(mask_rcnn_net.outputs.keys()), \
'Demo supports only topologies with the following output keys: {}'.format(', '.join(required_output_keys))
n, c, h, w = mask_rcnn_net.inputs['im_data'].shape
print(n, c, h, w)
assert n == 1, 'Only batch 1 is supported by the demo application'
log.info('Loading IR to the plugin...')
mask_rcnn_exec_net = ie.load_network(network=mask_rcnn_net, device_name=args.device, num_requests=2)
text_enc_exec_net = ie.load_network(network=text_enc_net, device_name=args.device)
text_dec_exec_net = ie.load_network(network=text_dec_net, device_name=args.device)
hidden_shape = text_dec_net.inputs[args.trd_input_prev_hidden].shape
del mask_rcnn_net
del text_enc_net
del text_dec_net
try:
input_source = int(args.input_source)
except ValueError:
input_source = args.input_source
if os.path.isdir(input_source):
cap = FolderCapture(input_source)
else:
cap = cv2.VideoCapture(input_source)
if not cap.isOpened():
log.error('Failed to open "{}"'.format(args.input_source))
if isinstance(cap, cv2.VideoCapture):
cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)
if args.no_track:
tracker = None
else:
tracker = StaticIOUTracker()
visualizer = Visualizer(['__background__', 'text'], show_boxes=args.show_boxes, show_scores=args.show_scores)
render_time = 0
log.info('Starting inference...')
print("To close the application, press 'CTRL+C' here or switch to the output window and press ESC key")
while cap.isOpened():
ret, frame = cap.read()
if not ret:
break
if not args.keep_aspect_ratio:
# Resize the image to a target size.
scale_x = w / frame.shape[1]
scale_y = h / frame.shape[0]
input_image = cv2.resize(frame, (w, h))
else:
# Resize the image to keep the same aspect ratio and to fit it to a window of a target size.
scale_x = scale_y = min(h / frame.shape[0], w / frame.shape[1])
input_image = cv2.resize(frame, None, fx=scale_x, fy=scale_y)
input_image_size = input_image.shape[:2]
input_image = np.pad(input_image, ((0, h - input_image_size[0]),
(0, w - input_image_size[1]),
(0, 0)),
mode='constant', constant_values=0)
# Change data layout from HWC to CHW.
input_image = input_image.transpose((2, 0, 1))
input_image = input_image.reshape((n, c, h, w)).astype(np.float32)
input_image_info = np.asarray([[input_image_size[0], input_image_size[1], 1]], dtype=np.float32)
# Run the net.
inf_start = time.time()
outputs = mask_rcnn_exec_net.infer({'im_data': input_image, 'im_info': input_image_info})
inf_end = time.time()
det_time = inf_end - inf_start
# Parse detection results of the current request
boxes = outputs['boxes']
boxes[:, 0::2] /= scale_x
boxes[:, 1::2] /= scale_y
scores = outputs['scores']
classes = outputs['classes'].astype(np.uint32)
masks = []
for box, cls, raw_mask in zip(boxes, classes, outputs['raw_masks']):
raw_cls_mask = raw_mask[cls, ...]
mask = segm_postprocess(box, raw_cls_mask, frame.shape[0], frame.shape[1])
masks.append(mask)
text_features = outputs['text_features']
# Filter out detections with low confidence.
detections_filter = scores > args.prob_threshold
scores = scores[detections_filter]
classes = classes[detections_filter]
boxes = boxes[detections_filter]
masks = list(segm for segm, is_valid in zip(masks, detections_filter) if is_valid)
text_features = text_features[detections_filter]
texts = []
for feature in text_features:
feature = text_enc_exec_net.infer({'input': feature})['output']
feature = np.reshape(feature, (feature.shape[0], feature.shape[1], -1))
feature = np.transpose(feature, (0, 2, 1))
hidden = np.zeros(hidden_shape)
prev_symbol_index = np.ones((1,)) * SOS_INDEX
vocab_size = len(args.alphabet)
per_feature_outputs = np.zeros([MAX_SEQ_LEN, vocab_size])
text = ''
for i in range(MAX_SEQ_LEN):
decoder_output = text_dec_exec_net.infer({
args.trd_input_prev_symbol: prev_symbol_index,
args.trd_input_prev_hidden: hidden,
args.trd_input_encoder_outputs: feature})
symbols_distr = decoder_output[args.trd_output_symbols_distr]
per_feature_outputs[i] = symbols_distr
prev_symbol_index = int(np.argmax(symbols_distr, axis=1))
if prev_symbol_index == EOS_INDEX:
break
text += args.alphabet[prev_symbol_index]
hidden = decoder_output[args.trd_output_cur_hidden]
texts.append(text)
render_start = time.time()
if len(boxes) and args.raw_output_message:
log.info('Detected boxes:')
log.info(' Class ID | Confidence | XMIN | YMIN | XMAX | YMAX ')
for box, cls, score, mask in zip(boxes, classes, scores, masks):
log.info('{:>10} | {:>10f} | {:>8.2f} | {:>8.2f} | {:>8.2f} | {:>8.2f} '.format(cls, score, *box))
# Get instance track IDs.
masks_tracks_ids = None
if tracker is not None:
masks_tracks_ids = tracker(masks, classes)
# Visualize masks.
frame = visualizer(frame, boxes, classes, scores, masks, texts, masks_tracks_ids)
# Draw performance stats.
inf_time_message = 'Inference time: {:.3f} ms'.format(det_time * 1000)
render_time_message = 'OpenCV rendering time: {:.3f} ms'.format(render_time * 1000)
cv2.putText(frame, inf_time_message, (15, 15), cv2.FONT_HERSHEY_COMPLEX, 0.5, (200, 10, 10), 1)
cv2.putText(frame, render_time_message, (15, 30), cv2.FONT_HERSHEY_COMPLEX, 0.5, (10, 10, 200), 1)
# Print performance counters.
if args.perf_counts:
perf_counts = mask_rcnn_exec_net.requests[0].get_perf_counts()
log.info('Performance counters:')
print('{:<70} {:<15} {:<15} {:<15} {:<10}'.format('name', 'layer_type', 'exet_type', 'status',
'real_time, us'))
for layer, stats in perf_counts.items():
print('{:<70} {:<15} {:<15} {:<15} {:<10}'.format(layer, stats['layer_type'], stats['exec_type'],
stats['status'], stats['real_time']))
if not args.no_show:
# Show resulting image.
cv2.imshow('Results', frame)
render_end = time.time()
render_time = render_end - render_start
if not args.no_show:
key = cv2.waitKey(args.delay)
esc_code = 27
if key == esc_code:
break
cv2.destroyAllWindows()
cap.release()
del mask_rcnn_exec_net
del ie
| def main():
log.basicConfig(format='[ %(levelname)s ] %(message)s', level=log.INFO, stream=sys.stdout)
args = build_argparser().parse_args()
mask_rcnn_model_xml = args.mask_rcnn_model
mask_rcnn_model_bin = os.path.splitext(mask_rcnn_model_xml)[0] + '.bin'
text_enc_model_xml = args.text_enc_model
text_enc_model_bin = os.path.splitext(text_enc_model_xml)[0] + '.bin'
text_dec_model_xml = args.text_dec_model
text_dec_model_bin = os.path.splitext(text_dec_model_xml)[0] + '.bin'
# Plugin initialization for specified device and load extensions library if specified.
log.info('Creating Inference Engine...')
ie = IECore()
if args.cpu_extension and 'CPU' in args.device:
ie.add_extension(args.cpu_extension, 'CPU')
# Read IR
log.info('Loading network files:\n\t{}\n\t{}'.format(mask_rcnn_model_xml, mask_rcnn_model_bin))
mask_rcnn_net = IENetwork(model=mask_rcnn_model_xml, weights=mask_rcnn_model_bin)
log.info('Loading network files:\n\t{}\n\t{}'.format(text_enc_model_xml, text_enc_model_bin))
text_enc_net = IENetwork(model=text_enc_model_xml, weights=text_enc_model_bin)
log.info('Loading network files:\n\t{}\n\t{}'.format(text_dec_model_xml, text_dec_model_bin))
text_dec_net = IENetwork(model=text_dec_model_xml, weights=text_dec_model_bin)
if 'CPU' in args.device:
supported_layers = ie.query_network(mask_rcnn_net, 'CPU')
not_supported_layers = [l for l in mask_rcnn_net.layers.keys() if l not in supported_layers]
if len(not_supported_layers) != 0:
log.error('Following layers are not supported by the plugin for specified device {}:\n {}'.
format(args.device, ', '.join(not_supported_layers)))
log.error("Please try to specify cpu extensions library path in sample's command line parameters using -l "
"or --cpu_extension command line argument")
sys.exit(1)
required_input_keys = {'im_data', 'im_info'}
assert required_input_keys == set(mask_rcnn_net.inputs.keys()), \
'Demo supports only topologies with the following input keys: {}'.format(', '.join(required_input_keys))
required_output_keys = {'boxes', 'scores', 'classes', 'raw_masks', 'text_features'}
assert required_output_keys.issubset(mask_rcnn_net.outputs.keys()), \
'Demo supports only topologies with the following output keys: {}'.format(', '.join(required_output_keys))
n, c, h, w = mask_rcnn_net.inputs['im_data'].shape
assert n == 1, 'Only batch 1 is supported by the demo application'
log.info('Loading IR to the plugin...')
mask_rcnn_exec_net = ie.load_network(network=mask_rcnn_net, device_name=args.device, num_requests=2)
text_enc_exec_net = ie.load_network(network=text_enc_net, device_name=args.device)
text_dec_exec_net = ie.load_network(network=text_dec_net, device_name=args.device)
hidden_shape = text_dec_net.inputs[args.trd_input_prev_hidden].shape
del mask_rcnn_net
del text_enc_net
del text_dec_net
try:
input_source = int(args.input_source)
except ValueError:
input_source = args.input_source
if os.path.isdir(input_source):
cap = FolderCapture(input_source)
else:
cap = cv2.VideoCapture(input_source)
if not cap.isOpened():
log.error('Failed to open "{}"'.format(args.input_source))
if isinstance(cap, cv2.VideoCapture):
cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)
if args.no_track:
tracker = None
else:
tracker = StaticIOUTracker()
visualizer = Visualizer(['__background__', 'text'], show_boxes=args.show_boxes, show_scores=args.show_scores)
render_time = 0
log.info('Starting inference...')
print("To close the application, press 'CTRL+C' here or switch to the output window and press ESC key")
while cap.isOpened():
ret, frame = cap.read()
if not ret:
break
if not args.keep_aspect_ratio:
# Resize the image to a target size.
scale_x = w / frame.shape[1]
scale_y = h / frame.shape[0]
input_image = cv2.resize(frame, (w, h))
else:
# Resize the image to keep the same aspect ratio and to fit it to a window of a target size.
scale_x = scale_y = min(h / frame.shape[0], w / frame.shape[1])
input_image = cv2.resize(frame, None, fx=scale_x, fy=scale_y)
input_image_size = input_image.shape[:2]
input_image = np.pad(input_image, ((0, h - input_image_size[0]),
(0, w - input_image_size[1]),
(0, 0)),
mode='constant', constant_values=0)
# Change data layout from HWC to CHW.
input_image = input_image.transpose((2, 0, 1))
input_image = input_image.reshape((n, c, h, w)).astype(np.float32)
input_image_info = np.asarray([[input_image_size[0], input_image_size[1], 1]], dtype=np.float32)
# Run the net.
inf_start = time.time()
outputs = mask_rcnn_exec_net.infer({'im_data': input_image, 'im_info': input_image_info})
inf_end = time.time()
det_time = inf_end - inf_start
# Parse detection results of the current request
boxes = outputs['boxes']
boxes[:, 0::2] /= scale_x
boxes[:, 1::2] /= scale_y
scores = outputs['scores']
classes = outputs['classes'].astype(np.uint32)
masks = []
for box, cls, raw_mask in zip(boxes, classes, outputs['raw_masks']):
raw_cls_mask = raw_mask[cls, ...]
mask = segm_postprocess(box, raw_cls_mask, frame.shape[0], frame.shape[1])
masks.append(mask)
text_features = outputs['text_features']
# Filter out detections with low confidence.
detections_filter = scores > args.prob_threshold
scores = scores[detections_filter]
classes = classes[detections_filter]
boxes = boxes[detections_filter]
masks = list(segm for segm, is_valid in zip(masks, detections_filter) if is_valid)
text_features = text_features[detections_filter]
texts = []
for feature in text_features:
feature = text_enc_exec_net.infer({'input': feature})['output']
feature = np.reshape(feature, (feature.shape[0], feature.shape[1], -1))
feature = np.transpose(feature, (0, 2, 1))
hidden = np.zeros(hidden_shape)
prev_symbol_index = np.ones((1,)) * SOS_INDEX
vocab_size = len(args.alphabet)
per_feature_outputs = np.zeros([MAX_SEQ_LEN, vocab_size])
text = ''
for i in range(MAX_SEQ_LEN):
decoder_output = text_dec_exec_net.infer({
args.trd_input_prev_symbol: prev_symbol_index,
args.trd_input_prev_hidden: hidden,
args.trd_input_encoder_outputs: feature})
symbols_distr = decoder_output[args.trd_output_symbols_distr]
per_feature_outputs[i] = symbols_distr
prev_symbol_index = int(np.argmax(symbols_distr, axis=1))
if prev_symbol_index == EOS_INDEX:
break
text += args.alphabet[prev_symbol_index]
hidden = decoder_output[args.trd_output_cur_hidden]
texts.append(text)
render_start = time.time()
if len(boxes) and args.raw_output_message:
log.info('Detected boxes:')
log.info(' Class ID | Confidence | XMIN | YMIN | XMAX | YMAX ')
for box, cls, score, mask in zip(boxes, classes, scores, masks):
log.info('{:>10} | {:>10f} | {:>8.2f} | {:>8.2f} | {:>8.2f} | {:>8.2f} '.format(cls, score, *box))
# Get instance track IDs.
masks_tracks_ids = None
if tracker is not None:
masks_tracks_ids = tracker(masks, classes)
# Visualize masks.
frame = visualizer(frame, boxes, classes, scores, masks, texts, masks_tracks_ids)
# Draw performance stats.
inf_time_message = 'Inference time: {:.3f} ms'.format(det_time * 1000)
render_time_message = 'OpenCV rendering time: {:.3f} ms'.format(render_time * 1000)
cv2.putText(frame, inf_time_message, (15, 15), cv2.FONT_HERSHEY_COMPLEX, 0.5, (200, 10, 10), 1)
cv2.putText(frame, render_time_message, (15, 30), cv2.FONT_HERSHEY_COMPLEX, 0.5, (10, 10, 200), 1)
# Print performance counters.
if args.perf_counts:
perf_counts = mask_rcnn_exec_net.requests[0].get_perf_counts()
log.info('Performance counters:')
print('{:<70} {:<15} {:<15} {:<15} {:<10}'.format('name', 'layer_type', 'exet_type', 'status',
'real_time, us'))
for layer, stats in perf_counts.items():
print('{:<70} {:<15} {:<15} {:<15} {:<10}'.format(layer, stats['layer_type'], stats['exec_type'],
stats['status'], stats['real_time']))
if not args.no_show:
# Show resulting image.
cv2.imshow('Results', frame)
render_end = time.time()
render_time = render_end - render_start
if not args.no_show:
key = cv2.waitKey(args.delay)
esc_code = 27
if key == esc_code:
break
cv2.destroyAllWindows()
cap.release()
del mask_rcnn_exec_net
del ie
|
20,054 | def _name_to_id(
dataset_name: str, version: Optional[int] = None, error_if_multiple: bool = False
) -> int:
""" Attempt to find the dataset id of the dataset with the given name.
If multiple datasets with the name exist, and ``error_if_multiple`` is ``False``,
then return the least recent still active dataset.
Raises an error if no dataset with the name is found.
Raises an error if a version is specified but it could not be found.
Parameters
----------
dataset_name : str
The name of the dataset for which to find its id.
version : int
Version to retrieve. If not specified, the oldest active version is returned.
error_if_multiple : bool (default=False)
If `False`, if multiple datasets match, return the least recent active dataset.
If `True`, if multiple datasets match, raise an error.
download_qualities : bool, optional
If `True`, also download qualities.xml file. If false use the file if it was cached.
Returns
-------
int
The id of the dataset.
"""
status = None if version is not None else "active"
candidates = list_datasets(data_name=dataset_name, status=status, data_version=version)
if error_if_multiple and len(candidates) > 1:
raise ValueError("Multiple active datasets exist with name {}".format(dataset_name))
if len(candidates) == 0:
no_dataset_for_name = "No active datasets exist with name {}".format(dataset_name)
and_version = " and version {}".format(version) if version is not None else ""
raise RuntimeError(no_dataset_for_name + and_version)
# Dataset ids are chronological so we can just sort based on ids (instead of version)
return sorted(candidates)[0]
| def _name_to_id(
dataset_name: str, version: Optional[int] = None, error_if_multiple: bool = False
) -> int:
""" Attempt to find the dataset id of the dataset with the given name.
If multiple datasets with the name exist, and ``error_if_multiple`` is ``False``,
then return the least recent still active dataset.
Raises an error if no dataset with the name is found.
Raises an error if a version is specified but it could not be found.
Parameters
----------
dataset_name : str
The name of the dataset for which to find its id.
version : int
Version to retrieve. If not specified, the oldest active version is returned.
error_if_multiple : bool (default=False)
If `False`, if multiple datasets match, return the least recent active dataset.
If `True`, if multiple datasets match, raise an error.
download_qualities : bool, optional (default=True)
If `True`, also download qualities.xml file. If false use the file if it was cached.
Returns
-------
int
The id of the dataset.
"""
status = None if version is not None else "active"
candidates = list_datasets(data_name=dataset_name, status=status, data_version=version)
if error_if_multiple and len(candidates) > 1:
raise ValueError("Multiple active datasets exist with name {}".format(dataset_name))
if len(candidates) == 0:
no_dataset_for_name = "No active datasets exist with name {}".format(dataset_name)
and_version = " and version {}".format(version) if version is not None else ""
raise RuntimeError(no_dataset_for_name + and_version)
# Dataset ids are chronological so we can just sort based on ids (instead of version)
return sorted(candidates)[0]
|
51,418 | def _shift_month(date, months, day_option="start"):
"""Shift the date to a month start or end a given number of months away.
"""
delta_year = (date.month + months) // 12
month = (date.month + months) % 12
if month == 0:
month = 12
delta_year = delta_year - 1
year = date.year + delta_year
if day_option == "start":
day = 1
elif day_option == "end":
reference = type(date)(year, month, 1)
day = _days_in_month(reference)
else:
raise ValueError(day_option)
return date.replace(year=year, month=month, day=day)
| def _shift_month(date, months, day_option="start"):
"""Shift the date to a month start or end a given number of months away.
"""
delta_year = (date.month + months) // 12
month = (date.month + months) % 12
if month == 0:
month = 12
delta_year = delta_year - 1
year = date.year + delta_year
if day_option == "start":
day = 1
elif day_option == "end":
reference = type(date)(year, month, 1)
day = _days_in_month(reference)
else:
raise ValueError(day_option)
if LooseVersion(cftime.__version__) < LooseVersion("1.0.4"):
# dayofwk=-1 is required to update the dayofwk and dayofyr attributes of
# the returned date object in versions of cftime between 1.0.2 and
# 1.0.3.4. It can be removed for versions of cftime greater than
# 1.0.3.4.
return date.replace(year=year, month=month, day=day, dayofwk=-1)
else:
return date.replace(year=year, month=month, day=day)
|
20,010 | def analyze_index(index_array, mask, histplot=False, bins=100, max_bin=None, min_bin=None):
"""This extracts the hyperspectral index statistics and writes the values as observations out to
the Outputs class.
Inputs:
index_array = Instance of the Spectral_data class, usually the output from pcv.hyperspectral.extract_index
mask = Binary mask made from selected contours
histplot = if True plots histogram of intensity values
bins = optional, number of classes to divide spectrum into
bin_max = optional, maximum bin value
bin_min = optional, minimum bin value
:param array: __main__.Spectral_data
:param mask: numpy array
:param histplot: bool
:param bins: int
:param max_bin: float
:param min_bin: float
:return analysis_image: list
"""
params.device += 1
debug = params.debug
params.debug = None
analysis_image = None
if len(np.shape(mask)) > 2 or len(np.unique(mask)) > 2:
fatal_error("Mask should be a binary image of 0 and nonzero values.")
if len(np.shape(index_array.array_data)) > 2:
fatal_error("index_array data should be a grayscale image.")
# Mask data and collect statistics about pixels within the masked image
masked_array = index_array.array_data[np.where(mask > 0)]
index_mean = np.average(masked_array)
index_median = np.median(masked_array)
index_std = np.std(masked_array)
maxval = round(np.amax(masked_array), 8) # Auto bins will detect maxval to use for calculating centers
b = 0 # Auto bins will always start from 0
if not max_bin == None:
maxval = max_bin # If bin_max is defined then overwrite maxval variable
if not min_bin == None:
b = min_bin # If bin_min is defined then overwrite starting value
# Calculate histogram
hist_val = [float(l[0]) for l in cv2.calcHist([masked_array.astype(np.float32)], [0], None, [bins], [0, 1])]
bin_width = (maxval - b) / float(bins)
bin_labels = [float(b)]
plotting_labels = [float(b)]
for i in range(bins - 1):
b += bin_width
bin_labels.append(b)
plotting_labels.append(round(b, 2))
# Make hist percentage for plotting
pixels = cv2.countNonZero(mask)
hist_percent = [(p / float(pixels)) * 100 for p in hist_val]
params.debug = debug
if histplot is True:
dataset = pd.DataFrame({'Index Reflectance': bin_labels,
'Proportion of pixels (%)': hist_percent})
fig_hist = (ggplot(data=dataset,
mapping=aes(x='Index Reflectance',
y='Proportion of pixels (%)'))
+ geom_line(color='red')
+ scale_x_continuous(breaks=bin_labels, labels=plotting_labels))
analysis_image = fig_hist
if params.debug == 'print':
fig_hist.save(os.path.join(params.debug_outdir, str(params.device) +
index_array.array_type + "hist.png"))
elif params.debug == 'plot':
print(fig_hist)
outputs.add_observation(variable='mean_' + index_array.array_type,
trait='Average ' + index_array.array_type + ' reflectance',
method='plantcv.plantcv.hyperspectral.analyze_index', scale='reflectance', datatype=float,
value=float(index_mean), label='none')
outputs.add_observation(variable='med_' + index_array.array_type,
trait='Median ' + index_array.array_type + ' reflectance',
method='plantcv.plantcv.hyperspectral.analyze_index', scale='reflectance', datatype=float,
value=float(index_median), label='none')
outputs.add_observation(variable='std_' + index_array.array_type,
trait='Standard deviation ' + index_array.array_type + ' reflectance',
method='plantcv.plantcv.hyperspectral.analyze_index', scale='reflectance', datatype=float,
value=float(index_std), label='none')
outputs.add_observation(variable='index_frequencies_' + index_array.array_type, trait='index frequencies',
method='plantcv.plantcv.analyze_index', scale='frequency', datatype=list,
value=hist_percent, label=bin_labels)
if params.debug == "plot":
plot_image(masked_array)
elif params.debug == "print":
print_image(img=masked_array, filename=os.path.join(params.debug_outdir, str(params.device) +
index_array.array_type + ".png"))
# Store images
outputs.images.append(analysis_image)
return analysis_image
| def analyze_index(index_array, mask, histplot=False, bins=100, min_bin=None, max_bin=None):
"""This extracts the hyperspectral index statistics and writes the values as observations out to
the Outputs class.
Inputs:
index_array = Instance of the Spectral_data class, usually the output from pcv.hyperspectral.extract_index
mask = Binary mask made from selected contours
histplot = if True plots histogram of intensity values
bins = optional, number of classes to divide spectrum into
bin_max = optional, maximum bin value
bin_min = optional, minimum bin value
:param array: __main__.Spectral_data
:param mask: numpy array
:param histplot: bool
:param bins: int
:param max_bin: float
:param min_bin: float
:return analysis_image: list
"""
params.device += 1
debug = params.debug
params.debug = None
analysis_image = None
if len(np.shape(mask)) > 2 or len(np.unique(mask)) > 2:
fatal_error("Mask should be a binary image of 0 and nonzero values.")
if len(np.shape(index_array.array_data)) > 2:
fatal_error("index_array data should be a grayscale image.")
# Mask data and collect statistics about pixels within the masked image
masked_array = index_array.array_data[np.where(mask > 0)]
index_mean = np.average(masked_array)
index_median = np.median(masked_array)
index_std = np.std(masked_array)
maxval = round(np.amax(masked_array), 8) # Auto bins will detect maxval to use for calculating centers
b = 0 # Auto bins will always start from 0
if not max_bin == None:
maxval = max_bin # If bin_max is defined then overwrite maxval variable
if not min_bin == None:
b = min_bin # If bin_min is defined then overwrite starting value
# Calculate histogram
hist_val = [float(l[0]) for l in cv2.calcHist([masked_array.astype(np.float32)], [0], None, [bins], [0, 1])]
bin_width = (maxval - b) / float(bins)
bin_labels = [float(b)]
plotting_labels = [float(b)]
for i in range(bins - 1):
b += bin_width
bin_labels.append(b)
plotting_labels.append(round(b, 2))
# Make hist percentage for plotting
pixels = cv2.countNonZero(mask)
hist_percent = [(p / float(pixels)) * 100 for p in hist_val]
params.debug = debug
if histplot is True:
dataset = pd.DataFrame({'Index Reflectance': bin_labels,
'Proportion of pixels (%)': hist_percent})
fig_hist = (ggplot(data=dataset,
mapping=aes(x='Index Reflectance',
y='Proportion of pixels (%)'))
+ geom_line(color='red')
+ scale_x_continuous(breaks=bin_labels, labels=plotting_labels))
analysis_image = fig_hist
if params.debug == 'print':
fig_hist.save(os.path.join(params.debug_outdir, str(params.device) +
index_array.array_type + "hist.png"))
elif params.debug == 'plot':
print(fig_hist)
outputs.add_observation(variable='mean_' + index_array.array_type,
trait='Average ' + index_array.array_type + ' reflectance',
method='plantcv.plantcv.hyperspectral.analyze_index', scale='reflectance', datatype=float,
value=float(index_mean), label='none')
outputs.add_observation(variable='med_' + index_array.array_type,
trait='Median ' + index_array.array_type + ' reflectance',
method='plantcv.plantcv.hyperspectral.analyze_index', scale='reflectance', datatype=float,
value=float(index_median), label='none')
outputs.add_observation(variable='std_' + index_array.array_type,
trait='Standard deviation ' + index_array.array_type + ' reflectance',
method='plantcv.plantcv.hyperspectral.analyze_index', scale='reflectance', datatype=float,
value=float(index_std), label='none')
outputs.add_observation(variable='index_frequencies_' + index_array.array_type, trait='index frequencies',
method='plantcv.plantcv.analyze_index', scale='frequency', datatype=list,
value=hist_percent, label=bin_labels)
if params.debug == "plot":
plot_image(masked_array)
elif params.debug == "print":
print_image(img=masked_array, filename=os.path.join(params.debug_outdir, str(params.device) +
index_array.array_type + ".png"))
# Store images
outputs.images.append(analysis_image)
return analysis_image
|
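The analyze_index pair above reduces a masked index image to summary statistics plus a binned frequency distribution. Below is a minimal numpy-only sketch of that reduction; the array, mask, and bin range are made up for illustration, and np.histogram stands in for the cv2.calcHist call used in the function.
import numpy as np

# Fabricated 5x5 "index" image and binary mask (illustrative only).
index = np.linspace(0.0, 1.0, 25).reshape(5, 5)
mask = np.zeros((5, 5), dtype=np.uint8)
mask[1:4, 1:4] = 255                              # keep the central 3x3 block

masked = index[mask > 0]                          # 1-D array of masked pixel values
stats = {"mean": float(np.mean(masked)),
         "median": float(np.median(masked)),
         "std": float(np.std(masked))}

bins = 10
counts, edges = np.histogram(masked, bins=bins, range=(0.0, 1.0))
hist_percent = 100.0 * counts / (mask > 0).sum()  # percent of masked pixels per bin
bin_labels = edges[:-1]                           # left edge of each bin

print(stats)
print(list(zip(np.round(bin_labels, 2), np.round(hist_percent, 1))))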
32,089 | def test_mod():
"""Simple test function to verify it works from the BYOI screen"""
URL_SUFFIX = BASE_URL + "breach/data/domains/google.com/"
resp = requests.get(URL_SUFFIX, headers=headers, timeout=30)
if resp.status_code == 200:
demisto.results('ok')
else:
demisto.results('not ok')
| def test_module():
"""Simple test function to verify it works from the BYOI screen"""
URL_SUFFIX = BASE_URL + "breach/data/domains/google.com/"
resp = requests.get(URL_SUFFIX, headers=headers, timeout=30)
if resp.status_code == 200:
demisto.results('ok')
else:
demisto.results('not ok')
|
8,399 | def template_redshift(observed_spectrum, template_spectrum, redshift):
"""
Find the best-fit redshift for template_spectrum to match observed_spectrum using chi2.
Parameters
----------
observed_spectrum : :class:`~specutils.Spectrum1D`
The observed spectrum.
template_spectrum : :class:`~specutils.Spectrum1D`
        The template spectrum, which will have its redshift calculated.
    redshift : `list`, `tuple`, `numpy.array`
An iterable with the redshift values to test.
Returns
-------
final_redshift : `float`
The best-fit redshift for template_spectrum to match the observed_spectrum.
redshifted_spectrum: :class:`~specutils.Spectrum1D`
A new Spectrum1D object which incorporates the template_spectrum with a spectral_axis
that has been redshifted using the final_redshift.
chi2_list : `list`
A list with the chi2 values corresponding to each input redshift value.
"""
chi2_min = None
final_redshift = None
chi2_list = []
# Loop which goes through available redshift values and finds the smallest chi2
for rs in redshift:
# Create new redshifted spectrum and run it through the chi2 method
redshifted_spectrum = Spectrum1D(spectral_axis=template_spectrum.spectral_axis*(1+rs),
flux=template_spectrum.flux, uncertainty=template_spectrum.uncertainty,
meta=template_spectrum.meta)
normalized_spectral_template, chi2 = _chi_square_for_templates(
observed_spectrum, redshifted_spectrum, "flux_conserving")
chi2_list.append(chi2)
# Set new chi2_min if suitable replacement is found
if not np.isnan(chi2) and (chi2_min is None or chi2 < chi2_min):
chi2_min = chi2
final_redshift = rs
return final_redshift, redshifted_spectrum, chi2_list
| def template_redshift(observed_spectrum, template_spectrum, redshift):
"""
Find the best-fit redshift for template_spectrum to match observed_spectrum using chi2.
Parameters
----------
observed_spectrum : :class:`~specutils.Spectrum1D`
The observed spectrum.
template_spectrum : :class:`~specutils.Spectrum1D`
        The template spectrum, which will have its redshift calculated.
    redshift : `float`, `list`, `tuple`, `numpy.array`
An iterable with the redshift values to test.
Returns
-------
final_redshift : `float`
The best-fit redshift for template_spectrum to match the observed_spectrum.
redshifted_spectrum: :class:`~specutils.Spectrum1D`
A new Spectrum1D object which incorporates the template_spectrum with a spectral_axis
that has been redshifted using the final_redshift.
chi2_list : `list`
A list with the chi2 values corresponding to each input redshift value.
"""
chi2_min = None
final_redshift = None
chi2_list = []
# Loop which goes through available redshift values and finds the smallest chi2
for rs in redshift:
# Create new redshifted spectrum and run it through the chi2 method
redshifted_spectrum = Spectrum1D(spectral_axis=template_spectrum.spectral_axis*(1+rs),
flux=template_spectrum.flux, uncertainty=template_spectrum.uncertainty,
meta=template_spectrum.meta)
normalized_spectral_template, chi2 = _chi_square_for_templates(
observed_spectrum, redshifted_spectrum, "flux_conserving")
chi2_list.append(chi2)
# Set new chi2_min if suitable replacement is found
if not np.isnan(chi2) and (chi2_min is None or chi2 < chi2_min):
chi2_min = chi2
final_redshift = rs
return final_redshift, redshifted_spectrum, chi2_list
|
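template_redshift is a brute-force grid search: stretch the template's spectral axis by (1 + z), score the match with chi-square, and keep the best z. The following self-contained numpy sketch applies the same idea to synthetic data; it ignores uncertainties and flux conservation, which the real _chi_square_for_templates handles, and all numbers are invented.
import numpy as np

rng = np.random.default_rng(0)
true_z = 0.05
wave = np.linspace(4000.0, 7000.0, 500)                    # observed wavelength grid

def template(rest_wave):                                   # one Gaussian emission line at 5000 A
    return np.exp(-0.5 * ((rest_wave - 5000.0) / 50.0) ** 2)

observed = template(wave / (1 + true_z)) + rng.normal(0.0, 0.02, wave.size)

best_z, best_chi2, chi2_list = None, None, []
for z in np.linspace(0.0, 0.1, 201):
    shifted = template(wave / (1 + z))                     # template evaluated on a redshifted axis
    chi2 = float(np.sum((observed - shifted) ** 2))
    chi2_list.append(chi2)
    if best_chi2 is None or chi2 < best_chi2:
        best_chi2, best_z = chi2, z

print(f"best-fit z = {best_z:.3f} (true z = {true_z})")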
1,221 | def test_tvfile_io():
# Test reading and writing tracks with file class
out_f = BytesIO()
ijk0 = np.arange(15).reshape((5, 3)) / 2.0
ijk1 = ijk0 + 20
vx_streams = [(ijk0, None, None), (ijk1, None, None)]
vxmm_streams = [(ijk0 * [[2, 3, 4]], None, None),
(ijk1 * [[2, 3, 4]], None, None)]
# Roundtrip basic
tvf = tv.TrackvisFile(vxmm_streams)
tvf.to_file(out_f)
out_f.seek(0)
tvf2 = tv.TrackvisFile.from_file(out_f)
assert tvf2.filename == None
assert streamlist_equal(vxmm_streams, tvf2.streamlines)
assert tvf2.points_space == None
# Voxel points_space
tvf = tv.TrackvisFile(vx_streams, points_space='voxel')
out_f.seek(0)
# No voxel size - error
with pytest.raises(tv.HeaderError):
tvf.to_file(out_f)
out_f.seek(0)
# With voxel size, no error, roundtrip works
tvf.header['voxel_size'] = [2, 3, 4]
tvf.to_file(out_f)
out_f.seek(0)
tvf2 = tv.TrackvisFile.from_file(out_f, points_space='voxel')
assert streamlist_equal(vx_streams, tvf2.streamlines)
assert tvf2.points_space == 'voxel'
out_f.seek(0)
# Also with affine specified
tvf = tv.TrackvisFile(vx_streams, points_space='voxel',
affine=np.diag([2, 3, 4, 1]))
tvf.to_file(out_f)
out_f.seek(0)
tvf2 = tv.TrackvisFile.from_file(out_f, points_space='voxel')
assert streamlist_equal(vx_streams, tvf2.streamlines)
# Fancy affine test
fancy_affine = np.array([[0., -2, 0, 10],
[3, 0, 0, 20],
[0, 0, 4, 30],
[0, 0, 0, 1]])
def f(pts): # from vx to mm
pts = pts[:, [1, 0, 2]] * [[-2, 3, 4]] # apply zooms / reorder
return pts + [[10, 20, 30]] # apply translations
xyz0, xyz1 = f(ijk0), f(ijk1)
fancy_rasmm_streams = [(xyz0, None, None), (xyz1, None, None)]
# Roundtrip
tvf = tv.TrackvisFile(fancy_rasmm_streams, points_space='rasmm')
out_f.seek(0)
# No affine
with pytest.raises(tv.HeaderError):
tvf.to_file(out_f)
out_f.seek(0)
# With affine set, no error, roundtrip works
tvf.set_affine(fancy_affine, pos_vox=True, set_order=True)
tvf.to_file(out_f)
out_f.seek(0)
tvf2 = tv.TrackvisFile.from_file(out_f, points_space='rasmm')
assert streamlist_equal(fancy_rasmm_streams, tvf2.streamlines)
assert tvf2.points_space == 'rasmm'
out_f.seek(0)
# Also when affine given in init
tvf = tv.TrackvisFile(fancy_rasmm_streams, points_space='rasmm',
affine=fancy_affine)
tvf.to_file(out_f)
out_f.seek(0)
tvf2 = tv.TrackvisFile.from_file(out_f, points_space='rasmm')
assert streamlist_equal(fancy_rasmm_streams, tvf2.streamlines)
| def test_tvfile_io():
# Test reading and writing tracks with file class
out_f = BytesIO()
ijk0 = np.arange(15).reshape((5, 3)) / 2.0
ijk1 = ijk0 + 20
vx_streams = [(ijk0, None, None), (ijk1, None, None)]
vxmm_streams = [(ijk0 * [[2, 3, 4]], None, None),
(ijk1 * [[2, 3, 4]], None, None)]
# Roundtrip basic
tvf = tv.TrackvisFile(vxmm_streams)
tvf.to_file(out_f)
out_f.seek(0)
tvf2 = tv.TrackvisFile.from_file(out_f)
assert tvf2.filename == None
assert streamlist_equal(vxmm_streams, tvf2.streamlines)
assert tvf2.points_space is None
# Voxel points_space
tvf = tv.TrackvisFile(vx_streams, points_space='voxel')
out_f.seek(0)
# No voxel size - error
with pytest.raises(tv.HeaderError):
tvf.to_file(out_f)
out_f.seek(0)
# With voxel size, no error, roundtrip works
tvf.header['voxel_size'] = [2, 3, 4]
tvf.to_file(out_f)
out_f.seek(0)
tvf2 = tv.TrackvisFile.from_file(out_f, points_space='voxel')
assert streamlist_equal(vx_streams, tvf2.streamlines)
assert tvf2.points_space == 'voxel'
out_f.seek(0)
# Also with affine specified
tvf = tv.TrackvisFile(vx_streams, points_space='voxel',
affine=np.diag([2, 3, 4, 1]))
tvf.to_file(out_f)
out_f.seek(0)
tvf2 = tv.TrackvisFile.from_file(out_f, points_space='voxel')
assert streamlist_equal(vx_streams, tvf2.streamlines)
# Fancy affine test
fancy_affine = np.array([[0., -2, 0, 10],
[3, 0, 0, 20],
[0, 0, 4, 30],
[0, 0, 0, 1]])
def f(pts): # from vx to mm
pts = pts[:, [1, 0, 2]] * [[-2, 3, 4]] # apply zooms / reorder
return pts + [[10, 20, 30]] # apply translations
xyz0, xyz1 = f(ijk0), f(ijk1)
fancy_rasmm_streams = [(xyz0, None, None), (xyz1, None, None)]
# Roundtrip
tvf = tv.TrackvisFile(fancy_rasmm_streams, points_space='rasmm')
out_f.seek(0)
# No affine
with pytest.raises(tv.HeaderError):
tvf.to_file(out_f)
out_f.seek(0)
# With affine set, no error, roundtrip works
tvf.set_affine(fancy_affine, pos_vox=True, set_order=True)
tvf.to_file(out_f)
out_f.seek(0)
tvf2 = tv.TrackvisFile.from_file(out_f, points_space='rasmm')
assert streamlist_equal(fancy_rasmm_streams, tvf2.streamlines)
assert tvf2.points_space == 'rasmm'
out_f.seek(0)
# Also when affine given in init
tvf = tv.TrackvisFile(fancy_rasmm_streams, points_space='rasmm',
affine=fancy_affine)
tvf.to_file(out_f)
out_f.seek(0)
tvf2 = tv.TrackvisFile.from_file(out_f, points_space='rasmm')
assert streamlist_equal(fancy_rasmm_streams, tvf2.streamlines)
|
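In the test above, the helper f() hand-codes the voxel-to-mm mapping that the 4x4 fancy_affine encodes (reorder axes, scale, translate). Here is a quick numpy check that applying the matrix in homogeneous coordinates reproduces f(); no nibabel is needed, and the points are the same toy grid the test uses.
import numpy as np

fancy_affine = np.array([[0., -2, 0, 10],
                         [3, 0, 0, 20],
                         [0, 0, 4, 30],
                         [0, 0, 0, 1]])

def f(pts):                          # hand-coded mapping from the test
    pts = pts[:, [1, 0, 2]] * [[-2, 3, 4]]
    return pts + [[10, 20, 30]]

def apply_affine(aff, pts):          # generic homogeneous-coordinates form
    homo = np.hstack([pts, np.ones((pts.shape[0], 1))])   # N x 4
    return (homo @ aff.T)[:, :3]

ijk = np.arange(15).reshape((5, 3)) / 2.0
assert np.allclose(f(ijk), apply_affine(fancy_affine, ijk))
print(apply_affine(fancy_affine, ijk)[:2])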
42,003 | def _interpolate_zmap(
zmap: Dict[complex, Union[int, float]], contour_plot_num: int
) -> Dict[complex, Union[int, float]]:
# implements interpolation algorithm used in Plotly
# to interpolate heatmaps and contour plots
# https://github.com/plotly/plotly.js/blob/master/src/traces/heatmap/interp2d.js#L30
# citing their doc:
#
# > Fill in missing data from a 2D array using an iterative
# > poisson equation solver with zero-derivative BC at edges.
# > Amazingly, this just amounts to repeatedly averaging all the existing
# > nearest neighbors
max_fractional_delta = 1.0
empties = _find_coordinates_where_empty(zmap, contour_plot_num)
# one pass to fill in a starting value for all the empties
zmap, _ = _run_iteration(zmap, empties)
for _ in range(NUM_OPTIMIZATION_ITERATIONS):
if max_fractional_delta > FRACTIONAL_DELTA_THRESHOLD:
# correct for overshoot and run again
max_fractional_delta = 0.5 - 0.25 * min(1, max_fractional_delta * 0.5)
zmatrix, max_fractional_delta = _run_iteration(zmap, empties, max_fractional_delta)
else:
break
return zmap
| def _interpolate_zmap(
zmap: Dict[complex, Union[int, float]], contour_plot_num: int
) -> None:
# implements interpolation algorithm used in Plotly
# to interpolate heatmaps and contour plots
# https://github.com/plotly/plotly.js/blob/master/src/traces/heatmap/interp2d.js#L30
# citing their doc:
#
# > Fill in missing data from a 2D array using an iterative
# > poisson equation solver with zero-derivative BC at edges.
# > Amazingly, this just amounts to repeatedly averaging all the existing
# > nearest neighbors
max_fractional_delta = 1.0
empties = _find_coordinates_where_empty(zmap, contour_plot_num)
# one pass to fill in a starting value for all the empties
zmap, _ = _run_iteration(zmap, empties)
for _ in range(NUM_OPTIMIZATION_ITERATIONS):
if max_fractional_delta > FRACTIONAL_DELTA_THRESHOLD:
# correct for overshoot and run again
max_fractional_delta = 0.5 - 0.25 * min(1, max_fractional_delta * 0.5)
zmatrix, max_fractional_delta = _run_iteration(zmap, empties, max_fractional_delta)
else:
break
return zmap
|
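_interpolate_zmap fills missing grid cells by repeatedly replacing each empty cell with the average of its existing nearest neighbors, the zero-derivative Poisson trick Plotly uses. Below is a minimal numpy sketch of that idea on a small grid with NaN holes; the convergence test is simplified to a fixed iteration budget, and the seeding of the empties is cruder than _run_iteration's.
import numpy as np

z = np.array([[1.0, 2.0, 3.0],
              [4.0, np.nan, 6.0],
              [7.0, 8.0, np.nan]])
empties = list(zip(*np.where(np.isnan(z))))     # coordinates of the holes
z[np.isnan(z)] = np.nanmean(z)                  # crude starting value for every hole

for _ in range(100):                            # fixed budget instead of a delta threshold
    for r, c in empties:
        neighbors = []
        for dr, dc in ((-1, 0), (1, 0), (0, -1), (0, 1)):
            rr, cc = r + dr, c + dc
            if 0 <= rr < z.shape[0] and 0 <= cc < z.shape[1]:
                neighbors.append(z[rr, cc])
        z[r, c] = np.mean(neighbors)            # average of the available neighbors

print(np.round(z, 2))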
24,685 | def ordena_por_regiao(empresas):
por_regiao = {}
dados = empresas
for empresa in dados:
regiao = empresa['regiao']
estado = empresa['estado']
por_estado = por_regiao.get(regiao, {})
no_estado = por_estado.get(estado, [])
no_estado.append(empresa)
por_estado[estado] = no_estado
por_regiao[regiao] = por_estado
empresas = OrderedDict()
for regiao in sorted(por_regiao):
empresas[regiao] = OrderedDict()
for estado in sorted(por_regiao[regiao]):
no_estado = por_regiao[regiao][estado]
no_estado.sort(key=lambda x: x['nome'])
no_estado.sort(key=lambda x: x['cidade'])
empresas[regiao][estado] = no_estado
return empresas
| def ordena_por_regiao(empresas):
por_regiao = {}
for empresa in empresas:
regiao = empresa['regiao']
estado = empresa['estado']
por_estado = por_regiao.get(regiao, {})
no_estado = por_estado.get(estado, [])
no_estado.append(empresa)
por_estado[estado] = no_estado
por_regiao[regiao] = por_estado
empresas = OrderedDict()
for regiao in sorted(por_regiao):
empresas[regiao] = OrderedDict()
for estado in sorted(por_regiao[regiao]):
no_estado = por_regiao[regiao][estado]
no_estado.sort(key=lambda x: x['nome'])
no_estado.sort(key=lambda x: x['cidade'])
empresas[regiao][estado] = no_estado
return empresas
|
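ordena_por_regiao buckets records into region then state and orders each bucket by city, then name (two stable sorts). Below is a compact usage-style sketch with made-up records; setdefault and a composite sort key replace the get/append and double-sort pattern but produce the same ordering.
from collections import OrderedDict

empresas = [  # made-up records, just to show the ordering
    {"nome": "Beta", "cidade": "Campinas", "estado": "SP", "regiao": "Sudeste"},
    {"nome": "Alfa", "cidade": "Campinas", "estado": "SP", "regiao": "Sudeste"},
    {"nome": "Gama", "cidade": "Recife", "estado": "PE", "regiao": "Nordeste"},
]

por_regiao = {}
for e in empresas:
    por_regiao.setdefault(e["regiao"], {}).setdefault(e["estado"], []).append(e)

ordenado = OrderedDict()
for regiao in sorted(por_regiao):
    ordenado[regiao] = OrderedDict()
    for estado in sorted(por_regiao[regiao]):
        ordenado[regiao][estado] = sorted(por_regiao[regiao][estado],
                                          key=lambda e: (e["cidade"], e["nome"]))

for regiao, estados in ordenado.items():
    for estado, lista in estados.items():
        print(regiao, estado, [e["nome"] for e in lista])
# Nordeste PE ['Gama']
# Sudeste SP ['Alfa', 'Beta']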
41,577 | def segment_volume(folder_model: str, fname_images: list, gpu_id: int = 0, options: dict = None):
"""Segment an image.
Segment an image (`fname_image`) using a pre-trained model (`folder_model`). If provided, a region of interest
(`fname_roi`) is used to crop the image prior to segment it.
Args:
folder_model (str): foldername which contains
(1) the model ('folder_model/folder_model.pt') to use
(2) its configuration file ('folder_model/folder_model.json') used for the training,
see https://github.com/neuropoly/ivadomed/wiki/configuration-file
fname_images (list): list of image filenames (e.g. .nii.gz) to segment. Multichannel models require multiple
            images to segment, i.e., len(fname_images) > 1.
gpu_id (int): Number representing gpu number if available. Currently does NOT support multiple GPU segmentation.
options (dict): This can optionally contain any of the following key-value pairs:
* 'binarize_prediction': (float) Binarize segmentation with specified threshold. \
Predictions below the threshold become 0, and predictions above or equal to \
threshold become 1. Set to -1 for no thresholding (i.e., soft segmentation).
* 'binarize_maxpooling': (bool) Binarize by setting to 1 the voxel having the maximum prediction across \
all classes. Useful for multiclass models.
* 'fill_holes': (bool) Fill small holes in the segmentation.
* 'keep_largest': (bool) Keep the largest connected-object for each class from the output segmentation.
* 'remove_small': (list of str) Minimal object size to keep with unit (mm3 or vox). A single value can be provided \
or one value per prediction class. Single value example: ["1mm3"], ["5vox"]. Multiple values \
example: ["10", "20", "10vox"] (remove objects smaller than 10 voxels for class 1 and 3, \
and smaller than 20 voxels for class 2).
* 'pixel_size': (list of float) List of microscopy pixel size in micrometers. \
Length equals 2 [PixelSizeX, PixelSizeY] for 2D or 3 [PixelSizeX, PixelSizeY, PixelSizeZ] for 3D, \
where X is the width, Y the height and Z the depth of the image.
* 'pixel_size_units': (str) Units of pixel size (Must be either "mm", "um" or "nm")
* 'overlap_2D': (list of int) List of overlaps in pixels for 2D patching. Length equals 2 [OverlapX, OverlapY], \
where X is the width and Y the height of the image.
* 'metadata': (str) Film metadata.
* 'fname_prior': (str) An image filename (e.g., .nii.gz) containing processing information \
(e.g., spinal cord segmentation, spinal location or MS lesion classification, spinal cord centerline), \
used to crop the image prior to segment it if provided. \
The segmentation is not performed on the slices that are empty in this image.
Returns:
list, list: List of nibabel objects containing the soft segmentation(s), one per prediction class, \
List of target suffix associated with each prediction in `pred_list`
"""
# Check if model folder exists and get filenames to be stored as string
fname_model: str
fname_model_metadata: str
fname_model, fname_model_metadata = imed_models.get_model_filenames(folder_model)
# Load model training config
context = imed_config_manager.ConfigurationManager(fname_model_metadata).get_config()
postpro_list = ['binarize_prediction', 'binarize_maxpooling', 'keep_largest', ' fill_holes',
'remove_small']
if options is not None and any(pp in options for pp in postpro_list):
set_postprocessing_options(options, context)
# LOADER
loader_params = context[ConfigKW.LOADER_PARAMETERS]
slice_axis = imed_utils.AXIS_DCT[loader_params[LoaderParamsKW.SLICE_AXIS]]
metadata = {}
fname_roi = None
if (options is not None) and (OptionKW.FNAME_PRIOR in options):
fname_prior = options.get(OptionKW.FNAME_PRIOR)
else:
fname_prior = None
if fname_prior is not None:
if LoaderParamsKW.ROI_PARAMS in loader_params and loader_params[LoaderParamsKW.ROI_PARAMS][ROIParamsKW.SUFFIX] is not None:
fname_roi = fname_prior
# TRANSFORMATIONS
metadata = process_transformations(context, fname_roi, fname_prior, metadata, slice_axis, fname_images)
# Compose transforms
_, _, transform_test_params = imed_transforms.get_subdatasets_transforms(context[ConfigKW.TRANSFORMATION])
tranform_lst, undo_transforms = imed_transforms.prepare_transforms(transform_test_params)
# Force filter_empty_mask to False if fname_roi = None
if fname_roi is None and SliceFilterParamsKW.FILTER_EMPTY_MASK in loader_params[LoaderParamsKW.SLICE_FILTER_PARAMS]:
logger.warning("fname_roi has not been specified, then the entire volume is processed.")
loader_params[LoaderParamsKW.SLICE_FILTER_PARAMS][SliceFilterParamsKW.FILTER_EMPTY_MASK] = False
kernel_3D = bool(ConfigKW.MODIFIED_3D_UNET in context and context[ConfigKW.MODIFIED_3D_UNET][ModelParamsKW.APPLIED]) or \
not context[ConfigKW.DEFAULT_MODEL][ModelParamsKW.IS_2D]
# Assign length_2D and stride_2D for 2D patching
length_2D = context[ConfigKW.DEFAULT_MODEL][ModelParamsKW.LENGTH_2D] if \
ModelParamsKW.LENGTH_2D in context[ConfigKW.DEFAULT_MODEL] else []
stride_2D = context[ConfigKW.DEFAULT_MODEL][ModelParamsKW.STRIDE_2D] if \
ModelParamsKW.STRIDE_2D in context[ConfigKW.DEFAULT_MODEL] else []
is_2d_patch = bool(length_2D)
if is_2d_patch and (options is not None) and (OptionKW.OVERLAP_2D in options):
overlap_2D = options.get(OptionKW.OVERLAP_2D)
# Swap OverlapX and OverlapY resulting in an array in order [OverlapY, OverlapX]
# to match length_2D and stride_2D in [Height, Width] orientation.
overlap_2D[1], overlap_2D[0] = overlap_2D[0], overlap_2D[1]
# Adjust stride_2D with overlap_2D
stride_2D = [x1 - x2 for (x1, x2) in zip(length_2D, overlap_2D)]
# Add microscopy pixel size and pixel size units from options to metadata for filenames_pairs
if (options is not None) and (OptionKW.PIXEL_SIZE in options):
metadata[MetadataKW.PIXEL_SIZE] = options.get(OptionKW.PIXEL_SIZE)
if (options is not None) and (OptionKW.PIXEL_SIZE_UNITS in options):
metadata[MetadataKW.PIXEL_SIZE_UNITS] = options.get(OptionKW.PIXEL_SIZE_UNITS)
filename_pairs = [(fname_images, None, fname_roi, metadata if isinstance(metadata, list) else [metadata])]
if kernel_3D:
ds = MRI3DSubVolumeSegmentationDataset(filename_pairs,
transform=tranform_lst,
length=context[ConfigKW.MODIFIED_3D_UNET][ModelParamsKW.LENGTH_3D],
stride=context[ConfigKW.MODIFIED_3D_UNET][ModelParamsKW.STRIDE_3D],
slice_axis=slice_axis)
logger.info(f"Loaded {len(ds)} {loader_params[LoaderParamsKW.SLICE_AXIS]} volumes of shape "
f"{context[ConfigKW.MODIFIED_3D_UNET][ModelParamsKW.LENGTH_3D]}.")
else:
ds = MRI2DSegmentationDataset(filename_pairs,
length=length_2D,
stride=stride_2D,
slice_axis=slice_axis,
cache=True,
transform=tranform_lst,
slice_filter_fn=SliceFilter(
**loader_params[LoaderParamsKW.SLICE_FILTER_PARAMS]))
ds.load_filenames()
if is_2d_patch:
logger.info(f"Loaded {len(ds)} {loader_params[LoaderParamsKW.SLICE_AXIS]} patches of shape {length_2D}.")
else:
logger.info(f"Loaded {len(ds)} {loader_params[LoaderParamsKW.SLICE_AXIS]} slices.")
model_params = {}
if ConfigKW.FILMED_UNET in context and context[ConfigKW.FILMED_UNET][ModelParamsKW.APPLIED]:
onehotencoder = get_onehotencoder(context, folder_model, options, ds)
model_params.update({ModelParamsKW.NAME: ConfigKW.FILMED_UNET,
ModelParamsKW.FILM_ONEHOTENCODER: onehotencoder,
ModelParamsKW.N_METADATA: len([ll for l in onehotencoder.categories_ for ll in l])})
# Data Loader
data_loader = DataLoader(ds, batch_size=context[ConfigKW.TRAINING_PARAMETERS][TrainingParamsKW.BATCH_SIZE],
shuffle=False, pin_memory=True,
collate_fn=imed_loader_utils.imed_collate,
num_workers=0)
# Loop across batches
preds_list, slice_idx_list = [], []
last_sample_bool, weight_matrix, volume, image = False, None, None, None
for i_batch, batch in enumerate(tqdm(data_loader, desc="Segment_volume")):
preds = get_preds(context, fname_model, model_params, gpu_id, batch)
# Set datatype to gt since prediction should be processed the same way as gt
for b in batch[MetadataKW.INPUT_METADATA]:
for modality in b:
modality['data_type'] = 'gt'
# Reconstruct 3D object
pred_list, target_list, last_sample_bool, weight_matrix, volume, image = reconstruct_3d_object(
context, batch, undo_transforms, preds, preds_list, kernel_3D, is_2d_patch, slice_axis,
slice_idx_list, data_loader, fname_images, i_batch, last_sample_bool, weight_matrix,
volume, image
)
return pred_list, target_list
| def segment_volume(folder_model: str, fname_images: list, gpu_id: int = 0, options: dict = None):
"""Segment an image.
Segment an image (`fname_image`) using a pre-trained model (`folder_model`). If provided, a region of interest
(`fname_roi`) is used to crop the image prior to segment it.
Args:
folder_model (str): foldername which contains
(1) the model ('folder_model/folder_model.pt') to use
(2) its configuration file ('folder_model/folder_model.json') used for the training,
see https://github.com/neuropoly/ivadomed/wiki/configuration-file
fname_images (list): list of image filenames (e.g. .nii.gz) to segment. Multichannel models require multiple
            images to segment, i.e., len(fname_images) > 1.
gpu_id (int): Number representing gpu number if available. Currently does NOT support multiple GPU segmentation.
options (dict): This can optionally contain any of the following key-value pairs:
* 'binarize_prediction': (float) Binarize segmentation with specified threshold. \
Predictions below the threshold become 0, and predictions above or equal to \
threshold become 1. Set to -1 for no thresholding (i.e., soft segmentation).
* 'binarize_maxpooling': (bool) Binarize by setting to 1 the voxel having the maximum prediction across \
all classes. Useful for multiclass models.
* 'fill_holes': (bool) Fill small holes in the segmentation.
* 'keep_largest': (bool) Keep the largest connected-object for each class from the output segmentation.
* 'remove_small': (list of str) Minimal object size to keep with unit (mm3 or vox). A single value can be provided \
or one value per prediction class. Single value example: ["1mm3"], ["5vox"]. Multiple values \
example: ["10", "20", "10vox"] (remove objects smaller than 10 voxels for class 1 and 3, \
and smaller than 20 voxels for class 2).
* 'pixel_size': (list of float) List of microscopy pixel size in micrometers. \
Length equals 2 [PixelSizeX, PixelSizeY] for 2D or 3 [PixelSizeX, PixelSizeY, PixelSizeZ] for 3D, \
where X is the width, Y the height and Z the depth of the image.
* 'pixel_size_units': (str) Units of pixel size (Must be either "mm", "um" or "nm")
* 'overlap_2D': (list of int) List of overlaps in pixels for 2D patching. Length equals 2 [OverlapX, OverlapY], \
where X is the width and Y the height of the image.
* 'metadata': (str) Film metadata.
* 'fname_prior': (str) An image filename (e.g., .nii.gz) containing processing information \
(e.g., spinal cord segmentation, spinal location or MS lesion classification, spinal cord centerline), \
used to crop the image prior to segment it if provided. \
The segmentation is not performed on the slices that are empty in this image.
Returns:
list, list: List of nibabel objects containing the soft segmentation(s), one per prediction class, \
List of target suffix associated with each prediction in `pred_list`
"""
# Check if model folder exists and get filenames to be stored as string
fname_model: str
fname_model_metadata: str
fname_model, fname_model_metadata = imed_models.get_model_filenames(folder_model)
# Load model training config
context = imed_config_manager.ConfigurationManager(fname_model_metadata).get_config()
postpro_list = ['binarize_prediction', 'binarize_maxpooling', 'keep_largest', ' fill_holes',
'remove_small']
if options is not None and any(pp in options for pp in postpro_list):
set_postprocessing_options(options, context)
# LOADER
loader_params = context[ConfigKW.LOADER_PARAMETERS]
slice_axis = imed_utils.AXIS_DCT[loader_params[LoaderParamsKW.SLICE_AXIS]]
metadata = {}
fname_roi = None
if (options is not None) and (OptionKW.FNAME_PRIOR in options):
fname_prior = options.get(OptionKW.FNAME_PRIOR)
else:
fname_prior = None
if fname_prior is not None:
if LoaderParamsKW.ROI_PARAMS in loader_params and loader_params[LoaderParamsKW.ROI_PARAMS][ROIParamsKW.SUFFIX] is not None:
fname_roi = fname_prior
# TRANSFORMATIONS
metadata = process_transformations(context, fname_roi, fname_prior, metadata, slice_axis, fname_images)
# Compose transforms
_, _, transform_test_params = imed_transforms.get_subdatasets_transforms(context[ConfigKW.TRANSFORMATION])
tranform_lst, undo_transforms = imed_transforms.prepare_transforms(transform_test_params)
# Force filter_empty_mask to False if fname_roi = None
if fname_roi is None and SliceFilterParamsKW.FILTER_EMPTY_MASK in loader_params[LoaderParamsKW.SLICE_FILTER_PARAMS]:
logger.warning("fname_roi has not been specified, then the entire volume is processed.")
loader_params[LoaderParamsKW.SLICE_FILTER_PARAMS][SliceFilterParamsKW.FILTER_EMPTY_MASK] = False
kernel_3D = bool(ConfigKW.MODIFIED_3D_UNET in context and context[ConfigKW.MODIFIED_3D_UNET][ModelParamsKW.APPLIED]) or \
not context[ConfigKW.DEFAULT_MODEL][ModelParamsKW.IS_2D]
# Assign length_2D and stride_2D for 2D patching
length_2D = context[ConfigKW.DEFAULT_MODEL][ModelParamsKW.LENGTH_2D] if \
ModelParamsKW.LENGTH_2D in context[ConfigKW.DEFAULT_MODEL] else []
stride_2D = context[ConfigKW.DEFAULT_MODEL][ModelParamsKW.STRIDE_2D] if \
ModelParamsKW.STRIDE_2D in context[ConfigKW.DEFAULT_MODEL] else []
is_2d_patch = bool(length_2D)
if is_2d_patch and (options is not None) and (OptionKW.OVERLAP_2D in options):
overlap_2D = options.get(OptionKW.OVERLAP_2D)
# Swap OverlapX and OverlapY resulting in an array in order [OverlapY, OverlapX]
# to match length_2D and stride_2D in [Height, Width] orientation.
overlap_2D[1], overlap_2D[0] = overlap_2D[0], overlap_2D[1]
# Adjust stride_2D with overlap_2D
stride_2D = [x1 - x2 for (x1, x2) in zip(length_2D, overlap_2D)]
# Add microscopy pixel size and pixel size units from options to metadata for filenames_pairs
if (options is not None) and (OptionKW.PIXEL_SIZE in options):
metadata[MetadataKW.PIXEL_SIZE] = options.get(OptionKW.PIXEL_SIZE)
if (options is not None) and (OptionKW.PIXEL_SIZE_UNITS in options):
metadata[MetadataKW.PIXEL_SIZE_UNITS] = options.get(OptionKW.PIXEL_SIZE_UNITS)
filename_pairs = [(fname_images, None, fname_roi, metadata if isinstance(metadata, list) else [metadata])]
if kernel_3D:
ds = MRI3DSubVolumeSegmentationDataset(filename_pairs,
transform=tranform_lst,
length=context[ConfigKW.MODIFIED_3D_UNET][ModelParamsKW.LENGTH_3D],
stride=context[ConfigKW.MODIFIED_3D_UNET][ModelParamsKW.STRIDE_3D],
slice_axis=slice_axis)
logger.info(f"Loaded {len(ds)} {loader_params[LoaderParamsKW.SLICE_AXIS]} volumes of shape "
f"{context[ConfigKW.MODIFIED_3D_UNET][ModelParamsKW.LENGTH_3D]}.")
else:
ds = MRI2DSegmentationDataset(filename_pairs,
length=length_2D,
stride=stride_2D,
slice_axis=slice_axis,
cache=True,
transform=tranform_lst,
slice_filter_fn=SliceFilter(
**loader_params[LoaderParamsKW.SLICE_FILTER_PARAMS]))
ds.load_filenames()
if is_2d_patch:
logger.info(f"Loaded {len(ds)} {loader_params[LoaderParamsKW.SLICE_AXIS]} patches of shape {length_2D}.")
else:
logger.info(f"Loaded {len(ds)} {loader_params[LoaderParamsKW.SLICE_AXIS]} slices.")
model_params = {}
if ConfigKW.FILMED_UNET in context and context[ConfigKW.FILMED_UNET][ModelParamsKW.APPLIED]:
onehotencoder = get_onehotencoder(context, folder_model, options, ds)
model_params.update({ModelParamsKW.NAME: ConfigKW.FILMED_UNET,
ModelParamsKW.FILM_ONEHOTENCODER: onehotencoder,
ModelParamsKW.N_METADATA: len([ll for l in onehotencoder.categories_ for ll in l])})
# Data Loader
data_loader = DataLoader(ds, batch_size=context[ConfigKW.TRAINING_PARAMETERS][TrainingParamsKW.BATCH_SIZE],
shuffle=False, pin_memory=True,
collate_fn=imed_loader_utils.imed_collate,
num_workers=0)
# Loop across batches
preds_list, slice_idx_list = [], []
last_sample_bool, weight_matrix, volume, image = False, None, None, None
for i_batch, batch in enumerate(tqdm(data_loader, desc="Inference:", position=0)):
preds = get_preds(context, fname_model, model_params, gpu_id, batch)
# Set datatype to gt since prediction should be processed the same way as gt
for b in batch[MetadataKW.INPUT_METADATA]:
for modality in b:
modality['data_type'] = 'gt'
# Reconstruct 3D object
pred_list, target_list, last_sample_bool, weight_matrix, volume, image = reconstruct_3d_object(
context, batch, undo_transforms, preds, preds_list, kernel_3D, is_2d_patch, slice_axis,
slice_idx_list, data_loader, fname_images, i_batch, last_sample_bool, weight_matrix,
volume, image
)
return pred_list, target_list
|
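One detail of segment_volume worth isolating is how the overlap_2D option, given as [OverlapX, OverlapY] in pixels, becomes a patch stride: the pair is swapped into [Height, Width] order to line up with length_2D, then subtracted element-wise. A standalone sketch of that bookkeeping with illustrative values:
length_2D = [256, 256]        # patch size, [Height, Width]
overlap_2D = [32, 48]         # user option, [OverlapX, OverlapY]

# Swap to [OverlapY, OverlapX] so it lines up with length_2D's [Height, Width] order.
overlap_2D[1], overlap_2D[0] = overlap_2D[0], overlap_2D[1]

# Stride is the patch length minus the overlap along each axis.
stride_2D = [length - overlap for length, overlap in zip(length_2D, overlap_2D)]
print(stride_2D)              # [208, 224]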
2,758 | def test_spca_n_iter_deprecation():
"""Check that we raise a warning for the deprecation of `n_iter` and it is ignore
when `max_iter` is specified.
"""
rng = np.random.RandomState(0)
n_samples, n_features = 12, 10
X = rng.randn(n_samples, n_features)
warn_msg = "'n_iter' is deprecated in version 1.1 and will be removed"
with pytest.warns(FutureWarning, match=warn_msg):
MiniBatchSparsePCA(n_iter=2).fit(X)
n_iter, max_iter = 1, 100
model = MiniBatchSparsePCA(n_iter=n_iter, max_iter=max_iter, random_state=0).fit(X)
assert model.n_iter_ > 1
assert model.n_iter_ <= max_iter
| def test_spca_n_iter_deprecation():
"""Check that we raise a warning for the deprecation of `n_iter` and it is ignored
when `max_iter` is specified.
"""
rng = np.random.RandomState(0)
n_samples, n_features = 12, 10
X = rng.randn(n_samples, n_features)
warn_msg = "'n_iter' is deprecated in version 1.1 and will be removed"
with pytest.warns(FutureWarning, match=warn_msg):
MiniBatchSparsePCA(n_iter=2).fit(X)
n_iter, max_iter = 1, 100
model = MiniBatchSparsePCA(n_iter=n_iter, max_iter=max_iter, random_state=0).fit(X)
assert model.n_iter_ > 1
assert model.n_iter_ <= max_iter
|
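The scikit-learn test above exercises a standard deprecation pattern: warn when the old parameter is used, and let an explicitly passed new parameter win. The sketch below is a library-agnostic version of that pattern; the class name, the sentinel default, and the fallback rule are invented for illustration and are simpler than scikit-learn's actual handling.
import warnings

class Frobnicator:
    """Toy estimator that deprecates `n_iter` in favour of `max_iter`."""

    def __init__(self, n_iter="deprecated", max_iter=100):
        self.n_iter = n_iter
        self.max_iter = max_iter

    def fit(self):
        max_iter = self.max_iter
        if self.n_iter != "deprecated":
            warnings.warn("'n_iter' is deprecated, use 'max_iter' instead.", FutureWarning)
            if self.max_iter == 100:      # fall back only when max_iter was left at its default
                max_iter = self.n_iter
        self.n_iter_ = max_iter           # pretend the fit ran this many iterations
        return self

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    model = Frobnicator(n_iter=2, max_iter=50).fit()
assert any(issubclass(w.category, FutureWarning) for w in caught)
assert model.n_iter_ == 50                # the explicit max_iter wins over the deprecated n_iter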
40,198 | def _check_nonnegative_integers(
data: Union[pd.DataFrame, np.ndarray, sp_sparse.spmatrix, h5py.Dataset],
n_to_check: int = 20,
):
"""Approximately checks values of data to ensure it is count data."""
# for backed anndata
if isinstance(data, h5py.Dataset) or isinstance(data, SparseDataset):
data = data[:100]
if isinstance(data, np.ndarray):
data = data
elif issubclass(type(data), sp_sparse.spmatrix):
data = data.data
elif isinstance(data, pd.DataFrame):
data = data.to_numpy()
else:
raise TypeError("data type not understood")
inds = np.random.choice(len(data), size=(n_to_check,))
check = jax.device_put(data.flat[inds], device=jax.devices("cpu")[0])
negative, non_integer = _is_count(check)
return not (negative or non_integer)
| def _check_nonnegative_integers(
data: Union[pd.DataFrame, np.ndarray, sp_sparse.spmatrix, h5py.Dataset],
n_to_check: int = 20,
):
"""Approximately checks values of data to ensure it is count data."""
# for backed anndata
if isinstance(data, h5py.Dataset) or isinstance(data, SparseDataset):
data = data[:100]
if isinstance(data, np.ndarray):
data = data
elif issubclass(type(data), sp_sparse.spmatrix):
data = data.data
elif isinstance(data, pd.DataFrame):
data = data.to_numpy()
else:
raise TypeError("data type not understood")
inds = np.random.choice(len(data), size=(n_to_check,))
check = jax.device_put(data.flat[inds], device=jax.devices("cpu")[0])
negative, non_integer = _is_not_count_val(check)
return not (negative or non_integer)
|
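_check_nonnegative_integers spot-checks a random handful of values to decide whether a matrix looks like raw counts. The same check can be written with numpy alone; in this hedged sketch the jax device placement and the _is_count / _is_not_count_val helpers from the row are replaced by inline logic.
import numpy as np

def looks_like_counts(data, n_to_check=20, seed=0):
    """Approximate check: sample a few values and reject negatives or non-integers."""
    rng = np.random.default_rng(seed)
    flat = np.asarray(data).ravel()
    sample = flat[rng.integers(0, flat.size, size=n_to_check)]
    has_negative = bool(np.any(sample < 0))
    has_non_integer = bool(np.any(sample % 1 != 0))
    return not (has_negative or has_non_integer)

print(looks_like_counts(np.array([[0, 3, 5], [2, 7, 1]])))     # True
print(looks_like_counts(np.array([[0.5, 3.0], [2.0, -1.0]])))  # False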
28,230 | def test_nested_measurement_throws_error(experiment, DAC, DMM):
meas1 = Measurement()
meas2 = Measurement()
# pytest.raises(Exception): does not work because it does not allow
# the state of _is_entered to be changed to False when context manager
# ends. Hence all the test after this one fails.
try:
with meas1.run():
with meas2.run():
pass
pass
except RuntimeError:
return True
assert meas1.run()._is_entered == False
assert meas2.run()._is_entered == False
| def test_nested_measurement_throws_error(experiment):
meas1 = Measurement()
meas2 = Measurement()
# pytest.raises(Exception): does not work because it does not allow
# the state of _is_entered to be changed to False when context manager
# ends. Hence all the test after this one fails.
try:
with meas1.run():
with meas2.run():
pass
pass
except RuntimeError:
return True
assert meas1.run()._is_entered == False
assert meas2.run()._is_entered == False
|
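The QCoDeS test asserts that starting a run inside another run raises and that the entered flag is reset afterwards. Below is a generic sketch of a context manager that forbids nesting in the same way; the class and attribute names are illustrative, not the QCoDeS API.
class Runner:
    """Context manager that refuses to be entered while another instance is active."""
    _active = False                # class-level flag shared by all runners

    def __enter__(self):
        if Runner._active:
            raise RuntimeError("Nested runs are not allowed")
        Runner._active = True
        self._is_entered = True
        return self

    def __exit__(self, exc_type, exc, tb):
        Runner._active = False
        self._is_entered = False
        return False               # do not swallow exceptions

try:
    with Runner():
        with Runner():             # second entry raises RuntimeError
            pass
except RuntimeError as err:
    print(err)

assert Runner._active is False     # flag was reset when the outer context exited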
34,260 | def _overwrite_endpoints_for_local_x(
endpoints: AvailableEndpoints, rasa_x_token: Text, rasa_x_url: Text
):
from rasa.utils.endpoints import EndpointConfig
import questionary
# Checking if endpoint.yml has existing url and wait time values set, if so give warning we are overwriting
# the endpoint.yml file.
custom_wait_time_pulls = endpoints.model.kwargs["wait_time_between_pulls"]
custom_url = endpoints.model.url
if custom_url is not None:
cli_utils.print_warning(
"Modifying the endpoints.yml file for Rasa X with our defaults"
)
if custom_wait_time_pulls:
endpoints.model = EndpointConfig(
"{}/projects/default/models/tags/production".format(rasa_x_url),
token=rasa_x_token,
wait_time_between_pulls=custom_wait_time_pulls,
)
else:
endpoints.model = EndpointConfig(
"{}/projects/default/models/tags/production".format(rasa_x_url),
token=rasa_x_token,
wait_time_between_pulls=2,
)
overwrite_existing_event_broker = False
if endpoints.event_broker and not _is_correct_event_broker(endpoints.event_broker):
cli_utils.print_error(
"Rasa X currently only supports a SQLite event broker with path '{}' "
"when running locally. You can deploy Rasa X with Docker "
"(https://rasa.com/docs/rasa-x/deploy/) if you want to use "
"other event broker configurations.".format(DEFAULT_EVENTS_DB)
)
overwrite_existing_event_broker = questionary.confirm(
"Do you want to continue with the default SQLite event broker?"
).ask()
if not overwrite_existing_event_broker:
exit(0)
if not endpoints.tracker_store or overwrite_existing_event_broker:
endpoints.event_broker = EndpointConfig(type="sql", db=DEFAULT_EVENTS_DB)
| def _overwrite_endpoints_for_local_x(
endpoints: AvailableEndpoints, rasa_x_token: Text, rasa_x_url: Text
):
from rasa.utils.endpoints import EndpointConfig
import questionary
# Checking if endpoint.yml has existing url and wait time values set, if so give warning we are overwriting
# the endpoint.yml file.
custom_wait_time_pulls = endpoints.model.kwargs["wait_time_between_pulls"]
custom_url = endpoints.model.url
if custom_url is not None and custom_url != model_pull_url:
cli_utils.print_warning(
"Modifying the endpoints.yml file for Rasa X with our defaults"
)
if custom_wait_time_pulls:
endpoints.model = EndpointConfig(
"{}/projects/default/models/tags/production".format(rasa_x_url),
token=rasa_x_token,
wait_time_between_pulls=custom_wait_time_pulls,
)
else:
endpoints.model = EndpointConfig(
"{}/projects/default/models/tags/production".format(rasa_x_url),
token=rasa_x_token,
wait_time_between_pulls=2,
)
overwrite_existing_event_broker = False
if endpoints.event_broker and not _is_correct_event_broker(endpoints.event_broker):
cli_utils.print_error(
"Rasa X currently only supports a SQLite event broker with path '{}' "
"when running locally. You can deploy Rasa X with Docker "
"(https://rasa.com/docs/rasa-x/deploy/) if you want to use "
"other event broker configurations.".format(DEFAULT_EVENTS_DB)
)
overwrite_existing_event_broker = questionary.confirm(
"Do you want to continue with the default SQLite event broker?"
).ask()
if not overwrite_existing_event_broker:
exit(0)
if not endpoints.tracker_store or overwrite_existing_event_broker:
endpoints.event_broker = EndpointConfig(type="sql", db=DEFAULT_EVENTS_DB)
|
27,751 | def getparentnodeids(nodeid: str) -> Iterator[str]:
"""Return the parent node IDs of a given node ID, inclusive.
For the node ID
"testing/code/test_excinfo.py::TestFormattedExcinfo::test_repr_source"
the result would be
""
"testing"
"testing/code"
"testing/code/test_excinfo.py"
"testing/code/test_excinfo.py::TestFormattedExcinfo"
"testing/code/test_excinfo.py::TestFormattedExcinfo::test_repr_source"
Note that :: parts are only considered at the last / component.
"""
pos = 0
sep = SEP
yield ""
while True:
at = nodeid.find(sep, pos)
if at == -1 and sep == SEP:
sep = "::"
elif at == -1:
if nodeid:
yield nodeid
break
else:
if at:
yield nodeid[:at]
pos = at + len(sep)
| def iterparentnodeids(nodeid: str) -> Iterator[str]:
"""Return the parent node IDs of a given node ID, inclusive.
For the node ID
"testing/code/test_excinfo.py::TestFormattedExcinfo::test_repr_source"
the result would be
""
"testing"
"testing/code"
"testing/code/test_excinfo.py"
"testing/code/test_excinfo.py::TestFormattedExcinfo"
"testing/code/test_excinfo.py::TestFormattedExcinfo::test_repr_source"
Note that :: parts are only considered at the last / component.
"""
pos = 0
sep = SEP
yield ""
while True:
at = nodeid.find(sep, pos)
if at == -1 and sep == SEP:
sep = "::"
elif at == -1:
if nodeid:
yield nodeid
break
else:
if at:
yield nodeid[:at]
pos = at + len(sep)
|
30,387 | def encrypt_email_body(client: Client, args: Dict):
""" generate an S/MIME-encrypted message
Args:
client: Client
args: Dict
"""
message_body = args.get('message', '').encode('utf-8')
# Make a MemoryBuffer of the message.
buf = makebuf(message_body)
# Load target cert to encrypt to.
x509 = X509.load_cert(client.public_key_file)
sk = X509.X509_Stack()
sk.push(x509)
client.smime.set_x509_stack(sk)
# Set cipher: 3-key triple-DES in CBC mode.
client.smime.set_cipher(SMIME.Cipher('des_ede3_cbc'))
# Encrypt the buffer.
p7 = client.smime.encrypt(buf)
# Output p7 in mail-friendly format.
out = BIO.MemoryBuffer()
client.smime.write(out, p7)
encrypted_message = out.read().decode('utf-8')
message = encrypted_message.split('\n\n')
headers = message[0]
new_headers = headers.replace(': ', '=').replace('\n', ',')
entry_context = {
'SMIME': {
'Message': encrypted_message,
'Headers': new_headers
}
}
return encrypted_message, entry_context
| def encrypt_email_body(client: Client, args: Dict):
""" generate an S/MIME-encrypted message
Args:
client: Client
args: Dict
"""
message_body = args.get('message', '').encode('utf-8')
# Make a MemoryBuffer of the message.
buf = makebuf(message_body)
# Load target cert to encrypt to.
x509 = X509.load_cert(client.public_key_file)
sk = X509.X509_Stack()
sk.push(x509)
client.smime.set_x509_stack(sk)
# Set cipher: 3-key triple-DES in CBC mode.
client.smime.set_cipher(SMIME.Cipher('des_ede3_cbc'))
# Encrypt the buffer.
p7 = client.smime.encrypt(buf)
# Output p7 in mail-friendly format.
out = BIO.MemoryBuffer()
client.smime.write(out, p7)
encrypted_message = out.read().decode('utf-8')
message = encrypted_message.split('\n\n')
headers = message[0]
new_headers = headers.replace(': ', '=').replace('\n', ',')
entry_context = {
'SMIME.Encrypted': {
'Message': encrypted_message,
'Headers': new_headers
}
}
return encrypted_message, entry_context
|
1,306 | def test_bagging_classifier_voting():
# Test BaggingClassifier when base_estimator doesn't define predict_proba
A = np.random.rand(10, 4)
Y = np.random.randint(2, size=10, dtype=np.bool)
bagging_classifier = BaggingClassifier(DummyVoteClassifier())
bagging_classifier.fit(A, Y)
# All ensemble members predict True; BaggingClassifier should predict True
assert(bagging_classifier.predict(A).all())
| def test_bagging_classifier_voting():
# Test BaggingClassifier when base_estimator doesn't define predict_proba
A = np.random.rand(10, 4)
Y = np.random.randint(2, size=10, dtype=np.bool)
bagging_classifier = BaggingClassifier(ConstantClassifier(constant=True))
bagging_classifier.fit(A, Y)
# All ensemble members predict True; BaggingClassifier should predict True
assert(bagging_classifier.predict(A).all())
|
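The bagging test swaps in a base estimator that predicts a constant and defines no predict_proba, so BaggingClassifier has to aggregate hard predictions by voting. Neither DummyVoteClassifier nor ConstantClassifier is shown in the row, so the stand-in below is invented for illustration; it only needs fit and predict.
import numpy as np
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.ensemble import BaggingClassifier

class AlwaysTrueClassifier(ClassifierMixin, BaseEstimator):
    """Predicts True for every sample and deliberately omits predict_proba."""

    def fit(self, X, y):
        self.classes_ = np.unique(y)
        return self

    def predict(self, X):
        return np.ones(len(X), dtype=bool)

X = np.random.rand(10, 4)
y = np.array([True, False] * 5)
bagger = BaggingClassifier(AlwaysTrueClassifier()).fit(X, y)
print(bagger.predict(X).all())   # True: every ensemble member votes True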
5,323 | def find_health_check(Id=None, Name=None, region=None, key=None, keyid=None, profile=None):
'''
Return detailed info about healthcheck with given Id or name.
name
The name of the health check to lookup.
region
Region to connect to.
key
Secret key to be used.
keyid
Access key to be used.
profile
Dict, or pillar key pointing to a dict, containing AWS region/key/keyid.
CLI Example:
.. code-block:: bash
salt myminion boto3_route53.find_health_checks ANAME \
profile='{"region": "us-east-1", "keyid": "A12345678AB", "key": "xblahblahblah"}'
'''
ret = find_health_checks(Id=Id, Name=Name, region=region, key=key, keyid=keyid, profile=profile)
if len(ret) > 1:
ids = [z['Id'] for z in ret]
raise SaltInvocationError(
            'Request matched more than one HealthCheck ({}). Refine your '
'criteria and try again.'.format(ids)
)
return ret[0] if len(ret) > 0 else None
| def find_health_check(Id=None, Name=None, region=None, key=None, keyid=None, profile=None):
'''
Return detailed info about healthcheck with given Id or name.
name
The name of the health check to lookup.
region
Region to connect to.
key
Secret key to be used.
keyid
Access key to be used.
profile
Dict, or pillar key pointing to a dict, containing AWS region/key/keyid.
CLI Example:
.. code-block:: bash
salt myminion boto3_route53.find_health_checks ANAME \
profile='{"region": "us-east-1", "keyid": "A12345678AB", "key": "xblahblahblah"}'
'''
ret = find_health_checks(Id=Id, Name=Name, region=region, key=key, keyid=keyid, profile=profile)
if len(ret) > 1:
ids = [z['Id'] for z in ret]
raise SaltInvocationError(
            'Request matched more than one HealthCheck ({}). Refine your '
'criteria and try again.'.format(ids)
)
return ret[0] if ret else None
|
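find_health_check wraps the plural lookup and enforces an "at most one match" contract: several matches raise, none returns None. That contract is easy to factor out; the helper below is a generic sketch with invented names, not part of the Salt module.
def one_or_none(matches, what="HealthCheck"):
    """Return the single match, None when there is none, and raise when ambiguous."""
    if len(matches) > 1:
        ids = [m["Id"] for m in matches]
        raise ValueError("Request matched more than one {} ({}). "
                         "Refine your criteria and try again.".format(what, ids))
    return matches[0] if matches else None

print(one_or_none([]))                                # None
print(one_or_none([{"Id": "hc-1"}]))                  # {'Id': 'hc-1'}
try:
    one_or_none([{"Id": "hc-1"}, {"Id": "hc-2"}])
except ValueError as err:
    print(err)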
43,835 | def track(dev, version="default", **kwargs):
r"""Creates a tracking context and applies it to a device.
Args:
dev (~.Device): a PennyLane-compatible device
version (str): name of tracker to use. The current options are
`default` and `timing`.
Keyword Args:
reset_on_enter=True (bool): whether or not to reset information
entering the context
**Usage Information**
    Note that with backpropagation, this function should take ``qnode.device``
instead of the device used to create the QNode.
.. code-block:: python
dev = qml.device('default.qubit', wires=1)
@qml.qnode(dev, diff_method="parameter-shift")
def circuit(x):
qml.RX(x, wires=0)
return qml.expval(qml.PauliZ(0))
With the default version, total execution information is printed on
each device execution. The printed data depends on the device and tracker version,
but for standard PennyLane devices, the object will track executions and shots.
>>> with qml.track(circuit.device) as tracker:
... qml.grad(circuit)(0.1, shots=10)
Total: executions = 1 shots = 10
Total: executions = 2 shots = 20
Total: executions = 3 shots = 30
In with the ``'timing'`` implementation, the instance also tracks the time
between entering the context and the completion of an execution.
>>> with qml.track(circuit.device, version='timing') as timing_tracker:
... circuit(0.1)
... circuit(0.2)
Total: executions = 1 time = 0.0011134147644042969
Total: executions = 2 time = 0.0027322769165039062
After completion, one can also access the recorded information:
>>> timing_tracker.totals
defaultdict(int, {'executions': 2, 'shots': 30, 'time': 0.00311279296875})
>>> timing_tracker.history
defaultdict(list,
{'executions': [1, 1],
'shots': [None, None],
'time': [0.0012764930725097656, 0.0018362998962402344]})
    By specifying ``reset_on_enter=False``, you can reuse the same tracker across
multiple runtime contexts.
>>> with qml.track(circuit.device, reset_on_enter=False) as tracker:
... circuit(0.1)
Total: executions = 1
>>> with tracker:
... circuit(0.2)
Total: executions = 2
"""
if version == "timing":
return TimingTracker(dev, **kwargs)
elif version == "default":
return DefaultTracker(dev, **kwargs)
else:
raise qml.QuantumFunctionError(
f"version {version} supplied to track. " f"Current options are `timing` and `default`."
)
| def track(dev, version="default", **kwargs):
r"""Creates a tracking context and applies it to a device.
Args:
dev (~.Device): a PennyLane-compatible device
version (str): name of tracker to use. The current options are
`default` and `timing`.
Keyword Args:
reset_on_enter=True (bool): whether or not to reset information
entering the context
**Usage Information**
    Note that with backpropagation, this function should take ``qnode.device``
instead of the device used to create the QNode.
.. code-block:: python
dev = qml.device('default.qubit', wires=1)
@qml.qnode(dev, diff_method="parameter-shift")
def circuit(x):
qml.RX(x, wires=0)
return qml.expval(qml.PauliZ(0))
With the default version, total execution information is printed on
each device execution. The printed data depends on the device and tracker version,
but for standard PennyLane devices, the object will track executions and shots.
>>> with qml.track(circuit.device) as tracker:
... qml.grad(circuit)(0.1, shots=10)
Total: executions = 1 shots = 10
Total: executions = 2 shots = 20
Total: executions = 3 shots = 30
With the ``'timing'`` implementation, the instance also tracks the time
between entering the context and the completion of an execution.
>>> with qml.track(circuit.device, version='timing') as timing_tracker:
... circuit(0.1)
... circuit(0.2)
Total: executions = 1 time = 0.0011134147644042969
Total: executions = 2 time = 0.0027322769165039062
After completion, one can also access the recorded information:
>>> timing_tracker.totals
defaultdict(int, {'executions': 2, 'shots': 30, 'time': 0.00311279296875})
>>> timing_tracker.history
defaultdict(list,
{'executions': [1, 1],
'shots': [None, None],
'time': [0.0012764930725097656, 0.0018362998962402344]})
    By specifying ``reset_on_enter=False``, you can reuse the same tracker across
multiple runtime contexts.
>>> with qml.track(circuit.device, reset_on_enter=False) as tracker:
... circuit(0.1)
Total: executions = 1
>>> with tracker:
... circuit(0.2)
Total: executions = 2
"""
if version == "timing":
return TimingTracker(dev, **kwargs)
elif version == "default":
return DefaultTracker(dev, **kwargs)
else:
raise qml.QuantumFunctionError(
f"version {version} supplied to track. " f"Current options are `timing` and `default`."
)
|
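track attaches a counting tracker to a device through a context manager, and the docstring shows running totals printed per execution plus the reset_on_enter knob. The sketch below is a library-agnostic version of that pattern in plain Python; it is not the PennyLane Tracker implementation, and record() stands in for whatever callback the device would invoke.
from collections import defaultdict

class UsageTracker:
    """Counts quantities reported by a device-like object while the context is active."""

    def __init__(self, reset_on_enter=True):
        self.reset_on_enter = reset_on_enter
        self.totals = defaultdict(int)

    def __enter__(self):
        if self.reset_on_enter:
            self.totals.clear()
        return self

    def __exit__(self, *exc):
        return False

    def record(self, **counts):              # the device would call this after each execution
        for key, value in counts.items():
            self.totals[key] += value
        print("Total:", dict(self.totals))

with UsageTracker() as tracker:
    tracker.record(executions=1, shots=10)
    tracker.record(executions=1, shots=10)   # Total: {'executions': 2, 'shots': 20}

with UsageTracker(reset_on_enter=False) as tracker:
    tracker.record(executions=1)
with tracker:                                # reused without resetting
    tracker.record(executions=1)             # Total: {'executions': 2}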
30,180 | def execute_link(link_cmd_args, record_streams, quiet):
"""
<Purpose>
Executes the passed command plus arguments in a subprocess and returns
    the return value of the executed command. If specified, the standard output
and standard error of the command are recorded and also returned to the
caller.
<Arguments>
link_cmd_args:
A list where the first element is a command and the remaining
elements are arguments passed to that command.
record_streams:
        A bool that specifies whether to redirect standard output
and standard error to a temporary file which is returned to the
caller (True) or not (False).
<Exceptions>
TBA (see https://github.com/in-toto/in-toto/issues/6)
<Side Effects>
Executes passed command in a subprocess and redirects stdout and stderr
if specified.
<Returns>
- A dictionary containing standard output and standard error of the
executed command, called by-products.
Note: If record_streams is False, the dict values are empty strings.
- The return value of the executed command.
"""
if record_streams:
if (quiet == False): #record_streams true, quiet false
return_code, stdout_str, stderr_str = \
securesystemslib.process.run_duplicate_streams(link_cmd_args)
else: #record_streams true, quiet true
process = securesystemslib.process.run(link_cmd_args, check=False,
stdout=securesystemslib.process.PIPE,
stderr=securesystemslib.process.PIPE)
stdout_str = process.stdout
stderr_str = process.stderr
return_code = process.returncode
else:
if (quiet == False): #record_streams false, quiet false
process = securesystemslib.process.run(link_cmd_args, check=False,
stdout=None, stderr=None)
stdout_str = stderr_str = ""
return_code = process.returncode
else: #record_streams false, quiet true
process = securesystemslib.process.run(link_cmd_args, check=False,
stdout=securesystemslib.process.DEVNULL,
stderr=securesystemslib.process.DEVNULL)
stdout_str = stderr_str = ""
return_code = process.returncode
return {
"stdout": stdout_str,
"stderr": stderr_str,
"return-value": return_code
}
| def execute_link(link_cmd_args, record_streams, quiet=True):
"""
<Purpose>
Executes the passed command plus arguments in a subprocess and returns
    the return value of the executed command. If specified, the standard output
and standard error of the command are recorded and also returned to the
caller.
<Arguments>
link_cmd_args:
A list where the first element is a command and the remaining
elements are arguments passed to that command.
record_streams:
        A bool that specifies whether to redirect standard output
and standard error to a temporary file which is returned to the
caller (True) or not (False).
<Exceptions>
TBA (see https://github.com/in-toto/in-toto/issues/6)
<Side Effects>
Executes passed command in a subprocess and redirects stdout and stderr
if specified.
<Returns>
- A dictionary containing standard output and standard error of the
executed command, called by-products.
Note: If record_streams is False, the dict values are empty strings.
- The return value of the executed command.
"""
if record_streams:
if (quiet == False): #record_streams true, quiet false
return_code, stdout_str, stderr_str = \
securesystemslib.process.run_duplicate_streams(link_cmd_args)
else: #record_streams true, quiet true
process = securesystemslib.process.run(link_cmd_args, check=False,
stdout=securesystemslib.process.PIPE,
stderr=securesystemslib.process.PIPE)
stdout_str = process.stdout
stderr_str = process.stderr
return_code = process.returncode
else:
if (quiet == False): #record_streams false, quiet false
process = securesystemslib.process.run(link_cmd_args, check=False,
stdout=None, stderr=None)
stdout_str = stderr_str = ""
return_code = process.returncode
else: #record_streams false, quiet true
process = securesystemslib.process.run(link_cmd_args, check=False,
stdout=securesystemslib.process.DEVNULL,
stderr=securesystemslib.process.DEVNULL)
stdout_str = stderr_str = ""
return_code = process.returncode
return {
"stdout": stdout_str,
"stderr": stderr_str,
"return-value": return_code
}
|
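execute_link distinguishes four cases: record the streams or not, and quiet or not. The same decision table can be written against the standard library directly; in this sketch subprocess.run replaces securesystemslib's wrappers, so the record-and-print case captures output and prints it afterwards rather than teeing it live.
import subprocess
import sys

def run_command(cmd, record_streams, quiet=True):
    """Run cmd and return stdout/stderr (when recorded) plus the return code."""
    if record_streams:
        proc = subprocess.run(cmd, check=False, capture_output=True, text=True)
        stdout_str, stderr_str = proc.stdout, proc.stderr
        if not quiet:
            print(stdout_str, end="")              # echo captured output after the fact
    else:
        target = subprocess.DEVNULL if quiet else None   # None inherits the console
        proc = subprocess.run(cmd, check=False, stdout=target, stderr=target)
        stdout_str = stderr_str = ""
    return {"stdout": stdout_str, "stderr": stderr_str, "return-value": proc.returncode}

print(run_command([sys.executable, "-c", "print('hi')"], record_streams=True))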
3,250 | def _do_save_event(
cache_key=None, data=None, start_time=None, event_id=None, project_id=None, **kwargs
):
"""
Saves an event to the database.
"""
set_current_project(project_id)
from sentry.event_manager import EventManager, HashDiscarded
event_type = "none"
if cache_key and data is None:
with metrics.timer("tasks.store.do_save_event.get_cache") as metric_tags:
data = event_processing_store.get(cache_key)
if data is not None:
metric_tags["event_type"] = event_type = data.get("type") or "none"
with metrics.global_tags(event_type=event_type):
if data is not None:
data = CanonicalKeyDict(data)
if event_id is None and data is not None:
event_id = data["event_id"]
# only when we come from reprocessing we get a project_id sent into
# the task.
if project_id is None:
project_id = data.pop("project")
set_current_project(project_id)
# We only need to delete raw events for events that support
# reprocessing. If the data cannot be found we want to assume
# that we need to delete the raw event.
if not data or reprocessing.event_supports_reprocessing(data):
with metrics.timer("tasks.store.do_save_event.delete_raw_event"):
delete_raw_event(project_id, event_id, allow_hint_clear=True)
# This covers two cases: where data is None because we did not manage
# to fetch it from the default cache or the empty dictionary was
# stored in the default cache. The former happens if the event
# expired while being on the queue, the second happens on reprocessing
# if the raw event was deleted concurrently while we held on to
# it. This causes the node store to delete the data and we end up
# fetching an empty dict. We could in theory not invoke `save_event`
# in those cases but it's important that we always clean up the
# reprocessing reports correctly or they will screw up the UI. So
# to future proof this correctly we just handle this case here.
if not data:
metrics.incr(
"events.failed", tags={"reason": "cache", "stage": "post"}, skip_internal=False
)
return
try:
with metrics.timer("tasks.store.do_save_event.event_manager.save"):
manager = EventManager(data)
# event.project.organization is populated after this statement.
manager.save(
project_id, assume_normalized=True, start_time=start_time, cache_key=cache_key
)
except HashDiscarded:
pass
finally:
if cache_key:
with metrics.timer("tasks.store.do_save_event.delete_cache"):
event_processing_store.delete_by_key(cache_key)
with metrics.timer("tasks.store.do_save_event.delete_attachment_cache"):
attachment_cache.delete(cache_key)
if start_time:
metrics.timing(
"events.time-to-process", time() - start_time, instance=data["platform"]
)
time_internal_metrics_event(data, project_id)
| def _do_save_event(
cache_key=None, data=None, start_time=None, event_id=None, project_id=None, **kwargs
):
"""
Saves an event to the database.
"""
set_current_project(project_id)
from sentry.event_manager import EventManager, HashDiscarded
event_type = "none"
if cache_key and data is None:
with metrics.timer("tasks.store.do_save_event.get_cache") as metric_tags:
data = event_processing_store.get(cache_key)
if data is not None:
metric_tags["event_type"] = event_type = data.get("type") or "none"
with metrics.global_tags(event_type=event_type):
if data is not None:
data = CanonicalKeyDict(data)
if event_id is None and data is not None:
event_id = data["event_id"]
# only when we come from reprocessing we get a project_id sent into
# the task.
if project_id is None:
project_id = data.pop("project")
set_current_project(project_id)
# We only need to delete raw events for events that support
# reprocessing. If the data cannot be found we want to assume
# that we need to delete the raw event.
if not data or reprocessing.event_supports_reprocessing(data):
with metrics.timer("tasks.store.do_save_event.delete_raw_event"):
delete_raw_event(project_id, event_id, allow_hint_clear=True)
# This covers two cases: where data is None because we did not manage
# to fetch it from the default cache or the empty dictionary was
# stored in the default cache. The former happens if the event
# expired while being on the queue, the second happens on reprocessing
# if the raw event was deleted concurrently while we held on to
# it. This causes the node store to delete the data and we end up
# fetching an empty dict. We could in theory not invoke `save_event`
# in those cases but it's important that we always clean up the
# reprocessing reports correctly or they will screw up the UI. So
# to future proof this correctly we just handle this case here.
if not data:
metrics.incr(
"events.failed", tags={"reason": "cache", "stage": "post"}, skip_internal=False
)
return
try:
with metrics.timer("tasks.store.do_save_event.event_manager.save"):
manager = EventManager(data)
# event.project.organization is populated after this statement.
manager.save(
project_id, assume_normalized=True, start_time=start_time, cache_key=cache_key
)
except HashDiscarded:
pass
finally:
if cache_key:
with metrics.timer("tasks.store.do_save_event.delete_cache"):
event_processing_store.delete_by_key(cache_key)
with metrics.timer("tasks.store.do_save_event.delete_attachment_cache"):
attachment_cache.delete(cache_key)
if start_time:
metrics.timing(
"events.time-to-process", time() - start_time, instance=data["platform"]
)
_time_internal_metrics_event(data, project_id, start_time)
|
5,298 | def _load_fasttext_format(model_file, encoding='utf-8', full_model=True):
"""Load the input-hidden weight matrix from Facebook's native fasttext `.bin` output files.
Parameters
----------
model_file : str
Full path to the FastText model file.
encoding : str, optional
Specifies the file encoding.
full_model : boolean, optional
If False, skips loading the hidden output matrix. This saves a fair bit
of CPU time and RAM, but prevents training continuation.
Returns
-------
:class: `~gensim.models.fasttext.FastText`
The loaded model.
"""
with utils.open(model_file, 'rb') as fin:
m = gensim.models._fasttext_bin.load(fin, encoding=encoding, full_model=full_model)
# Here we are checking for unsupported FB FT Modes
if m.loss != 1 and m.loss != 2:
raise ValueError("Loss paramter value can be either 1 (for Hierarchical Softmax) or 2 (for Negative Sampling)")
elif m.model != 1 and m.model != 2:
raise ValueError(
"Model parameter value can be either 1 (for Continous Bag of Words model) or 2 (for Skip-gram model)")
model = FastText(
vector_size=m.dim,
window=m.ws,
epochs=m.epoch,
negative=m.neg,
hs=int(m.loss == 1),
sg=int(m.model == 2),
bucket=m.bucket,
min_count=m.min_count,
sample=m.t,
min_n=m.minn,
max_n=m.maxn,
)
model.corpus_total_words = m.ntokens
model.raw_vocab = m.raw_vocab
model.nwords = m.nwords
model.vocab_size = m.vocab_size
#
# This is here to fix https://github.com/RaRe-Technologies/gensim/pull/2373.
#
# We explicitly set min_count=1 regardless of the model's parameters to
# ignore the trim rule when building the vocabulary. We do this in order
# to support loading native models that were trained with pretrained vectors.
# Such models will contain vectors for _all_ encountered words, not only
# those occurring more frequently than min_count.
#
# Native models trained _without_ pretrained vectors already contain the
# trimmed raw_vocab, so this change does not affect them.
#
model.prepare_vocab(update=True, min_count=1)
model.num_original_vectors = m.vectors_ngrams.shape[0]
model.wv.init_post_load(m.vectors_ngrams)
model._init_post_load(m.hidden_output)
_check_model(model)
model.add_lifecycle_event(
"load_fasttext_format",
msg=f"loaded {m.vectors_ngrams.shape} weight matrix for fastText model from {fin.name}",
)
return model
| def _load_fasttext_format(model_file, encoding='utf-8', full_model=True):
"""Load the input-hidden weight matrix from Facebook's native fasttext `.bin` output files.
Parameters
----------
model_file : str
Full path to the FastText model file.
encoding : str, optional
Specifies the file encoding.
full_model : boolean, optional
If False, skips loading the hidden output matrix. This saves a fair bit
of CPU time and RAM, but prevents training continuation.
Returns
-------
:class: `~gensim.models.fasttext.FastText`
The loaded model.
"""
with utils.open(model_file, 'rb') as fin:
m = gensim.models._fasttext_bin.load(fin, encoding=encoding, full_model=full_model)
# Here we are checking for unsupported FB FT Modes
if m.loss != 1 and m.loss != 2:
raise ValueError("The fasttext `loss` parameter must be either 1 (for Hierarchical Softmax) or 2 (for Negative Sampling)")
elif m.model != 1 and m.model != 2:
raise ValueError(
"Model parameter value can be either 1 (for Continous Bag of Words model) or 2 (for Skip-gram model)")
model = FastText(
vector_size=m.dim,
window=m.ws,
epochs=m.epoch,
negative=m.neg,
hs=int(m.loss == 1),
sg=int(m.model == 2),
bucket=m.bucket,
min_count=m.min_count,
sample=m.t,
min_n=m.minn,
max_n=m.maxn,
)
model.corpus_total_words = m.ntokens
model.raw_vocab = m.raw_vocab
model.nwords = m.nwords
model.vocab_size = m.vocab_size
#
# This is here to fix https://github.com/RaRe-Technologies/gensim/pull/2373.
#
# We explicitly set min_count=1 regardless of the model's parameters to
# ignore the trim rule when building the vocabulary. We do this in order
# to support loading native models that were trained with pretrained vectors.
# Such models will contain vectors for _all_ encountered words, not only
# those occurring more frequently than min_count.
#
# Native models trained _without_ pretrained vectors already contain the
# trimmed raw_vocab, so this change does not affect them.
#
model.prepare_vocab(update=True, min_count=1)
model.num_original_vectors = m.vectors_ngrams.shape[0]
model.wv.init_post_load(m.vectors_ngrams)
model._init_post_load(m.hidden_output)
_check_model(model)
model.add_lifecycle_event(
"load_fasttext_format",
msg=f"loaded {m.vectors_ngrams.shape} weight matrix for fastText model from {fin.name}",
)
return model
|
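A minimal usage sketch for the loader above, assuming gensim is installed and a Facebook-native fastText binary exists at a hypothetical local path; load_facebook_model is the public wrapper that delegates to this helper.

from gensim.models.fasttext import load_facebook_model

model = load_facebook_model("model.bin")  # hypothetical path to a native fastText .bin
vector = model.wv["example"]              # works for out-of-vocabulary words too, via char n-grams
print(vector.shape)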
59,041 | def fold(vyper_ast_node: vy_ast.Module) -> None:
"""
Perform literal folding operations on a Vyper AST.
Arguments
---------
vyper_ast_node : Module
Top-level Vyper AST node.
"""
while True:
changed_nodes = 0
changed_nodes += replace_literal_ops(vyper_ast_node)
changed_nodes += replace_subscripts(vyper_ast_node)
if not changed_nodes:
return
| def fold(vyper_ast_node: vy_ast.Module) -> None:
"""
Perform literal folding operations on a Vyper AST.
Arguments
---------
vyper_ast_node : Module
Top-level Vyper AST node.
"""
changed_nodes = 42 # non-zero value to start the loop
while changed_nodes > 0:
changed_nodes = 0
changed_nodes += replace_literal_ops(vyper_ast_node)
changed_nodes += replace_subscripts(vyper_ast_node)
|
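Both variants above express the same fixed-point idea: re-run the folding passes until a full sweep reports zero replacements. A generic, self-contained sketch of that pattern (the names are illustrative, not vyper's API):

def fold_until_stable(node, passes):
    # Apply every rewrite pass repeatedly until a complete sweep changes nothing.
    changed = 1  # any non-zero value enters the loop
    while changed:
        changed = sum(rewrite(node) for rewrite in passes)
    return node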
57,904 | def main() -> None:
"""main function, parses params and runs command functions
:return:
:rtype:
"""
org_id = demisto.params().get("org_id")
access_token = demisto.params().get("access_token")
if not org_id or not access_token:
return_error("Both organization id and access token must be set")
# get the service API url
base_url = "https://www.shiftleft.io/api/v4" # disable-secrets-detection
verify_certificate = True
# if your Client class inherits from BaseClient, system proxy is handled
# out of the box by it, just pass ``proxy`` to the Client constructor
proxy = demisto.params().get("proxy", False)
demisto.debug(f"Command being called is {demisto.command()}")
try:
headers: Dict = {
"Content-Type": "application/json",
"Authorization": f"Bearer {access_token}",
}
client = ShiftLeftClient(
base_url=base_url, verify=verify_certificate, headers=headers, proxy=proxy
)
if demisto.command() == "test-module":
# This is the call made when pressing the integration Test button.
result = test_module(client, org_id)
return_results(result)
elif demisto.command() == "shiftleft-list-app-findings":
return_results(list_app_findings_command(client, org_id, demisto.args()))
elif demisto.command() == "shiftleft-list-app-secrets":
return_results(list_app_secrets_command(client, org_id, demisto.args()))
elif demisto.command() == "shiftleft-list-apps":
return_results(list_apps_command(client, org_id, demisto.args()))
# Log exceptions and return errors
except Exception:
demisto.error(traceback.format_exc()) # print the traceback
| def main() -> None:
"""main function, parses params and runs command functions
:return:
:rtype:
"""
org_id = demisto.params().get("org_id")
access_token = demisto.params().get("access_token")
if not org_id or not access_token:
return_error("Both organization id and access token must be set")
# get the service API url
base_url = "https://www.shiftleft.io/api/v4" # disable-secrets-detection
verify_certificate = True
# if your Client class inherits from BaseClient, system proxy is handled
# out of the box by it, just pass ``proxy`` to the Client constructor
proxy = demisto.params().get("proxy", False)
demisto.debug(f"Command being called is {demisto.command()}")
try:
headers: Dict = {
"Content-Type": "application/json",
"Authorization": f"Bearer {access_token}",
}
client = ShiftLeftClient(
base_url=base_url, verify=verify_certificate, headers=headers, proxy=proxy
)
command = demisto.command()
if command == "test-module":
# This is the call made when pressing the integration Test button.
result = test_module(client, org_id)
return_results(result)
elif command == "shiftleft-list-app-findings":
return_results(list_app_findings_command(client, org_id, demisto.args()))
elif command == "shiftleft-list-app-secrets":
return_results(list_app_secrets_command(client, org_id, demisto.args()))
elif command == "shiftleft-list-apps":
return_results(list_apps_command(client, org_id))
# Log exceptions and return errors
except Exception:
demisto.error(traceback.format_exc()) # print the traceback
|
33,031 | def find_address(args: CommandLineArguments) -> Tuple[str, str]:
name = virt_name(args)
last_exception = None
timeout = float(args.ssh_timeout)
while timeout >= 0:
last_exception = None
stime = time.time()
try:
if interface_exists(f"ve-{name}"):
dev = f"ve-{name}"
elif interface_exists(f"vt-{name}"):
dev = f"vt-{name}"
else:
raise MkosiException("Container/VM interface not found")
link = json.loads(run(["ip", "-j", "link", "show", "dev", dev], stdout=PIPE, text=True).stdout)[0]
if link["operstate"] == "DOWN":
raise MkosiException(
f"{dev} is not enabled. Make sure systemd-networkd is running so it can manage the interface."
)
# Trigger IPv6 neighbor discovery of which we can access the results via `ip neighbor`. This allows us to
# find out the link-local IPv6 address of the container/VM via which we can connect to it.
run(["ping", "-c", "1", "-w", "15", f"ff02::1%{dev}"], stdout=DEVNULL)
for _ in range(50):
neighbors = json.loads(
run(["ip", "-j", "neighbor", "show", "dev", dev], stdout=PIPE, text=True).stdout
)
for neighbor in neighbors:
dst = cast(str, neighbor["dst"])
if dst.startswith("fe80"):
return dev, dst
time.sleep(0.4)
except MkosiException as e:
last_exception = str(e)
time.sleep(1)
timeout = timeout - (time.time() - stime)
die(last_exception or "Container/VM address not found")
| def find_address(args: CommandLineArguments) -> Tuple[str, str]:
name = virt_name(args)
last_exception = None
timeout = float(args.ssh_timeout)
while timeout >= 0:
last_exception = None
stime = time.time()
try:
if interface_exists(f"ve-{name}"):
dev = f"ve-{name}"
elif interface_exists(f"vt-{name}"):
dev = f"vt-{name}"
else:
raise MkosiException("Container/VM interface not found")
link = json.loads(run(["ip", "-j", "link", "show", "dev", dev], stdout=PIPE, text=True).stdout)[0]
if link["operstate"] == "DOWN":
raise MkosiException(
f"{dev} is not enabled. Make sure systemd-networkd is running so it can manage the interface."
)
# Trigger IPv6 neighbor discovery of which we can access the results via `ip neighbor`. This allows us to
# find out the link-local IPv6 address of the container/VM via which we can connect to it.
run(["ping", "-c", "1", "-w", "15", f"ff02::1%{dev}"], stdout=DEVNULL)
for _ in range(50):
neighbors = json.loads(
run(["ip", "-j", "neighbor", "show", "dev", dev], stdout=PIPE, text=True).stdout
)
for neighbor in neighbors:
dst = cast(str, neighbor["dst"])
if dst.startswith("fe80"):
return dev, dst
time.sleep(0.4)
except MkosiException as e:
last_exception = str(e)
time.sleep(1)
timeout -= time.time() - stime
die(last_exception or "Container/VM address not found")
|
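The helper above follows a retry-with-wall-clock-budget pattern: each attempt's elapsed time is subtracted from the remaining timeout and the last error message is kept for the final report. A stripped-down, self-contained sketch of that pattern (the exception type and sleep interval are placeholders):

import time

def retry_with_budget(attempt, timeout, delay=1.0):
    last_error = None
    while timeout >= 0:
        start = time.time()
        try:
            return attempt()
        except RuntimeError as exc:  # stand-in for MkosiException
            last_error = str(exc)
            time.sleep(delay)
        timeout -= time.time() - start
    raise RuntimeError(last_error or "operation timed out")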
8,504 | def asbool(obj):
if isinstance(obj, basestring):
obj = obj.strip().lower()
if obj in truthy:
return True
elif obj in falsy:
return False
else:
raise ValueError(u"String is not true/false: {}".format(obj))
return bool(obj)
| def asbool(obj):
if isinstance(obj, six.string_types):
obj = obj.strip().lower()
if obj in truthy:
return True
elif obj in falsy:
return False
else:
raise ValueError(u"String is not true/false: {}".format(obj))
return bool(obj)
|
13,540 | def projection_shifts_init(A, E, B, shift_options):
"""Find starting shift parameters for low-rank ADI iteration using
Galerkin projection on spaces spanned by LR-ADI iterates.
See [PK16]_, pp. 92-95.
Parameters
----------
A
The |Operator| A from the corresponding Lyapunov equation.
E
The |Operator| E from the corresponding Lyapunov equation.
B
The |VectorArray| B from the corresponding Lyapunov equation.
shift_options
The shift options to use (see :func:`lyap_lrcf_solver_options`).
Returns
-------
shifts
A |NumPy array| containing a set of stable shift parameters.
"""
random_state = get_random_state(seed=shift_options['init_seed'])
for i in range(shift_options['init_maxiter']):
Q = gram_schmidt(B, atol=0, rtol=0)
shifts = spla.eigvals(A.apply2(Q, Q), E.apply2(Q, Q))
shifts = shifts[shifts.real < 0]
if shifts.size == 0:
# use random subspace instead of span{B} (with same dimensions)
B = B.random(len(B), distribution='normal', random_state=random_state)
else:
return shifts
raise RuntimeError('Could not generate initial shifts for low-rank ADI iteration.')
| def projection_shifts_init(A, E, B, shift_options):
"""Find starting shift parameters for low-rank ADI iteration using
Galerkin projection on spaces spanned by LR-ADI iterates.
See [PK16]_, pp. 92-95.
Parameters
----------
A
The |Operator| A from the corresponding Lyapunov equation.
E
The |Operator| E from the corresponding Lyapunov equation.
B
The |VectorArray| B from the corresponding Lyapunov equation.
shift_options
The shift options to use (see :func:`lyap_lrcf_solver_options`).
Returns
-------
shifts
A |NumPy array| containing a set of stable shift parameters.
"""
random_state = get_random_state(seed=shift_options['init_seed'])
for i in range(shift_options['init_maxiter']):
Q = gram_schmidt(B, atol=0, rtol=0)
shifts = spla.eigvals(A.apply2(Q, Q), E.apply2(Q, Q))
shifts = shifts[shifts.real < 0]
if shifts.size == 0:
# use random subspace instead of span{B} (with same dimensions)
B = B.space.random(len(B), distribution='normal', random_state=random_state)
else:
return shifts
raise RuntimeError('Could not generate initial shifts for low-rank ADI iteration.')
|
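A tiny numeric illustration of the shift-selection step above: compute the generalized eigenvalues of the projected pencil and keep only the stable ones (the matrices here are made up for the example):

import numpy as np
import scipy.linalg as spla

A = np.array([[-1.0, 2.0],
              [0.0, -3.0]])
E = np.eye(2)
shifts = spla.eigvals(A, E)       # generalized eigenvalues of the pencil (A, E)
stable = shifts[shifts.real < 0]  # keep the left half-plane, as in the function above
print(stable)                     # both eigenvalues are stable here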
42,930 | def edge_coords(graph: nx.Graph, l: dict) -> dict:
""" Provides the coordinates for the graph edges when given an input graph layout.
Args:
graph (nx.Graph): input graph
        l (dict): dictionary of nodes and their respective coordinates
Returns:
dict: x and y coordinates for beginning and end of each edge
"""
e_x = []
e_y = []
for e in graph.edges():
start_x, start_y = l[e[0]]
end_x, end_y = l[e[1]]
e_x.append(start_x)
e_x.append(end_x)
e_y.append(start_y)
e_y.append(end_y)
e_x.append(None)
e_y.append(None)
return {"x": e_x, "y": e_y}
| def edge_coords(graph: nx.Graph, l: dict) -> dict:
"""Provides the coordinates for the graph edges when given an input graph layout.
Args:
graph (nx.Graph): input graph
        l (dict): dictionary of nodes and their respective coordinates
Returns:
dict: x and y coordinates for beginning and end of each edge
"""
e_x = []
e_y = []
for e in graph.edges():
start_x, start_y = l[e[0]]
end_x, end_y = l[e[1]]
e_x.append(start_x)
e_x.append(end_x)
e_y.append(start_y)
e_y.append(end_y)
e_x.append(None)
e_y.append(None)
return {"x": e_x, "y": e_y}
|
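The (start, end, None) flattening above is the layout plotting libraries such as Plotly expect for drawing many disjoint segments in a single trace. A self-contained toy version with a hand-written node layout:

import networkx as nx

graph = nx.path_graph(3)
layout = {0: (0.0, 0.0), 1: (1.0, 0.0), 2: (1.0, 1.0)}  # node -> (x, y)
e_x, e_y = [], []
for u, v in graph.edges():
    e_x += [layout[u][0], layout[v][0], None]
    e_y += [layout[u][1], layout[v][1], None]
print(e_x)  # [0.0, 1.0, None, 1.0, 1.0, None]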
7,665 | def session_to_ical(session, detail_level='sessions'):
"""Serialize a contribution into an ical.
:param event: The contribution to serialize
"""
calendar = Calendar()
calendar.add('version', '2.0')
calendar.add('prodid', '-//CERN//INDICO//EN')
related_event_uid = 'indico-event-{}@{}'.format(session.event.id, url_parse(config.BASE_URL).host)
if detail_level == 'sessions':
component = generate_session_component(session, related_event_uid)
calendar.add_component(component)
elif detail_level == 'contributions':
from indico.modules.events.contributions.ical import generate_contribution_component
components = [generate_contribution_component(contribution, related_event_uid)
for contribution in session.contributions]
for component in components:
calendar.add_component(component)
data = calendar.to_ical()
return data
| def session_to_ical(session, detail_level='sessions'):
"""Serialize a contribution into an ical.
:param event: The contribution to serialize
"""
calendar = Calendar()
calendar.add('version', '2.0')
calendar.add('prodid', '-//CERN//INDICO//EN')
related_event_uid = 'indico-event-{}@{}'.format(session.event.id, url_parse(config.BASE_URL).host)
if detail_level == 'sessions':
component = generate_session_component(session, related_event_uid)
calendar.add_component(component)
elif detail_level == 'contributions':
from indico.modules.events.contributions.ical import generate_contribution_component
components = [generate_contribution_component(contribution, related_event_uid)
for contribution in session.contributions]
for component in components:
calendar.add_component(component)
return calendar.to_ical()
|
31,929 | def main():
incident = demisto.incident()
if not incident:
raise ValueError("Error - demisto.incident() expected to return current incident "
"from context but returned None")
custom_fields = incident.get('CustomFields', {})
identity_results_str = custom_fields.get('identitytable', {})
is_successful = custom_fields.get('successfuldrilldownenrichment', '')
if is_successful == 'false':
return {'ContentsFormat': formats['markdown'], 'Contents': 'Identity enrichment failed.'}
identity_results = json.loads(identity_results_str)
if not identity_results:
return {'ContentsFormat': formats['markdown'], 'Contents': 'No users were found in notable.'}
if isinstance(identity_results, list):
events_arr = []
for event in identity_results:
events_arr.append(event)
markdown = tableToMarkdown("", events_arr, headers=events_arr[0].keys())
else:
markdown = tableToMarkdown("", identity_results)
return {'ContentsFormat': formats['markdown'], 'Type': entryTypes['note'], 'Contents': markdown}
| def main():
incident = demisto.incident()
if not incident:
raise ValueError("Error - demisto.incident() expected to return current incident "
"from context but returned None")
custom_fields = incident.get('CustomFields', {})
identity_results_str = custom_fields.get('identitytable', {})
is_successful = custom_fields.get('successfuldrilldownenrichment', '')
if is_successful == 'false':
return {'ContentsFormat': formats['markdown'], 'Contents': 'Identity enrichment failed.'}
identity_results = json.loads(identity_results_str)
if not identity_results:
return {'ContentsFormat': formats['markdown'], 'Contents': 'No users were found in the notable.'}
if isinstance(identity_results, list):
events_arr = []
for event in identity_results:
events_arr.append(event)
markdown = tableToMarkdown("", events_arr, headers=events_arr[0].keys())
else:
markdown = tableToMarkdown("", identity_results)
return {'ContentsFormat': formats['markdown'], 'Type': entryTypes['note'], 'Contents': markdown}
|
32,189 | def remove_hash_from_blocklist(client: Client, args: dict) -> CommandResults:
"""
Remove a hash from the blocklist (SentinelOne Term: Blacklist)
"""
sha1 = args.get('sha1')
if not sha1:
raise DemistoException("You must specify a valid Sha1 hash")
os_type = args.get('os_type', None)
hash_ids = get_hash_ids_from_blocklist(client, sha1, os_type)
if not hash_ids:
status = {
'hash': sha1,
'status': "Not on blocklist"
}
result = None
else:
result = []
numRemoved = 0
for hash_id in hash_ids:
numRemoved += 1
result.append(client.remove_hash_from_blocklist_request(hash_id=hash_id))
status = {
'hash': sha1,
'status': f"Removed {numRemoved} entries from blocklist"
}
return CommandResults(
readable_output=f"Removed hash {sha1} from the blocklist, or it was already absent",
outputs_prefix='SentinelOne.RemoveHashFromBlocklist',
outputs_key_field='Value',
outputs=status,
raw_response=result)
| def remove_hash_from_blocklist(client: Client, args: dict) -> CommandResults:
"""
Remove a hash from the blocklist (SentinelOne Term: Blacklist)
"""
sha1 = args.get('sha1')
if not sha1:
raise DemistoException("You must specify a valid Sha1 hash")
os_type = args.get('os_type')
hash_ids = get_hash_ids_from_blocklist(client, sha1, os_type)
if not hash_ids:
status = {
'hash': sha1,
'status': "Not on blocklist"
}
result = None
else:
result = []
numRemoved = 0
for hash_id in hash_ids:
numRemoved += 1
result.append(client.remove_hash_from_blocklist_request(hash_id=hash_id))
status = {
'hash': sha1,
'status': f"Removed {numRemoved} entries from blocklist"
}
return CommandResults(
readable_output=f"Removed hash {sha1} from the blocklist, or it was already absent",
outputs_prefix='SentinelOne.RemoveHashFromBlocklist',
outputs_key_field='Value',
outputs=status,
raw_response=result)
|
45,916 | def homography_warp(
patch_src: torch.Tensor,
src_homo_dst: torch.Tensor,
dsize: Tuple[int, int],
mode: str = 'bilinear',
padding_mode: str = 'zeros',
align_corners: bool = False,
normalized_coordinates: bool = True,
normalized_homography: bool = True,
) -> torch.Tensor:
r"""Warp image patches or tensors by normalized 2D homographies.
See :class:`~kornia.geometry.warp.HomographyWarper` for details.
Args:
patch_src: The image or tensor to warp. Should be from source of shape
if homography normalized :math:`(N, C, H, W)`.
if homography not normalized :math:`(B, C, H, W)`
src_homo_dst: The homography or stack of homographies from destination to source of shape
if homography normalized :math:`(N, 3, 3)`
if homography not normalized :math:`(B, 3, 3)`.
dsize:
if homography normalized: The height and width of the image to warp.
if homography not normalized: size of the output image (height, width).
mode: interpolation mode to calculate output values ``'bilinear'`` | ``'nearest'``.
padding_mode: padding mode for outside grid values ``'zeros'`` | ``'border'`` | ``'reflection'``.
align_corners: interpolation flag.
normalized_coordinates: Whether the homography assumes [-1, 1] normalized coordinates or not.
        normalized_homography: whether the homography is normalized.
Return:
Patch sampled at locations from source to destination.
Example_1:
>>> input = torch.rand(1, 3, 32, 32)
>>> homography = torch.eye(3).view(1, 3, 3)
>>> output = homography_warp(input, homography, (32, 32))
Example_2
>>> img = torch.rand(1, 4, 5, 6)
>>> H = torch.eye(3)[None]
>>> out = homography_warp(img, H, (4, 2), align_corners=True, normalized_homography=False)
>>> print(out.shape)
torch.Size([1, 4, 4, 2])
"""
if normalized_homography:
if not src_homo_dst.device == patch_src.device:
raise TypeError(
"Patch and homography must be on the same device. \
Got patch.device: {} src_H_dst.device: {}.".format(
patch_src.device, src_homo_dst.device
)
)
height, width = dsize
grid = create_meshgrid(height, width, normalized_coordinates=normalized_coordinates)
warped_grid = warp_grid(grid, src_homo_dst)
return F.grid_sample(patch_src, warped_grid, mode=mode, padding_mode=padding_mode, align_corners=align_corners)
else:
mode = 'bilinear'
align_corners = True
if not isinstance(patch_src, torch.Tensor):
raise TypeError(f"Input src type is not a torch.Tensor. Got {type(patch_src)}")
if not isinstance(src_homo_dst, torch.Tensor):
raise TypeError(f"Input M type is not a torch.Tensor. Got {type(src_homo_dst)}")
if not len(patch_src.shape) == 4:
raise ValueError(f"Input src must be a BxCxHxW tensor. Got {patch_src.shape}")
if not (len(src_homo_dst.shape) == 3 and src_homo_dst.shape[-2:] == (3, 3)):
raise ValueError(f"Input M must be a Bx3x3 tensor. Got {src_homo_dst.shape}")
B, _, H, W = patch_src.size()
h_out, w_out = dsize
# we normalize the 3x3 transformation matrix and convert to 3x4
dst_norm_trans_src_norm: torch.Tensor = normalize_homography(src_homo_dst, (H, W), (h_out, w_out)) # Bx3x3
src_norm_trans_dst_norm = _torch_inverse_cast(dst_norm_trans_src_norm) # Bx3x3
# this piece of code substitutes F.affine_grid since it does not support 3x3
grid = (
create_meshgrid(h_out, w_out, normalized_coordinates=True, device=patch_src.device).to(patch_src.dtype).repeat(B, 1, 1, 1))
grid = transform_points(src_norm_trans_dst_norm[:, None, None], grid)
return F.grid_sample(patch_src, grid, align_corners=align_corners, mode=mode, padding_mode=padding_mode)
| def homography_warp(
patch_src: torch.Tensor,
src_homo_dst: torch.Tensor,
dsize: Tuple[int, int],
mode: str = 'bilinear',
padding_mode: str = 'zeros',
align_corners: bool = False,
normalized_coordinates: bool = True,
normalized_homography: bool = True,
) -> torch.Tensor:
r"""Warp image patches or tensors by normalized 2D homographies.
See :class:`~kornia.geometry.warp.HomographyWarper` for details.
Args:
patch_src: The image or tensor to warp. Should be from source of shape
if homography normalized :math:`(N, C, H, W)`.
if homography not normalized :math:`(B, C, H, W)`
src_homo_dst: The homography or stack of homographies from destination to source of shape
if homography normalized :math:`(N, 3, 3)`
if homography not normalized :math:`(B, 3, 3)`.
dsize:
if homography normalized: The height and width of the image to warp.
if homography not normalized: size of the output image (height, width).
mode: interpolation mode to calculate output values ``'bilinear'`` | ``'nearest'``.
padding_mode: padding mode for outside grid values ``'zeros'`` | ``'border'`` | ``'reflection'``.
align_corners: interpolation flag.
normalized_coordinates: Whether the homography assumes [-1, 1] normalized coordinates or not.
        normalized_homography: whether the homography is normalized.
Return:
Patch sampled at locations from source to destination.
Example:
>>> input = torch.rand(1, 3, 32, 32)
>>> homography = torch.eye(3).view(1, 3, 3)
>>> output = homography_warp(input, homography, (32, 32))
Example_2
>>> img = torch.rand(1, 4, 5, 6)
>>> H = torch.eye(3)[None]
>>> out = homography_warp(img, H, (4, 2), align_corners=True, normalized_homography=False)
>>> print(out.shape)
torch.Size([1, 4, 4, 2])
"""
if normalized_homography:
if not src_homo_dst.device == patch_src.device:
raise TypeError(
"Patch and homography must be on the same device. \
Got patch.device: {} src_H_dst.device: {}.".format(
patch_src.device, src_homo_dst.device
)
)
height, width = dsize
grid = create_meshgrid(height, width, normalized_coordinates=normalized_coordinates)
warped_grid = warp_grid(grid, src_homo_dst)
return F.grid_sample(patch_src, warped_grid, mode=mode, padding_mode=padding_mode, align_corners=align_corners)
else:
mode = 'bilinear'
align_corners = True
if not isinstance(patch_src, torch.Tensor):
raise TypeError(f"Input src type is not a torch.Tensor. Got {type(patch_src)}")
if not isinstance(src_homo_dst, torch.Tensor):
raise TypeError(f"Input M type is not a torch.Tensor. Got {type(src_homo_dst)}")
if not len(patch_src.shape) == 4:
raise ValueError(f"Input src must be a BxCxHxW tensor. Got {patch_src.shape}")
if not (len(src_homo_dst.shape) == 3 and src_homo_dst.shape[-2:] == (3, 3)):
raise ValueError(f"Input M must be a Bx3x3 tensor. Got {src_homo_dst.shape}")
B, _, H, W = patch_src.size()
h_out, w_out = dsize
# we normalize the 3x3 transformation matrix and convert to 3x4
dst_norm_trans_src_norm: torch.Tensor = normalize_homography(src_homo_dst, (H, W), (h_out, w_out)) # Bx3x3
src_norm_trans_dst_norm = _torch_inverse_cast(dst_norm_trans_src_norm) # Bx3x3
# this piece of code substitutes F.affine_grid since it does not support 3x3
grid = (
create_meshgrid(h_out, w_out, normalized_coordinates=True, device=patch_src.device).to(patch_src.dtype).repeat(B, 1, 1, 1))
grid = transform_points(src_norm_trans_dst_norm[:, None, None], grid)
return F.grid_sample(patch_src, grid, align_corners=align_corners, mode=mode, padding_mode=padding_mode)
|
44,611 | def parse_tracking_example(example, dataset_ndims,
dtype=tf.float32):
X_names = ['app', 'cent', 'morph', 'adj']
y_names = ['temp_adj']
sparse_names = ['adj', 'temp_adj']
full_name_dict = {'app': 'appearances',
'cent': 'centroids',
'morph': 'morphologies',
'adj': 'adj_matrices',
'temp_adj': 'temporal_adj_matrices'}
# Recreate the example structure
data = {}
shape_strings_dict = {}
shapes_dict = {}
for key in dataset_ndims:
if 'shape' in key:
new_key = '_'.join(key.split('_')[0:-1])
shapes_dict[new_key] = dataset_ndims[key]
for key in shapes_dict:
dataset_ndims.pop(key + '_shape')
for key in dataset_ndims:
if key in sparse_names:
data[key] = tf.io.SparseFeature(value_key=key + '_val',
index_key=[key + '_ind_' + str(i)
for i in range(dataset_ndims[key])],
size=shapes_dict[key],
dtype=tf.float32)
else:
data[key] = tf.io.FixedLenFeature([], tf.string)
shape_strings = [key + '_shape_' + str(i)
for i in range(dataset_ndims[key])]
shape_strings_dict[key] = shape_strings
for ss in shape_strings:
data[ss] = tf.io.FixedLenFeature([], tf.int64)
# Get data
content = tf.io.parse_single_example(example, data)
X_dict = {}
y_dict = {}
for key in dataset_ndims:
# Get the feature and reshape
if key in sparse_names:
value = content[key]
else:
shape = [content[ss] for ss in shape_strings_dict[key]]
value = content[key]
value = tf.io.parse_tensor(value, out_type=dtype)
value = tf.reshape(value, shape=shape)
if key in X_names:
X_dict[full_name_dict[key]] = value
else:
y_dict[full_name_dict[key]] = value
return X_dict, y_dict
| def parse_tracking_example(example, dataset_ndims,
dtype=tf.float32):
X_names = ['app', 'cent', 'morph', 'adj']
y_names = ['temp_adj']
sparse_names = ['adj', 'temp_adj']
full_name_dict = {'app': 'appearances',
'cent': 'centroids',
'morph': 'morphologies',
'adj': 'adj_matrices',
'temp_adj': 'temporal_adj_matrices'}
# Recreate the example structure
data = {}
shape_strings_dict = {}
shapes_dict = {}
for key in dataset_ndims:
if 'shape' in key:
new_key = '_'.join(key.split('_')[0:-1])
shapes_dict[new_key] = dataset_ndims[key]
for key in shapes_dict:
dataset_ndims.pop(key + '_shape')
for key in dataset_ndims:
if key in sparse_names:
data[key] = tf.io.SparseFeature(value_key=key + '_val',
index_key=[key + '_ind_' + str(i)
for i in range(dataset_ndims[key])],
size=shapes_dict[key],
dtype=tf.float32)
else:
data[key] = tf.io.FixedLenFeature([], tf.string)
shape_strings = ['{}_shape_{}'.format(key, i)
for i in range(dataset_ndims[key])]
shape_strings_dict[key] = shape_strings
for ss in shape_strings:
data[ss] = tf.io.FixedLenFeature([], tf.int64)
# Get data
content = tf.io.parse_single_example(example, data)
X_dict = {}
y_dict = {}
for key in dataset_ndims:
# Get the feature and reshape
if key in sparse_names:
value = content[key]
else:
shape = [content[ss] for ss in shape_strings_dict[key]]
value = content[key]
value = tf.io.parse_tensor(value, out_type=dtype)
value = tf.reshape(value, shape=shape)
if key in X_names:
X_dict[full_name_dict[key]] = value
else:
y_dict[full_name_dict[key]] = value
return X_dict, y_dict
|
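A self-contained round trip for one sparse feature, mirroring the "<name>_val" / "<name>_ind_i" convention used above (the feature name, indices and dense shape are invented for the demo):

import tensorflow as tf

example = tf.train.Example(features=tf.train.Features(feature={
    "adj_val": tf.train.Feature(float_list=tf.train.FloatList(value=[1.0, 5.0])),
    "adj_ind_0": tf.train.Feature(int64_list=tf.train.Int64List(value=[0, 2])),
    "adj_ind_1": tf.train.Feature(int64_list=tf.train.Int64List(value=[1, 3])),
}))
spec = {"adj": tf.io.SparseFeature(value_key="adj_val",
                                   index_key=["adj_ind_0", "adj_ind_1"],
                                   size=[3, 4], dtype=tf.float32)}
parsed = tf.io.parse_single_example(example.SerializeToString(), spec)
print(tf.sparse.to_dense(parsed["adj"]))  # dense 3x4 tensor with 1.0 at [0, 1] and 5.0 at [2, 3]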
7,118 | def blind_richardson_lucy(image, psf=None, iterations=10,
return_iterations=False, clip=False):
"""Blind Richardson-Lucy deconvolution.
Parameters
----------
image : ndarray
Input degraded image (can be N dimensional).
psf : ndarray, optional
A first estimate of the point spread function, same size as image
iterations : int
Number of iterations. This parameter plays the role of
regularisation. After a given iterations, the estimates can produce
division by 0 problems, then the algorithm is automatically stopped.
return_iterations : boolean, optional
Returns instead of a tuple of the final restorated image and used PSF
a tuple of all iterations for further investigation
clip : boolean, optional
True by default. If true, pixel value of the result above 1 or
under -1 are thresholded for skimage pipeline compatibility.
Returns
-------
im_deconv : ndarray
The deconvolved image.
psf : ndarray
The last PSF estimate to deconvolve image
Examples
--------
>>> from skimage.restoration import blind_richardson_lucy
>>> image = np.zeros((100,100))
    >>> image[40:60, 45:55] = 1
    >>> image[45:55, 40:60] = 1
>>> psf = np.zeros_like(image)
>>> psf[50,50] = 1
>>> psf = gaussian(psf, 2)
>>> image_conv = convolve2d(image, psf, 'same')
    >>> deconvolved, calc_psf = blind_richardson_lucy(image_conv, iterations=10)
Notes
-----
This function estimates a point spread function based on an
inverse Richardson Lucy algorithm as described
    in Fish et al., 1995. It is an iterative process in which the PSF
    and the image are deconvolved alternately.
    It is more noise tolerant than other algorithms,
    such as the Ayers-Dainty and Wiener filter algorithms
(taken from the paper).
The algorithm performs well with gaussian PSFs and can recover
them nicely without any prior knowledge. If one has already an
educated guess, one should pass the PSF as argument to the function.
Note, that the PSF should have the same shape as the image,
and the PSF should be centered.
Due to its nature, the algorithm may divide by 0.
The function catches this issue and aborts the iterative process.
Mostly, the optimal number of iterations is before this error may occur.
References
----------
.. [1] Fish, D. A., A. M. Brinicombe, E. R. Pike, and J. G. Walker.
"Blind deconvolution by means of the Richardson–Lucy algorithm."
JOSA A 12, no. 1 (1995): 58-65.
https://pdfs.semanticscholar.org/9e3f/a71e22caf358dbe873e9649f08c205d0c0c0.pdf
"""
if return_iterations:
all_iterations = np.empty((iterations, 2,) + image.shape)
# Convert image to float for computations
image = image.astype(np.float)
# Initialize im_deconv and PSF
im_deconv = np.full(image.shape, 0.5)
if psf is None:
psf = np.full(image.shape, 0.5)
else:
assert psf.shape == image.shape, \
'Image and PSF should have the same shape!'
psf = psf.astype(np.float)
for i in range(iterations):
# Deconvolve the PSF
# Hack: in original publication one would have used `image`,
# however, this does not work.
# Using `im_deconv` instead recovers PSF.
relative_blur_psf = im_deconv / fftconvolve(psf, im_deconv, 'same')
# Check if relative_blur_psf contains nan,
# causing the algorithm to fail
if np.count_nonzero(~np.isnan(relative_blur_psf)) \
< relative_blur_psf.size:
warnings.warn('Iterations stopped after {} iterations'
' because PSF contains zeros!'.format(i),
RuntimeWarning)
break
else:
psf *= fftconvolve(relative_blur_psf,
im_deconv[::-1, ::-1],
'same')
# Compute inverse again
psf_mirror = psf[::-1, ::-1]
# Standard Richardson-Lucy deconvolution
relative_blur = image / fftconvolve(im_deconv, psf, 'same')
im_deconv *= fftconvolve(relative_blur, psf_mirror, 'same')
# Add iteration to list, if desired
if return_iterations:
all_iterations[i, 0] = im_deconv.copy()
all_iterations[i, 1] = psf.copy()
# Don't know if this makes sense here...
if clip:
im_deconv[im_deconv > 1] = 1
im_deconv[im_deconv < -1] = -1
if return_iterations:
return all_iterations
else:
return im_deconv, psf
| def blind_richardson_lucy(image, psf=None, iterations=10,
return_iterations=False, clip=False):
"""Blind Richardson-Lucy deconvolution.
Parameters
----------
image : ndarray
Input degraded image (can be N dimensional).
psf : ndarray, optional
A first estimate of the point spread function, same size as image.
iterations : int
Number of iterations. This parameter plays the role of
        regularisation. After a certain number of iterations, the estimates can
        produce division-by-zero problems, at which point the algorithm is
        automatically stopped.
return_iterations : boolean, optional
        If True, returns all iterations for further investigation instead of a
        tuple of the final restored image and the PSF used.
clip : boolean, optional
True by default. If true, pixel value of the result above 1 or
under -1 are thresholded for skimage pipeline compatibility.
Returns
-------
im_deconv : ndarray
The deconvolved image.
psf : ndarray
The last PSF estimate to deconvolve image
Examples
--------
>>> from skimage.restoration import blind_richardson_lucy
>>> image = np.zeros((100,100))
    >>> image[40:60, 45:55] = 1
    >>> image[45:55, 40:60] = 1
>>> psf = np.zeros_like(image)
>>> psf[50,50] = 1
>>> psf = gaussian(psf, 2)
>>> image_conv = convolve2d(image, psf, 'same')
    >>> deconvolved, calc_psf = blind_richardson_lucy(image_conv, iterations=10)
Notes
-----
This function estimates a point spread function based on an
inverse Richardson Lucy algorithm as described
    in Fish et al., 1995. It is an iterative process in which the PSF
    and the image are deconvolved alternately.
    It is more noise tolerant than other algorithms,
    such as the Ayers-Dainty and Wiener filter algorithms
(taken from the paper).
The algorithm performs well with gaussian PSFs and can recover
them nicely without any prior knowledge. If one has already an
educated guess, one should pass the PSF as argument to the function.
Note, that the PSF should have the same shape as the image,
and the PSF should be centered.
Due to its nature, the algorithm may divide by 0.
The function catches this issue and aborts the iterative process.
Mostly, the optimal number of iterations is before this error may occur.
References
----------
.. [1] Fish, D. A., A. M. Brinicombe, E. R. Pike, and J. G. Walker.
"Blind deconvolution by means of the Richardson–Lucy algorithm."
JOSA A 12, no. 1 (1995): 58-65.
https://pdfs.semanticscholar.org/9e3f/a71e22caf358dbe873e9649f08c205d0c0c0.pdf
"""
if return_iterations:
all_iterations = np.empty((iterations, 2,) + image.shape)
# Convert image to float for computations
image = image.astype(np.float)
# Initialize im_deconv and PSF
im_deconv = np.full(image.shape, 0.5)
if psf is None:
psf = np.full(image.shape, 0.5)
else:
assert psf.shape == image.shape, \
'Image and PSF should have the same shape!'
psf = psf.astype(np.float)
for i in range(iterations):
# Deconvolve the PSF
# Hack: in original publication one would have used `image`,
# however, this does not work.
# Using `im_deconv` instead recovers PSF.
relative_blur_psf = im_deconv / fftconvolve(psf, im_deconv, 'same')
# Check if relative_blur_psf contains nan,
# causing the algorithm to fail
if np.count_nonzero(~np.isnan(relative_blur_psf)) \
< relative_blur_psf.size:
warnings.warn('Iterations stopped after {} iterations'
' because PSF contains zeros!'.format(i),
RuntimeWarning)
break
else:
psf *= fftconvolve(relative_blur_psf,
im_deconv[::-1, ::-1],
'same')
# Compute inverse again
psf_mirror = psf[::-1, ::-1]
# Standard Richardson-Lucy deconvolution
relative_blur = image / fftconvolve(im_deconv, psf, 'same')
im_deconv *= fftconvolve(relative_blur, psf_mirror, 'same')
# Add iteration to list, if desired
if return_iterations:
all_iterations[i, 0] = im_deconv.copy()
all_iterations[i, 1] = psf.copy()
# Don't know if this makes sense here...
if clip:
im_deconv[im_deconv > 1] = 1
im_deconv[im_deconv < -1] = -1
if return_iterations:
return all_iterations
else:
return im_deconv, psf
|
32,839 | def add_span_arg_tags(span, endpoint_name, args, args_names, args_traced):
if endpoint_name not in BLACKLIST_ENDPOINT:
blacklisted = BLACKLIST_ENDPOINT_TAGS.get(endpoint_name, [])
tags = dict((name, value) for (name, value) in zip(args_names, args) if name in args_traced)
flat_tags = _flatten_dict(tags, exclude=blacklisted)
span.set_tags({k: truncate_arg_value(v) for k, v in flat_tags.items()})
| def add_span_arg_tags(span, endpoint_name, args, args_names, args_traced):
if endpoint_name not in BLACKLIST_ENDPOINT:
blacklisted = BLACKLIST_ENDPOINT_TAGS.get(endpoint_name, [])
tags = dict((name, value) for (name, value) in zip(args_names, args) if name in args_traced)
flat_tags = _flatten_dict(tags, exclude=exclude)
span.set_tags({k: truncate_arg_value(v) for k, v in flat_tags.items()})
|
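A plausible stand-in for the _flatten_dict helper referenced above, shown only to make the exclusion step concrete; ddtrace's real helper may differ in separator and exclusion semantics:

def _flatten_dict(d, exclude=(), prefix="", sep="."):
    flat = {}
    for key, value in d.items():
        if key in exclude:
            continue
        full_key = prefix + sep + key if prefix else key
        if isinstance(value, dict):
            flat.update(_flatten_dict(value, exclude, full_key, sep))
        else:
            flat[full_key] = value
    return flat

print(_flatten_dict({"a": {"b": 1, "secret": 2}, "c": 3}, exclude=("secret",)))  # {'a.b': 1, 'c': 3}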
30,868 | def query_processes_command():
machine = demisto.getArg('machine')
process_name = demisto.getArg('processName')
only_suspicious = demisto.getArg('onlySuspicious')
has_incoming_connection = demisto.getArg('hasIncomingConnection')
has_outgoing_connection = demisto.getArg('hasOutgoingConnection')
has_external_connection = demisto.getArg('hasExternalConnection')
unsigned_unknown_reputation = demisto.getArg('unsignedUnknownReputation')
from_temporary_folder = demisto.getArg('fromTemporaryFolder')
privileges_escalation = demisto.getArg('privilegesEscalation')
maclicious_psexec = demisto.getArg('maliciousPsExec')
response = query_processes(machine, process_name, only_suspicious, has_incoming_connection, has_outgoing_connection,
has_external_connection, unsigned_unknown_reputation, from_temporary_folder,
privileges_escalation, maclicious_psexec)
elements = dict_safe_get(response, ['data', 'resultIdToElementDataMap'], {}, dict)
outputs = []
for item in elements.values():
if not isinstance(item, dict):
raise ValueError("Cybereason raw response is not valid, item in elements is not dict")
simple_values = item.get('simpleValues', {})
element_values = item.get('elementValues', {})
output = {}
for info in PROCESS_INFO:
if info.get('type') == 'filterData':
output[info['header']] = dict_safe_get(item, ['filterData', 'groupByValue'])
output = update_output(output, simple_values, element_values, PROCESS_INFO)
outputs.append(output)
context = []
for output in outputs:
# Remove whitespaces from dictionary keys
context.append({key.translate(None, ' '): value for key, value in output.iteritems()})
ec = {
'Process': context
}
demisto.results({
'Type': entryTypes.get('note'),
'Contents': response,
'ContentsFormat': formats.get('json'),
'ReadableContentsFormat': formats.get('markdown'),
'HumanReadable': tableToMarkdown('Cybereason Processes', outputs, PROCESS_HEADERS),
'EntryContext': ec
})
| def query_processes_command():
machine = demisto.getArg('machine')
process_name = demisto.getArg('processName')
only_suspicious = demisto.getArg('onlySuspicious')
has_incoming_connection = demisto.getArg('hasIncomingConnection')
has_outgoing_connection = demisto.getArg('hasOutgoingConnection')
has_external_connection = demisto.getArg('hasExternalConnection')
unsigned_unknown_reputation = demisto.getArg('unsignedUnknownReputation')
from_temporary_folder = demisto.getArg('fromTemporaryFolder')
privileges_escalation = demisto.getArg('privilegesEscalation')
maclicious_psexec = demisto.getArg('maliciousPsExec')
response = query_processes(machine, process_name, only_suspicious, has_incoming_connection, has_outgoing_connection,
has_external_connection, unsigned_unknown_reputation, from_temporary_folder,
privileges_escalation, maclicious_psexec)
elements = dict_safe_get(response, ['data', 'resultIdToElementDataMap'], {}, dict)
outputs = []
for item in elements.values():
if not isinstance(item, dict):
raise ValueError("Cybereason raw response is not valid, item in elements is not a dict")
simple_values = item.get('simpleValues', {})
element_values = item.get('elementValues', {})
output = {}
for info in PROCESS_INFO:
if info.get('type') == 'filterData':
output[info['header']] = dict_safe_get(item, ['filterData', 'groupByValue'])
output = update_output(output, simple_values, element_values, PROCESS_INFO)
outputs.append(output)
context = []
for output in outputs:
# Remove whitespaces from dictionary keys
context.append({key.translate(None, ' '): value for key, value in output.iteritems()})
ec = {
'Process': context
}
demisto.results({
'Type': entryTypes.get('note'),
'Contents': response,
'ContentsFormat': formats.get('json'),
'ReadableContentsFormat': formats.get('markdown'),
'HumanReadable': tableToMarkdown('Cybereason Processes', outputs, PROCESS_HEADERS),
'EntryContext': ec
})
|
54,510 | def run(args: argparse.Namespace) -> None:
kurobako_cmd = os.path.join(args.path_to_kurobako, "kurobako")
subprocess.run(f"{kurobako_cmd} --version", shell=True)
if not (os.path.exists(args.data_dir) and os.path.isdir(args.data_dir)):
raise ValueError(f"Data directory {args.data_dir} cannot be found.")
os.makedirs(args.out_dir, exist_ok=True)
study_json_fn = os.path.join(args.out_dir, "studies.json")
subprocess.check_call(f"echo >| {study_json_fn}", shell=True)
solvers_filename = os.path.join(args.out_dir, "solvers.json")
subprocess.check_call(f"echo >| {solvers_filename}", shell=True)
problems_filename = os.path.join(args.out_dir, "problems.json")
subprocess.check_call(f"echo >| {problems_filename}", shell=True)
# Create ZDT problems
cmd = f"{kurobako_cmd} problem-suite zdt | tee -a {problems_filename}"
subprocess.run(cmd, shell=True)
# Create NAS bench problem(C) (for Multi-Objective Settings).
dataset = os.path.join(args.data_dir, "nasbench_full.bin")
cmd = (
f'{kurobako_cmd} problem nasbench "{dataset}"'
f"--encoding C --metrics accuracy params | tee -a {problems_filename}"
)
subprocess.run(cmd, shell=True)
# Create solvers.
sampler_list = args.sampler_list.split()
sampler_kwargs_list = args.sampler_kwargs_list.split()
if len(sampler_list) != len(sampler_kwargs_list):
raise ValueError(
"The number of samplers does not match the given keyword arguments. \n"
f"sampler_list: {sampler_list}, sampler_kwargs_list: {sampler_kwargs_list}."
)
for sampler, sampler_kwargs in zip(sampler_list, sampler_kwargs_list):
name = f"{args.name_prefix}_{sampler}"
python_command = f"mo_runner.py {sampler} {sampler_kwargs}"
cmd = (
f"{kurobako_cmd} solver --name {name} command python {python_command}"
f"| tee -a {solvers_filename}"
)
subprocess.run(cmd, shell=True)
# Create study.
cmd = (
f"{kurobako_cmd} studies --budget 1000 "
f"--solvers $(cat {solvers_filename}) --problems $(cat {problems_filename}) "
f"--repeats {args.n_runs} --seed {args.seed} "
f"> {study_json_fn}"
)
subprocess.run(cmd, shell=True)
result_filename = os.path.join(args.out_dir, "results.json")
cmd = (
f"cat {study_json_fn} | {kurobako_cmd} run --parallelism {args.n_jobs} "
f"> {result_filename}"
)
subprocess.run(cmd, shell=True)
# Report
report_filename = os.path.join(args.out_dir, "report.md")
cmd = f"cat {result_filename} | {kurobako_cmd} report > {report_filename}"
subprocess.run(cmd, shell=True)
# Plot pareto-front.
problem_names = ["NASBench", "ZDT1", "ZDT2", "ZDT3", "ZDT4", "ZDT5", "ZDT6"]
for problem_name in problem_names:
cmd = (
f"cat {result_filename} | grep {problem_name} | "
f"{kurobako_cmd} plot pareto-front -o {args.out_dir}"
)
subprocess.run(cmd, shell=True)
| def run(args: argparse.Namespace) -> None:
kurobako_cmd = os.path.join(args.path_to_kurobako, "kurobako")
subprocess.run(f"{kurobako_cmd} --version", shell=True)
if not (os.path.exists(args.data_dir) and os.path.isdir(args.data_dir)):
raise ValueError(f"Data directory {args.data_dir} cannot be found.")
os.makedirs(args.out_dir, exist_ok=True)
study_json_fn = os.path.join(args.out_dir, "studies.json")
subprocess.check_call(f"echo >| {study_json_fn}", shell=True)
solvers_filename = os.path.join(args.out_dir, "solvers.json")
subprocess.check_call(f"echo >| {solvers_filename}", shell=True)
problems_filename = os.path.join(args.out_dir, "problems.json")
subprocess.check_call(f"echo >| {problems_filename}", shell=True)
# Create ZDT problems
cmd = f"{kurobako_cmd} problem-suite zdt | tee -a {problems_filename}"
subprocess.run(cmd, shell=True)
# Create NAS bench problem(C) (for Multi-Objective Settings).
dataset = os.path.join(args.data_dir, "nasbench_full.bin")
cmd = (
f'{kurobako_cmd} problem nasbench "{dataset}"'
f"--encoding C --metrics accuracy params | tee -a {problems_filename}"
)
subprocess.run(cmd, shell=True)
# Create solvers.
sampler_list = args.sampler_list.split()
sampler_kwargs_list = args.sampler_kwargs_list.split()
if len(sampler_list) != len(sampler_kwargs_list):
raise ValueError(
"The number of samplers does not match the given keyword arguments. \n"
f"sampler_list: {sampler_list}, sampler_kwargs_list: {sampler_kwargs_list}."
)
for sampler, sampler_kwargs in zip(sampler_list, sampler_kwargs_list):
name = f"{args.name_prefix}_{sampler}"
python_command = f"mo_runner.py {sampler} {sampler_kwargs}"
cmd = (
f"{kurobako_cmd} solver --name {name} command python {python_command}"
f"| tee -a {solvers_filename}"
)
subprocess.run(cmd, shell=True)
# Create study.
cmd = (
f"{kurobako_cmd} studies --budget 1000 "
f"--solvers $(cat {solvers_filename}) --problems $(cat {problems_filename}) "
f"--repeats {args.n_runs} --seed {args.seed} "
f"> {study_json_fn}"
)
subprocess.run(cmd, shell=True)
result_filename = os.path.join(args.out_dir, "results.json")
cmd = (
f"cat {study_json_fn} | {kurobako_cmd} run --parallelism {args.n_jobs} "
f"> {result_filename}"
)
subprocess.run(cmd, shell=True)
# Report
report_filename = os.path.join(args.out_dir, "report.md")
cmd = f"cat {result_filename} | {kurobako_cmd} report > {report_filename}"
subprocess.run(cmd, shell=True)
# Plot pareto-front.
problem_names = ["NASBench", "ZDT1", "ZDT2", "ZDT3", "ZDT4", "ZDT5", "ZDT6"]
for problem_name in problem_names:
cmd = (
f"cat {result_filename} | grep {problem_name} | "
f"{kurobako_cmd} plot pareto-front -o {args.out_dir} "
f"--xmin {xmin} --xmax {xmax} --ymin {ymin} --ymax {ymax}"
)
subprocess.run(cmd, shell=True)
|
7,507 | def test_pixel_scale_acceptable_scale_unit():
pix = 75 * u.pix
v = 3000 * u.cm / u.s
pixscale = 0.4 * u.m / u.s / u.pix
pixscale2 = 2.5 * u.pix / (u.m / u.s)
assert_quantity_allclose(pix.to(u.m / u.s, u.pixel_scale(pixscale)), v)
assert_quantity_allclose(pix.to(u.km / u.s, u.pixel_scale(pixscale)), v)
assert_quantity_allclose(pix.to(u.m / u.s, u.pixel_scale(pixscale2)), v)
assert_quantity_allclose(pix.to(u.km / u.s, u.pixel_scale(pixscale2)), v)
assert_quantity_allclose(v.to(u.pix, u.pixel_scale(pixscale)), pix)
assert_quantity_allclose(v.to(u.pix, u.pixel_scale(pixscale2)), pix)
| def test_pixel_scale_acceptable_scale_unit():
pix = 75 * u.pix
v = 3000 * (u.cm / u.s)
pixscale = 0.4 * u.m / u.s / u.pix
pixscale2 = 2.5 * u.pix / (u.m / u.s)
assert_quantity_allclose(pix.to(u.m / u.s, u.pixel_scale(pixscale)), v)
assert_quantity_allclose(pix.to(u.km / u.s, u.pixel_scale(pixscale)), v)
assert_quantity_allclose(pix.to(u.m / u.s, u.pixel_scale(pixscale2)), v)
assert_quantity_allclose(pix.to(u.km / u.s, u.pixel_scale(pixscale2)), v)
assert_quantity_allclose(v.to(u.pix, u.pixel_scale(pixscale)), pix)
assert_quantity_allclose(v.to(u.pix, u.pixel_scale(pixscale2)), pix)
|
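A stand-alone illustration of what the test above exercises, assuming an astropy version in which u.pixel_scale accepts arbitrary (non-angular) scales:

import astropy.units as u

pixscale = 0.4 * u.m / u.s / u.pix  # any <quantity>/pix scale defines the equivalency
print((75 * u.pix).to(u.m / u.s, u.pixel_scale(pixscale)))       # 30.0 m / s
print((3000 * (u.cm / u.s)).to(u.pix, u.pixel_scale(pixscale)))  # 75.0 pix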
30,178 | def execute_link(link_cmd_args, record_streams, quiet):
"""
<Purpose>
Executes the passed command plus arguments in a subprocess and returns
    the return value of the executed command. If specified, the standard output
    and standard error of the command are recorded and also returned to the
    caller.
<Arguments>
link_cmd_args:
A list where the first element is a command and the remaining
elements are arguments passed to that command.
record_streams:
A bool that specifies whether to redirect standard output and
      standard error to a temporary file which is returned to the
caller (True) or not (False).
<Exceptions>
TBA (see https://github.com/in-toto/in-toto/issues/6)
<Side Effects>
Executes passed command in a subprocess and redirects stdout and stderr
if specified.
<Returns>
- A dictionary containing standard output and standard error of the
executed command, called by-products.
Note: If record_streams is False, the dict values are empty strings.
- The return value of the executed command.
"""
if record_streams:
if (quiet == False): #record_streams true, quiet false
return_code, stdout_str, stderr_str = \
securesystemslib.process.run_duplicate_streams(link_cmd_args)
else: #record_streams true, quiet true
process = securesystemslib.process.run(link_cmd_args, check=False,
stdout=securesystemslib.process.PIPE,
stderr=securesystemslib.process.PIPE)
stdout_str = process.stdout
stderr_str = process.stderr
return_code = process.returncode
else:
if (quiet == False): #record_streams false, quiet false
process = securesystemslib.process.run(link_cmd_args, check=False,
stdout=None, stderr=None)
stdout_str = stderr_str = ""
return_code = process.returncode
else: #record_streams false, quiet true
process = securesystemslib.process.run(link_cmd_args, check=False,
stdout=securesystemslib.process.DEVNULL,
stderr=securesystemslib.process.DEVNULL)
stdout_str = stderr_str = ""
return_code = process.returncode
return {
"stdout": stdout_str,
"stderr": stderr_str,
"return-value": return_code
}
| def execute_link(link_cmd_args, record_streams, quiet):
"""
<Purpose>
Executes the passed command plus arguments in a subprocess and returns
    the return value of the executed command. If specified, the standard output
    and standard error of the command are recorded and also returned to the
    caller.
<Arguments>
link_cmd_args:
A list where the first element is a command and the remaining
elements are arguments passed to that command.
record_streams:
A bool that specifies whether to redirect standard output and
      standard error to a temporary file which is returned to the
caller (True) or not (False).
<Exceptions>
TBA (see https://github.com/in-toto/in-toto/issues/6)
<Side Effects>
Executes passed command in a subprocess and redirects stdout and stderr
if specified.
<Returns>
- A dictionary containing standard output and standard error of the
executed command, called by-products.
Note: If record_streams is False, the dict values are empty strings.
- The return value of the executed command.
"""
if record_streams:
if (quiet == False): #record_streams true, quiet false
return_code, stdout_str, stderr_str = \
securesystemslib.process.run_duplicate_streams(link_cmd_args)
else: #record_streams true, quiet true
process = securesystemslib.process.run(link_cmd_args, check=False,
stdout=securesystemslib.process.PIPE,
stderr=securesystemslib.process.PIPE)
stdout_str = process.stdout
stderr_str = process.stderr
return_code = process.returncode
else:
if (quiet == False): #record_streams false, quiet false
process = securesystemslib.process.run(link_cmd_args, check=False,
stdout=None, stderr=None)
stdout_str = stderr_str = ""
return_code = process.returncode
else: #record_streams false, quiet true
process = securesystemslib.process.run(link_cmd_args, check=False,
stdout=securesystemslib.process.DEVNULL,
stderr=securesystemslib.process.DEVNULL)
stdout_str = stderr_str = ""
return_code = process.returncode
return {
"stdout": stdout_str,
"stderr": stderr_str,
"return-value": return_code
}
|
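A plain-subprocess sketch of the four record_streams/quiet combinations handled above; it simplifies the recording branch by capturing both streams instead of duplicating them to the console the way securesystemslib.process.run_duplicate_streams does:

import subprocess

def run_command(cmd, record_streams, quiet):
    if record_streams:
        target = subprocess.PIPE
    else:
        target = subprocess.DEVNULL if quiet else None  # None inherits the parent's streams
    proc = subprocess.run(cmd, stdout=target, stderr=target, text=True, check=False)
    return {
        "stdout": proc.stdout or "",
        "stderr": proc.stderr or "",
        "return-value": proc.returncode,
    }

print(run_command(["python", "-c", "print('hi')"], record_streams=True, quiet=True))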
6,864 | def get_site_config(sites_path=None, site_path=None):
"""Returns `site_config.json` combined with `sites/common_site_config.json`.
`site_config` is a set of site wide settings like database name, password, email etc."""
config = {}
sites_path = sites_path or getattr(local, "sites_path", None)
site_path = site_path or getattr(local, "site_path", None)
if sites_path:
common_site_config = os.path.join(sites_path, "common_site_config.json")
if os.path.exists(common_site_config):
try:
config.update(get_file_json(common_site_config))
except Exception as error:
click.secho("Common Site Config may be corrupted", fg="red")
print(error)
if site_path:
site_config = os.path.join(site_path, "site_config.json")
if os.path.exists(site_config):
try:
config.update(get_file_json(site_config))
except Exception as error:
click.secho("Site Config for {0} may be corrupted".format(local.site), fg="red")
print(error)
elif local.site and not local.flags.new_site:
raise IncorrectSitePath("{0} does not exist".format(local.site))
return _dict(config)
| def get_site_config(sites_path=None, site_path=None):
"""Returns `site_config.json` combined with `sites/common_site_config.json`.
`site_config` is a set of site wide settings like database name, password, email etc."""
config = {}
sites_path = sites_path or getattr(local, "sites_path", None)
site_path = site_path or getattr(local, "site_path", None)
if sites_path:
common_site_config = os.path.join(sites_path, "common_site_config.json")
if os.path.exists(common_site_config):
try:
config.update(get_file_json(common_site_config))
except Exception as error:
click.secho("common_site_config.json is invalid", fg="red")
print(error)
if site_path:
site_config = os.path.join(site_path, "site_config.json")
if os.path.exists(site_config):
try:
config.update(get_file_json(site_config))
except Exception as error:
click.secho("Site Config for {0} may be corrupted".format(local.site), fg="red")
print(error)
elif local.site and not local.flags.new_site:
raise IncorrectSitePath("{0} does not exist".format(local.site))
return _dict(config)
|
6,628 | def start_import(invoices):
errors = 0
names = []
for idx, d in enumerate(invoices):
try:
invoice_number = ''
set_child_names = False
if d.invoice_number:
invoice_number = d.invoice_number
set_child_names = True
publish(idx, len(invoices), d.doctype)
doc = frappe.get_doc(d)
doc.flags.ignore_mandatory = True
doc.insert(set_name=invoice_number, set_child_names=set_child_names)
doc.submit()
frappe.db.commit()
names.append(doc.name)
except Exception:
errors += 1
frappe.db.rollback()
message = "\n".join(["Data:", dumps(d, default=str, indent=4), "--" * 50, "\nException:", traceback.format_exc()])
frappe.log_error(title="Error while creating Opening Invoice", message=message)
frappe.db.commit()
if errors:
frappe.msgprint(_("You had {} errors while creating opening invoices. Check {} for more details")
            .format(errors, "<a href='/app/List/Error Log' class='variant-click'>Error Log</a>"), indicator="red", title=_("Error Occurred"))
return names
| def start_import(invoices):
errors = 0
names = []
for idx, d in enumerate(invoices):
try:
invoice_number = None
set_child_names = False
if d.invoice_number:
invoice_number = d.invoice_number
set_child_names = True
publish(idx, len(invoices), d.doctype)
doc = frappe.get_doc(d)
doc.flags.ignore_mandatory = True
doc.insert(set_name=invoice_number, set_child_names=set_child_names)
doc.submit()
frappe.db.commit()
names.append(doc.name)
except Exception:
errors += 1
frappe.db.rollback()
message = "\n".join(["Data:", dumps(d, default=str, indent=4), "--" * 50, "\nException:", traceback.format_exc()])
frappe.log_error(title="Error while creating Opening Invoice", message=message)
frappe.db.commit()
if errors:
frappe.msgprint(_("You had {} errors while creating opening invoices. Check {} for more details")
            .format(errors, "<a href='/app/List/Error Log' class='variant-click'>Error Log</a>"), indicator="red", title=_("Error Occurred"))
return names
|
23,596 | def soiling_kimber(rainfall, cleaning_threshold=6, soiling_loss_rate=0.0015,
grace_period=14, max_soiling=0.3, manual_wash_dates=None,
initial_soiling=0, rain_accum_period=24, is_tmy=False):
"""
    Calculates fraction of energy lost due to soiling given rainfall data and
daily loss rate using the Kimber model.
Kimber soiling model [1]_ assumes soiling builds up at a daily rate unless
the daily rainfall is greater than a threshold. The model also assumes that
if daily rainfall has exceeded the threshold within a grace period, then
the ground is too damp to cause soiling build-up. The model also assumes
there is a maximum soiling build-up. Scheduled manual washes and rain
events are assumed to reset soiling to zero.
Parameters
----------
rainfall: pandas.Series
Accumulated rainfall at the end of each time period. [mm]
cleaning_threshold: float, default 6
Amount of daily rainfall required to clean the panels. [mm]
soiling_loss_rate: float, default 0.0015
Fraction of energy lost due to one day of soiling. [unitless]
grace_period : int, default 14
Number of days after a rainfall event when it's assumed the ground is
damp, and so it's assumed there is no soiling. [days]
max_soiling : float, default 0.3
Maximum fraction of energy lost due to soiling. Soiling will build up
until this value. [unitless]
manual_wash_dates : sequence or None, default None
List or tuple of dates as Python ``datetime.date`` when the panels were
washed manually. Note there is no grace period after a manual wash, so
soiling begins to build up immediately.
initial_soiling : float, default 0
Initial fraction of energy lost due to soiling at time zero in the
`rainfall` series input. [unitless]
rain_accum_period : int, default 24
Period for accumulating rainfall to check against `cleaning_threshold`.
The Kimber model defines this period as one day. [hours]
is_tmy : bool, default False
Fix last timestep in TMY so that it is monotonically increasing.
Returns
-------
pandas.Series
fraction of energy lost due to soiling, has same intervals as input
Notes
-----
The soiling loss rate depends on both the geographical region and the
soiling environment type. Rates measured by Kimber [1]_ are summarized in
the following table:
=================== ======= ========= ======================
Region/Environment Rural Suburban Urban/Highway/Airport
=================== ======= ========= ======================
Central Valley 0.0011 0.0019 0.0020
Northern CA 0.0011 0.0010 0.0016
Southern CA 0 0.0016 0.0019
Desert 0.0030 0.0030 0.0030
=================== ======= ========= ======================
Rainfall thresholds and grace periods may also vary by region. Please
consult [1]_ for more information.
References
----------
.. [1] "The Effect of Soiling on Large Grid-Connected Photovoltaic Systems
in California and the Southwest Region of the United States," Adrianne
Kimber, et al., IEEE 4th World Conference on Photovoltaic Energy
Conference, 2006, :doi:`10.1109/WCPEC.2006.279690`
"""
# convert rain_accum_period to timedelta
rain_accum_period = datetime.timedelta(hours=rain_accum_period)
# convert grace_period to timedelta
grace_period = datetime.timedelta(days=grace_period)
# get rainfall timezone, timestep as timedelta64, and timestep as day-frac
rain_tz = rainfall.index.tz
rain_index_vals = rainfall.index.values
timestep_interval = (rain_index_vals[1] - rain_index_vals[0])
day_fraction = timestep_interval / np.timedelta64(24, 'h')
# if TMY fix to be monotonically increasing by rolling index by 1 interval
# and then adding 1 interval, while the values stay the same
if is_tmy:
rain_vals = rainfall.values
rain_name = rainfall.name
rainfall = _fix_tmy_monotonicity(
rain_index_vals, rain_vals, timestep_interval, rain_tz, rain_name)
# accumulate rainfall
accumulated_rainfall = rainfall.rolling(
rain_accum_period, closed='right').sum()
# soiling rate
soiling = np.ones_like(rainfall.values) * soiling_loss_rate * day_fraction
soiling[0] = initial_soiling
soiling = np.cumsum(soiling)
soiling = pd.Series(soiling, index=rainfall.index, name='soiling')
# rainfall events that clean the panels
rain_events = accumulated_rainfall > cleaning_threshold
    # grace period windows during which ground is assumed damp, so no soiling
grace_windows = rain_events.rolling(grace_period, closed='right').sum() > 0
# clean panels by subtracting soiling for indices in grace period windows
cleaning = pd.Series(float('NaN'), index=rainfall.index)
cleaning.iloc[0] = 0.0
cleaning[grace_windows] = soiling[grace_windows]
# manual wash dates
if manual_wash_dates is not None:
manual_wash_dates = pd.DatetimeIndex(manual_wash_dates, tz=rain_tz)
cleaning[manual_wash_dates] = soiling[manual_wash_dates]
    # remove soiling by forward filling cleaning where NaN
soiling -= cleaning.ffill()
# check if soiling has reached the maximum
return soiling.where(soiling < max_soiling, max_soiling)
| def soiling_kimber(rainfall, cleaning_threshold=6, soiling_loss_rate=0.0015,
grace_period=14, max_soiling=0.3, manual_wash_dates=None,
initial_soiling=0, rain_accum_period=24, is_tmy=False):
"""
Calculates fraction of energy lost due to soiling given rainfall data and
daily loss rate using the Kimber model.
Kimber soiling model [1]_ assumes soiling builds up at a daily rate unless
the daily rainfall is greater than a threshold. The model also assumes that
if daily rainfall has exceeded the threshold within a grace period, then
the ground is too damp to cause soiling build-up. The model also assumes
there is a maximum soiling build-up. Scheduled manual washes and rain
events are assumed to reset soiling to zero.
Parameters
----------
rainfall: pandas.Series
Accumulated rainfall at the end of each time period. [mm]
cleaning_threshold: float, default 6
Amount of daily rainfall required to clean the panels. [mm]
soiling_loss_rate: float, default 0.0015
Fraction of energy lost due to one day of soiling. [unitless]
grace_period : int, default 14
Number of days after a rainfall event when it's assumed the ground is
damp, and so it's assumed there is no soiling. [days]
max_soiling : float, default 0.3
Maximum fraction of energy lost due to soiling. Soiling will build up
until this value. [unitless]
manual_wash_dates : sequence or None, default None
List or tuple of dates as Python ``datetime.date`` when the panels were
washed manually. Note there is no grace period after a manual wash, so
soiling begins to build up immediately.
initial_soiling : float, default 0
Initial fraction of energy lost due to soiling at time zero in the
`rainfall` series input. [unitless]
rain_accum_period : int, default 24
Period for accumulating rainfall to check against `cleaning_threshold`.
The Kimber model defines this period as one day. [hours]
is_tmy : bool, default False
Fix last timestep in TMY so that it is monotonically increasing.
Returns
-------
pandas.Series
fraction of energy lost due to soiling, has same intervals as input
Notes
-----
The soiling loss rate depends on both the geographical region and the
soiling environment type. Rates measured by Kimber [1]_ are summarized in
the following table:
=================== ======= ========= ======================
Region/Environment Rural Suburban Urban/Highway/Airport
=================== ======= ========= ======================
Central Valley 0.0011 0.0019 0.0020
Northern CA 0.0011 0.0010 0.0016
Southern CA 0 0.0016 0.0019
Desert 0.0030 0.0030 0.0030
=================== ======= ========= ======================
Rainfall thresholds and grace periods may also vary by region. Please
consult [1]_ for more information.
References
----------
.. [1] "The Effect of Soiling on Large Grid-Connected Photovoltaic Systems
in California and the Southwest Region of the United States," Adrianne
Kimber, et al., IEEE 4th World Conference on Photovoltaic Energy
Conference, 2006, :doi:`10.1109/WCPEC.2006.279690`
"""
# convert rain_accum_period to timedelta
rain_accum_period = datetime.timedelta(hours=rain_accum_period)
# convert grace_period to timedelta
grace_period = datetime.timedelta(days=grace_period)
# get rainfall timezone, timestep as timedelta64, and timestep as day-frac
rain_tz = rainfall.index.tz
rain_index_vals = rainfall.index.values
timestep_interval = (rain_index_vals[1] - rain_index_vals[0])
day_fraction = timestep_interval / np.timedelta64(24, 'h')
# if TMY fix to be monotonically increasing by rolling index by 1 interval
# and then adding 1 interval, while the values stay the same
if is_tmy:
rain_vals = rainfall.values
rain_name = rainfall.name
rainfall = _fix_tmy_monotonicity(
rain_index_vals, rain_vals, timestep_interval, rain_tz, rain_name)
# accumulate rainfall
accumulated_rainfall = rainfall.rolling(
rain_accum_period, closed='right').sum()
# soiling rate
soiling = np.ones_like(rainfall.values) * soiling_loss_rate * day_fraction
soiling[0] = initial_soiling
soiling = np.cumsum(soiling)
soiling = pd.Series(soiling, index=rainfall.index, name='soiling')
# rainfall events that clean the panels
rain_events = accumulated_rainfall > cleaning_threshold
    # grace period windows during which ground is assumed damp, so no soiling
grace_windows = rain_events.rolling(grace_period, closed='right').sum() > 0
# clean panels by subtracting soiling for indices in grace period windows
cleaning = pd.Series(float('NaN'), index=rainfall.index)
cleaning.iloc[0] = 0.0
cleaning[grace_windows] = soiling[grace_windows]
# manual wash dates
if manual_wash_dates is not None:
manual_wash_dates = pd.DatetimeIndex(manual_wash_dates, tz=rain_tz)
cleaning[manual_wash_dates] = soiling[manual_wash_dates]
    # remove soiling by forward filling cleaning where NaN
soiling -= cleaning.ffill()
# check if soiling has reached the maximum
return soiling.where(soiling < max_soiling, max_soiling)
|
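A minimal usage sketch for the Kimber soiling function above, assuming soiling_kimber() is importable from the surrounding module (in pvlib this model lives in the soiling module); the rainfall series and values below are illustrative only.

import pandas as pd

# two days of hourly rainfall with a single event above the 6 mm threshold
times = pd.date_range('2019-01-01', periods=48, freq='H', tz='Etc/GMT+8')
rainfall = pd.Series(0.0, index=times)
rainfall.iloc[30] = 10.0
losses = soiling_kimber(rainfall)
print(losses.max())  # soiling accumulated before the rain event resets it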
37,795 | def build(project_dir, output_dir, test_command, test_requires, test_extras, before_build, build_verbosity, build_selector, environment):
def simple_shell(args, env=None, cwd=None):
print('+ ' + ' '.join(args))
args = ['cmd', '/E:ON', '/V:ON', '/C'] + args
return subprocess.check_call(' '.join(args), env=env, cwd=cwd)
def download(url, dest):
print('+ Download ' + url + ' to ' + dest)
response = urlopen(url)
try:
with open(dest, 'wb') as file:
file.write(response.read())
finally:
response.close()
if IS_RUNNING_ON_AZURE or IS_RUNNING_ON_TRAVIS:
shell = simple_shell
else:
run_with_env = os.path.abspath(os.path.join(os.path.dirname(__file__), 'resources', 'appveyor_run_with_env.cmd'))
# run_with_env is a cmd file that sets the right environment variables
# to build on AppVeyor.
def shell(args, env=None, cwd=None):
# print the command executing for the logs
print('+ ' + ' '.join(args))
args = ['cmd', '/E:ON', '/V:ON', '/C', run_with_env] + args
return subprocess.check_call(' '.join(args), env=env, cwd=cwd)
abs_project_dir = os.path.abspath(project_dir)
temp_dir = tempfile.mkdtemp(prefix='cibuildwheel')
built_wheel_dir = os.path.join(temp_dir, 'built_wheel')
# instal nuget as best way for provide python
nuget = 'C:\\nuget.exe'
download('https://dist.nuget.org/win-x86-commandline/latest/nuget.exe', nuget)
    # get pip for this installation, which does not have it.
get_pip_script = 'C:\\get-pip.py'
download('https://bootstrap.pypa.io/get-pip.py', get_pip_script)
python_configurations = get_python_configurations(build_selector)
for config in python_configurations:
config_python_path = get_python_path(config)
simple_shell([nuget, "install"] + get_nuget_args(config))
if not os.path.exists(os.path.join(config_python_path, 'Scripts', 'pip.exe')):
simple_shell([os.path.join(config_python_path, 'python.exe'), get_pip_script ])
# check python & pip exist for this configuration
assert os.path.exists(os.path.join(config_python_path, 'python.exe'))
assert os.path.exists(os.path.join(config_python_path, 'Scripts', 'pip.exe'))
# setup dirs
if os.path.exists(built_wheel_dir):
shutil.rmtree(built_wheel_dir)
os.makedirs(built_wheel_dir)
env = os.environ.copy()
# set up environment variables for run_with_env
env['PYTHON_VERSION'] = config.version
env['PYTHON_ARCH'] = config.arch
env['PATH'] = os.pathsep.join([
config_python_path,
os.path.join(config_python_path, 'Scripts'),
env['PATH']
])
env = environment.as_dictionary(prev_environment=env)
# for the logs - check we're running the right version of python
shell(['python', '--version'], env=env)
shell(['python', '-c', '"import struct; print(struct.calcsize(\'P\') * 8)\"'], env=env)
# prepare the Python environment
shell(['python', '-m', 'pip', 'install', '--upgrade', 'pip'], env=env)
shell(['pip', 'install', '--upgrade', 'setuptools'], env=env)
shell(['pip', 'install', 'wheel'], env=env)
# run the before_build command
if before_build:
before_build_prepared = prepare_command(before_build, project=abs_project_dir)
shell([before_build_prepared], env=env)
# build the wheel
shell(['pip', 'wheel', abs_project_dir, '-w', built_wheel_dir, '--no-deps'] + get_build_verbosity_extra_flags(build_verbosity), env=env)
built_wheel = glob(built_wheel_dir+'/*.whl')[0]
if test_command:
# set up a virtual environment to install and test from, to make sure
# there are no dependencies that were pulled in at build time.
shell(['pip', 'install', 'virtualenv'], env=env)
venv_dir = tempfile.mkdtemp()
shell(['python', '-m', 'virtualenv', venv_dir], env=env)
virtualenv_env = env.copy()
virtualenv_env['PATH'] = os.pathsep.join([
os.path.join(venv_dir, 'Scripts'),
virtualenv_env['PATH'],
])
# check that we are using the Python from the virtual environment
shell(['which', 'python'], env=virtualenv_env)
# install the wheel
shell(['pip', 'install', built_wheel + test_extras], env=virtualenv_env)
# test the wheel
if test_requires:
shell(['pip', 'install'] + test_requires, env=virtualenv_env)
# run the tests from c:\, with an absolute path in the command
# (this ensures that Python runs the tests against the installed wheel
# and not the repo code)
test_command_prepared = prepare_command(test_command, project=abs_project_dir)
shell([test_command_prepared], cwd='c:\\', env=virtualenv_env)
# clean up
shutil.rmtree(venv_dir)
# we're all done here; move it to output (remove if already exists)
dst = os.path.join(output_dir, os.path.basename(built_wheel))
if os.path.isfile(dst):
os.remove(dst)
shutil.move(built_wheel, dst)
| def build(project_dir, output_dir, test_command, test_requires, test_extras, before_build, build_verbosity, build_selector, environment):
def simple_shell(args, env=None, cwd=None):
print('+ ' + ' '.join(args))
args = ['cmd', '/E:ON', '/V:ON', '/C'] + args
return subprocess.check_call(' '.join(args), env=env, cwd=cwd)
def download(url, dest):
print('+ Download ' + url + ' to ' + dest)
response = urlopen(url)
try:
with open(dest, 'wb') as file:
file.write(response.read())
finally:
response.close()
if IS_RUNNING_ON_AZURE or IS_RUNNING_ON_TRAVIS:
shell = simple_shell
else:
run_with_env = os.path.abspath(os.path.join(os.path.dirname(__file__), 'resources', 'appveyor_run_with_env.cmd'))
# run_with_env is a cmd file that sets the right environment variables
# to build on AppVeyor.
def shell(args, env=None, cwd=None):
# print the command executing for the logs
print('+ ' + ' '.join(args))
args = ['cmd', '/E:ON', '/V:ON', '/C', run_with_env] + args
return subprocess.check_call(' '.join(args), env=env, cwd=cwd)
abs_project_dir = os.path.abspath(project_dir)
temp_dir = tempfile.mkdtemp(prefix='cibuildwheel')
built_wheel_dir = os.path.join(temp_dir, 'built_wheel')
# install nuget as best way to provide python
nuget = 'C:\\nuget.exe'
download('https://dist.nuget.org/win-x86-commandline/latest/nuget.exe', nuget)
    # get pip for this installation, which does not have it.
get_pip_script = 'C:\\get-pip.py'
download('https://bootstrap.pypa.io/get-pip.py', get_pip_script)
python_configurations = get_python_configurations(build_selector)
for config in python_configurations:
config_python_path = get_python_path(config)
simple_shell([nuget, "install"] + get_nuget_args(config))
if not os.path.exists(os.path.join(config_python_path, 'Scripts', 'pip.exe')):
simple_shell([os.path.join(config_python_path, 'python.exe'), get_pip_script ])
# check python & pip exist for this configuration
assert os.path.exists(os.path.join(config_python_path, 'python.exe'))
assert os.path.exists(os.path.join(config_python_path, 'Scripts', 'pip.exe'))
# setup dirs
if os.path.exists(built_wheel_dir):
shutil.rmtree(built_wheel_dir)
os.makedirs(built_wheel_dir)
env = os.environ.copy()
# set up environment variables for run_with_env
env['PYTHON_VERSION'] = config.version
env['PYTHON_ARCH'] = config.arch
env['PATH'] = os.pathsep.join([
config_python_path,
os.path.join(config_python_path, 'Scripts'),
env['PATH']
])
env = environment.as_dictionary(prev_environment=env)
# for the logs - check we're running the right version of python
shell(['python', '--version'], env=env)
shell(['python', '-c', '"import struct; print(struct.calcsize(\'P\') * 8)\"'], env=env)
# prepare the Python environment
shell(['python', '-m', 'pip', 'install', '--upgrade', 'pip'], env=env)
shell(['pip', 'install', '--upgrade', 'setuptools'], env=env)
shell(['pip', 'install', 'wheel'], env=env)
# run the before_build command
if before_build:
before_build_prepared = prepare_command(before_build, project=abs_project_dir)
shell([before_build_prepared], env=env)
# build the wheel
shell(['pip', 'wheel', abs_project_dir, '-w', built_wheel_dir, '--no-deps'] + get_build_verbosity_extra_flags(build_verbosity), env=env)
built_wheel = glob(built_wheel_dir+'/*.whl')[0]
if test_command:
# set up a virtual environment to install and test from, to make sure
# there are no dependencies that were pulled in at build time.
shell(['pip', 'install', 'virtualenv'], env=env)
venv_dir = tempfile.mkdtemp()
shell(['python', '-m', 'virtualenv', venv_dir], env=env)
virtualenv_env = env.copy()
virtualenv_env['PATH'] = os.pathsep.join([
os.path.join(venv_dir, 'Scripts'),
virtualenv_env['PATH'],
])
# check that we are using the Python from the virtual environment
shell(['which', 'python'], env=virtualenv_env)
# install the wheel
shell(['pip', 'install', built_wheel + test_extras], env=virtualenv_env)
# test the wheel
if test_requires:
shell(['pip', 'install'] + test_requires, env=virtualenv_env)
# run the tests from c:\, with an absolute path in the command
# (this ensures that Python runs the tests against the installed wheel
# and not the repo code)
test_command_prepared = prepare_command(test_command, project=abs_project_dir)
shell([test_command_prepared], cwd='c:\\', env=virtualenv_env)
# clean up
shutil.rmtree(venv_dir)
# we're all done here; move it to output (remove if already exists)
dst = os.path.join(output_dir, os.path.basename(built_wheel))
if os.path.isfile(dst):
os.remove(dst)
shutil.move(built_wheel, dst)
|
29,799 | def paasta_mesh_status(args) -> int:
system_paasta_config = load_system_paasta_config()
return_codes = [0]
tasks = []
clusters_services_instances = apply_args_filters(args)
for cluster, service_instances in clusters_services_instances.items():
for service, instances in service_instances.items():
tasks.append(
(
report_mesh_status_for_cluster,
dict(
cluster=cluster,
service=service,
instances=list(instances.keys()),
system_paasta_config=system_paasta_config,
verbose=args.verbose,
),
)
)
with concurrent.futures.ThreadPoolExecutor(max_workers=20) as executor:
tasks = [executor.submit(t[0], **t[1]) for t in tasks] # type: ignore
for future in concurrent.futures.as_completed(tasks): # type: ignore
return_code, output = future.result()
print("\n".join(output))
return_codes.append(return_code)
return max(return_codes)
| def paasta_mesh_status(args) -> int:
system_paasta_config = load_system_paasta_config()
return_codes = [0]
tasks = []
clusters_services_instances = apply_args_filters(args)
for cluster, service_instances in clusters_services_instances.items():
for service, instances in service_instances.items():
tasks.append(
(
report_mesh_status_for_cluster,
dict(
cluster=cluster,
service=service,
instances=list(instances.keys()),
system_paasta_config=system_paasta_config,
verbose=args.verbose,
),
)
)
with concurrent.futures.ThreadPoolExecutor(max_workers=20) as executor:
tasks = [executor.submit(func, **params) for func, params in tasks] # type: ignore
for future in concurrent.futures.as_completed(tasks): # type: ignore
return_code, output = future.result()
print("\n".join(output))
return_codes.append(return_code)
return max(return_codes)
|
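The modified version unpacks each queued task as a (callable, kwargs) pair when submitting to the thread pool. A self-contained sketch of that pattern, with purely illustrative names:

import concurrent.futures

def report(cluster, service):
    # stand-in for report_mesh_status_for_cluster: returns (code, output lines)
    return 0, ["{}.{}: ok".format(cluster, service)]

tasks = [(report, dict(cluster="dev", service="web")),
         (report, dict(cluster="prod", service="api"))]
with concurrent.futures.ThreadPoolExecutor(max_workers=4) as executor:
    futures = [executor.submit(func, **params) for func, params in tasks]
    for future in concurrent.futures.as_completed(futures):
        return_code, output = future.result()
        print("\n".join(output), return_code)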
12,891 | def generate_query_argument_description(search_fields):
deprecated_info = (
"DEPRECATED: Will be removed in Saleor 2.10,"
' use "filter: {search: {}}" instead.\n'
)
header = "Supported filter parameters:\n"
supported_list = ""
for field in search_fields:
supported_list += "* {0}\n".format(field)
return deprecated_info + header + supported_list
| def generate_query_argument_description(search_fields):
deprecated_info = (
"DEPRECATED: Will be removed in Saleor 2.10,"
" use `filter: {search: {}}` instead.\n"
)
header = "Supported filter parameters:\n"
supported_list = ""
for field in search_fields:
supported_list += "* {0}\n".format(field)
return deprecated_info + header + supported_list
|
38,840 | def _find_latest_cached(url: str, cache_dir: str) -> Optional[str]:
filename = url_to_filename(url)
cache_path = os.path.join(cache_dir, filename)
candidates: List[Tuple[str, float]] = []
for path in glob.glob(cache_path + "*"):
if path.endswith(".json"):
continue
mtime = os.path.getmtime(path)
candidates.append((path, mtime))
# Sort candidates by modification time, neweste first.
candidates.sort(key=lambda x: x[1], reverse=True)
if candidates:
return candidates[0][0]
return None
| def _find_latest_cached(url: str, cache_dir: str) -> Optional[str]:
filename = url_to_filename(url)
cache_path = os.path.join(cache_dir, filename)
candidates: List[Tuple[str, float]] = []
for path in glob.glob(cache_path + "*"):
if path.endswith(".json"):
continue
mtime = os.path.getmtime(path)
candidates.append((path, mtime))
# Sort candidates by modification time, newest first.
candidates.sort(key=lambda x: x[1], reverse=True)
if candidates:
return candidates[0][0]
return None
|
39,661 | def main():
module = ForemanEntityApypieAnsibleModule(
argument_spec=dict(
name=dict(required=True),
operatingsystems=dict(type='list'),
state=dict(default='present', choices=['present', 'absent']),
),
supports_check_mode=True,
)
(entity_dict, state) = module.parse_params()
module.connect()
if 'operatingsystems' in entity_dict:
entity_dict['operatingsystems'] = module.find_resources('operatingsystems', entity_dict['operatingsystems'], thin=True)
entity = module.find_resource_by_name('architectures', name=entity_dict['name'], failsafe=True)
changed = module.ensure_resource_state('architectures', entity_dict, entity, state, name_map)
module.exit_json(changed=changed)
| def main():
module = ForemanEntityApypieAnsibleModule(
argument_spec=dict(
name=dict(required=True),
operatingsystems=dict(type='list'),
state=dict(default='present', choices=['present', 'absent']),
),
supports_check_mode=True,
)
    entity_dict = module.clean_params()
    # 'state' is no longer unpacked from parse_params(), so read it from the module params
    state = module.params['state']
module.connect()
if 'operatingsystems' in entity_dict:
entity_dict['operatingsystems'] = module.find_resources('operatingsystems', entity_dict['operatingsystems'], thin=True)
entity = module.find_resource_by_name('architectures', name=entity_dict['name'], failsafe=True)
changed = module.ensure_resource_state('architectures', entity_dict, entity, state, name_map)
module.exit_json(changed=changed)
|
5,836 | def arr_to_chars(arr):
''' Convert string array to char array '''
dims = list(arr.shape)
if not dims:
dims = [1]
dims.append(int(arr.dtype.str[2:]))
arr = np.ndarray(shape=dims,
dtype=arr_dtype_number(arr, 1),
buffer=arr)
empties = [arr == np.array([''], dtype=arr_dtype_number(arr, 1))]
if not np.any(empties):
return arr
arr = arr.copy()
arr[tuple(empties)] = ' '
return arr
| def arr_to_chars(arr):
''' Convert string array to char array '''
dims = list(arr.shape)
if not dims:
dims = [1]
dims.append(int(arr.dtype.str[2:]))
arr = np.ndarray(shape=dims,
dtype=arr_dtype_number(arr, 1),
buffer=arr)
empties = [arr == np.array('', dtype=arr.dtype)]
if not np.any(empties):
return arr
arr = arr.copy()
arr[tuple(empties)] = ' '
return arr
|
25,068 | def compute_chunksize(src, w, h, chunksize=None, max_mem=None):
"""
Attempts to compute a chunksize for the resampling output array
that is as close as possible to the input array chunksize, while
also respecting the maximum memory constraint to avoid loading
    too much data into memory at the same time.
Parameters
----------
src : dask.array.Array
The source array to resample
w : int
New grid width
h : int
New grid height
chunksize : tuple(int, int) (optional)
Size of the output chunks. By default this the chunk size is
inherited from the *src* array.
max_mem : int (optional)
The maximum number of bytes that should be loaded into memory
during the regridding operation.
Returns
-------
chunksize : tuple(int, int)
Size of the output chunks.
"""
start_chunksize = src.chunksize if chunksize is None else chunksize
if max_mem is None:
return start_chunksize
sh, sw = src.shape
height_fraction = float(sh)/h
width_fraction = float(sw)/w
ch, cw = start_chunksize
dim = True
nbytes = src.dtype.itemsize
while ((ch * height_fraction) * (cw * width_fraction) * nbytes) > max_mem:
if dim:
cw -= 1
else:
ch -= 1
dim = not dim
if ch == 0 or cw == 0:
min_mem = height_fraction * width_fraction * nbytes
raise ValueError(
"Given the memory constraints the resampling operation "
"could not find a chunksize that avoids loading too much "
"data into memory. Either relax the memory constraint to "
"a minimum of %d bytes or resample to a larger grid size. "
"Note: A future implementation may handle this condition "
"by declaring temporary arrays." % min_mem)
return ch, cw
| def compute_chunksize(src, w, h, chunksize=None, max_mem=None):
"""
Attempts to compute a chunksize for the resampling output array
that is as close as possible to the input array chunksize, while
also respecting the maximum memory constraint to avoid loading
    too much data into memory at the same time.
Parameters
----------
src : dask.array.Array
The source array to resample
w : int
New grid width
h : int
New grid height
chunksize : tuple(int, int) (optional)
Size of the output chunks. By default the chunk size is
inherited from the *src* array.
max_mem : int (optional)
The maximum number of bytes that should be loaded into memory
during the regridding operation.
Returns
-------
chunksize : tuple(int, int)
Size of the output chunks.
"""
start_chunksize = src.chunksize if chunksize is None else chunksize
if max_mem is None:
return start_chunksize
sh, sw = src.shape
height_fraction = float(sh)/h
width_fraction = float(sw)/w
ch, cw = start_chunksize
dim = True
nbytes = src.dtype.itemsize
while ((ch * height_fraction) * (cw * width_fraction) * nbytes) > max_mem:
if dim:
cw -= 1
else:
ch -= 1
dim = not dim
if ch == 0 or cw == 0:
min_mem = height_fraction * width_fraction * nbytes
raise ValueError(
"Given the memory constraints the resampling operation "
"could not find a chunksize that avoids loading too much "
"data into memory. Either relax the memory constraint to "
"a minimum of %d bytes or resample to a larger grid size. "
"Note: A future implementation may handle this condition "
"by declaring temporary arrays." % min_mem)
return ch, cw
|
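A hedged usage sketch, assuming the compute_chunksize() above is importable and dask is installed; the function only reads src.chunksize, src.shape and src.dtype.itemsize, so any dask array is enough to exercise it.

import dask.array as da

src = da.ones((4000, 4000), chunks=(1000, 1000), dtype='float64')
# downsample to a 2000x2000 grid while capping per-chunk memory at ~16 MB;
# the returned chunk shape is shrunk below (1000, 1000) until it fits
ch, cw = compute_chunksize(src, w=2000, h=2000, max_mem=16 * 1024 ** 2)
print(ch, cw)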
23,109 | def map_blocks(
func,
*args,
name=None,
token=None,
dtype=None,
chunks=None,
drop_axis=[],
new_axis=None,
meta=None,
**kwargs,
):
"""Map a function across all blocks of a dask array.
Note that ``map_blocks`` will attempt to automatically determine the output
array type by calling ``func`` on 0-d versions of the inputs. Please refer to
the ``meta`` keyword argument below if you expect that the function will not
succeed when operating on 0-d arrays.
Parameters
----------
func : callable
Function to apply to every block in the array.
args : dask arrays or other objects
dtype : np.dtype, optional
The ``dtype`` of the output array. It is recommended to provide this.
If not provided, will be inferred by applying the function to a small
set of fake data.
chunks : tuple, optional
Chunk shape of resulting blocks if the function does not preserve
shape. If not provided, the resulting array is assumed to have the same
block structure as the first input array.
drop_axis : number or iterable, optional
Dimensions lost by the function.
new_axis : number or iterable, optional
New dimensions created by the function. Note that these are applied
after ``drop_axis`` (if present).
token : string, optional
The key prefix to use for the output array. If not provided, will be
determined from the function name.
name : string, optional
The key name to use for the output array. Note that this fully
specifies the output key name, and must be unique. If not provided,
will be determined by a hash of the arguments.
meta : array-like, optional
The ``meta`` of the output array, when specified is expected to be an
array of the same type and dtype of that returned when calling ``.compute()``
on the array returned by this function. When not provided, ``meta`` will be
inferred by applying the function to a small set of fake data, usually a
0-d array. It's important to ensure that ``func`` can successfully complete
computation without raising exceptions when 0-d is passed to it, providing
``meta`` will be required otherwise. If the output type is known beforehand
(e.g., ``np.ndarray``, ``cupy.ndarray``), an empty array of such type dtype
can be passed, for example: ``meta=np.array((), dtype=np.int32)``.
**kwargs :
Other keyword arguments to pass to function. Values must be constants
(not dask.arrays)
See Also
--------
dask.array.blockwise : Generalized operation with control over block alignment.
Examples
--------
>>> import dask.array as da
>>> x = da.arange(6, chunks=3)
>>> x.map_blocks(lambda x: x * 2).compute()
array([ 0, 2, 4, 6, 8, 10])
The ``da.map_blocks`` function can also accept multiple arrays.
>>> d = da.arange(5, chunks=2)
>>> e = da.arange(5, chunks=2)
>>> f = da.map_blocks(lambda a, b: a + b**2, d, e)
>>> f.compute()
array([ 0, 2, 6, 12, 20])
If the function changes shape of the blocks then you must provide chunks
explicitly.
>>> y = x.map_blocks(lambda x: x[::2], chunks=((2, 2),))
You have a bit of freedom in specifying chunks. If all of the output chunk
sizes are the same, you can provide just that chunk size as a single tuple.
>>> a = da.arange(18, chunks=(6,))
>>> b = a.map_blocks(lambda x: x[:3], chunks=(3,))
If the function changes the dimension of the blocks you must specify the
created or destroyed dimensions.
>>> b = a.map_blocks(lambda x: x[None, :, None], chunks=(1, 6, 1),
... new_axis=[0, 2])
If ``chunks`` is specified but ``new_axis`` is not, then it is inferred to
add the necessary number of axes on the left.
Map_blocks aligns blocks by block positions without regard to shape. In the
following example we have two arrays with the same number of blocks but
with different shape and chunk sizes.
>>> x = da.arange(1000, chunks=(100,))
>>> y = da.arange(100, chunks=(10,))
The relevant attribute to match is numblocks.
>>> x.numblocks
(10,)
>>> y.numblocks
(10,)
If these match (up to broadcasting rules) then we can map arbitrary
functions across blocks
>>> def func(a, b):
... return np.array([a.max(), b.max()])
>>> da.map_blocks(func, x, y, chunks=(2,), dtype='i8')
dask.array<func, shape=(20,), dtype=int64, chunksize=(2,), chunktype=numpy.ndarray>
>>> _.compute()
array([ 99, 9, 199, 19, 299, 29, 399, 39, 499, 49, 599, 59, 699,
69, 799, 79, 899, 89, 999, 99])
    Your block function gets information about where it is in the array by
accepting a special ``block_info`` or ``block_id`` keyword argument.
>>> def func(block_info=None):
... pass
This will receive the following information:
>>> block_info # doctest: +SKIP
{0: {'shape': (1000,),
'num-chunks': (10,),
'chunk-location': (4,),
'array-location': [(400, 500)]},
None: {'shape': (1000,),
'num-chunks': (10,),
'chunk-location': (4,),
'array-location': [(400, 500)],
'chunk-shape': (100,),
'dtype': dtype('float64')}}
For each argument and keyword arguments that are dask arrays (the positions
of which are the first index), you will receive the shape of the full
array, the number of chunks of the full array in each dimension, the chunk
location (for example the fourth chunk over in the first dimension), and
the array location (for example the slice corresponding to ``40:50``). The
same information is provided for the output, with the key ``None``, plus
the shape and dtype that should be returned.
These features can be combined to synthesize an array from scratch, for
example:
>>> def func(block_info=None):
... loc = block_info[None]['array-location'][0]
... return np.arange(loc[0], loc[1])
>>> da.map_blocks(func, chunks=((4, 4),), dtype=np.float_)
dask.array<func, shape=(8,), dtype=float64, chunksize=(4,), chunktype=numpy.ndarray>
>>> _.compute()
array([0, 1, 2, 3, 4, 5, 6, 7])
``block_id`` is similar to ``block_info`` but contains only the ``chunk_location``:
>>> def func(block_id=None):
... pass
This will receive the following information:
>>> block_id # doctest: +SKIP
(4, 3)
You may specify the key name prefix of the resulting task in the graph with
the optional ``token`` keyword argument.
>>> x.map_blocks(lambda x: x + 1, name='increment')
dask.array<increment, shape=(1000,), dtype=int64, chunksize=(100,), chunktype=numpy.ndarray>
For functions that may not handle 0-d arrays, it's also possible to specify
``meta`` with an empty array matching the type of the expected result. In
the example below, ``func`` will result in an ``IndexError`` when computing
``meta``:
>>> da.map_blocks(lambda x: x[2], da.random.random(5), meta=np.array(()))
dask.array<lambda, shape=(5,), dtype=float64, chunksize=(5,), chunktype=numpy.ndarray>
Similarly, it's possible to specify a non-NumPy array to ``meta``, and provide
a ``dtype``:
>>> import cupy # doctest: +SKIP
>>> rs = da.random.RandomState(RandomState=cupy.random.RandomState) # doctest: +SKIP
>>> dt = np.float32
>>> da.map_blocks(lambda x: x[2], rs.random(5, dtype=dt), meta=cupy.array((), dtype=dt)) # doctest: +SKIP
dask.array<lambda, shape=(5,), dtype=float32, chunksize=(5,), chunktype=cupy.ndarray>
"""
if not callable(func):
msg = (
"First argument must be callable function, not %s\n"
"Usage: da.map_blocks(function, x)\n"
" or: da.map_blocks(function, x, y, z)"
)
raise TypeError(msg % type(func).__name__)
if token:
warnings.warn("The token= keyword to map_blocks has been moved to name=")
name = token
name = "%s-%s" % (name or funcname(func), tokenize(func, *args, **kwargs))
new_axes = {}
if isinstance(drop_axis, Number):
drop_axis = [drop_axis]
if isinstance(new_axis, Number):
new_axis = [new_axis] # TODO: handle new_axis
arrs = [a for a in args if isinstance(a, Array)]
argpairs = [
(a, tuple(range(a.ndim))[::-1]) if isinstance(a, Array) else (a, None)
for a in args
]
if arrs:
out_ind = tuple(range(max(a.ndim for a in arrs)))[::-1]
else:
out_ind = ()
original_kwargs = kwargs
if dtype is None and meta is None:
try:
meta = compute_meta(func, dtype, *args, **kwargs)
except Exception:
pass
dtype = apply_infer_dtype(func, args, original_kwargs, "map_blocks")
if drop_axis:
if any(i < 0 for i in drop_axis):
raise ValueError("Drop_axis is {0}, expected axis to be non-negative.".format(drop_axis))
out_ind = tuple(x for i, x in enumerate(out_ind) if i not in drop_axis)
if new_axis is None and chunks is not None and len(out_ind) < len(chunks):
new_axis = range(len(chunks) - len(out_ind))
if new_axis:
# new_axis = [x + len(drop_axis) for x in new_axis]
out_ind = list(out_ind)
for ax in sorted(new_axis):
n = len(out_ind) + len(drop_axis)
out_ind.insert(ax, n)
if chunks is not None:
new_axes[n] = chunks[ax]
else:
new_axes[n] = 1
out_ind = tuple(out_ind)
if max(new_axis) > max(out_ind):
raise ValueError("New_axis values do not fill in all dimensions")
if chunks is not None:
if len(chunks) != len(out_ind):
raise ValueError(
"Provided chunks have {0} dims, expected {1} "
"dims.".format(len(chunks), len(out_ind))
)
adjust_chunks = dict(zip(out_ind, chunks))
else:
adjust_chunks = None
out = blockwise(
func,
out_ind,
*concat(argpairs),
name=name,
new_axes=new_axes,
dtype=dtype,
concatenate=True,
align_arrays=False,
adjust_chunks=adjust_chunks,
meta=meta,
**kwargs,
)
extra_argpairs = []
extra_names = []
# If func has block_id as an argument, construct an array of block IDs and
# prepare to inject it.
if has_keyword(func, "block_id"):
block_id_name = "block-id-" + out.name
block_id_dsk = {
(block_id_name,) + block_id: block_id
for block_id in product(*(range(len(c)) for c in out.chunks))
}
block_id_array = Array(
block_id_dsk,
block_id_name,
chunks=tuple((1,) * len(c) for c in out.chunks),
dtype=np.object_,
)
extra_argpairs.append((block_id_array, out_ind))
extra_names.append("block_id")
# If func has block_info as an argument, construct an array of block info
# objects and prepare to inject it.
if has_keyword(func, "block_info"):
starts = {}
num_chunks = {}
shapes = {}
for i, (arg, in_ind) in enumerate(argpairs):
if in_ind is not None:
shapes[i] = arg.shape
if drop_axis:
# We concatenate along dropped axes, so we need to treat them
# as if there is only a single chunk.
starts[i] = [
(
cached_cumsum(arg.chunks[j], initial_zero=True)
if ind in out_ind
else [0, arg.shape[j]]
)
for j, ind in enumerate(in_ind)
]
num_chunks[i] = tuple(len(s) - 1 for s in starts[i])
else:
starts[i] = [
cached_cumsum(c, initial_zero=True) for c in arg.chunks
]
num_chunks[i] = arg.numblocks
out_starts = [cached_cumsum(c, initial_zero=True) for c in out.chunks]
block_info_name = "block-info-" + out.name
block_info_dsk = {}
for block_id in product(*(range(len(c)) for c in out.chunks)):
# Get position of chunk, indexed by axis labels
location = {out_ind[i]: loc for i, loc in enumerate(block_id)}
info = {}
for i, shape in shapes.items():
# Compute chunk key in the array, taking broadcasting into
# account. We don't directly know which dimensions are
# broadcast, but any dimension with only one chunk can be
# treated as broadcast.
arr_k = tuple(
location.get(ind, 0) if num_chunks[i][j] > 1 else 0
for j, ind in enumerate(argpairs[i][1])
)
info[i] = {
"shape": shape,
"num-chunks": num_chunks[i],
"array-location": [
(starts[i][ij][j], starts[i][ij][j + 1])
for ij, j in enumerate(arr_k)
],
"chunk-location": arr_k,
}
info[None] = {
"shape": out.shape,
"num-chunks": out.numblocks,
"array-location": [
(out_starts[ij][j], out_starts[ij][j + 1])
for ij, j in enumerate(block_id)
],
"chunk-location": block_id,
"chunk-shape": tuple(
out.chunks[ij][j] for ij, j in enumerate(block_id)
),
"dtype": dtype,
}
block_info_dsk[(block_info_name,) + block_id] = info
block_info = Array(
block_info_dsk,
block_info_name,
chunks=tuple((1,) * len(c) for c in out.chunks),
dtype=np.object_,
)
extra_argpairs.append((block_info, out_ind))
extra_names.append("block_info")
if extra_argpairs:
# Rewrite the Blockwise layer. It would be nice to find a way to
# avoid doing it twice, but it's currently needed to determine
# out.chunks from the first pass. Since it constructs a Blockwise
# rather than an expanded graph, it shouldn't be too expensive.
out = blockwise(
_pass_extra_kwargs,
out_ind,
func,
None,
tuple(extra_names),
None,
*concat(extra_argpairs),
*concat(argpairs),
name=out.name,
dtype=out.dtype,
concatenate=True,
align_arrays=False,
adjust_chunks=dict(zip(out_ind, out.chunks)),
meta=meta,
**kwargs,
)
return out
| def map_blocks(
func,
*args,
name=None,
token=None,
dtype=None,
chunks=None,
drop_axis=[],
new_axis=None,
meta=None,
**kwargs,
):
"""Map a function across all blocks of a dask array.
Note that ``map_blocks`` will attempt to automatically determine the output
array type by calling ``func`` on 0-d versions of the inputs. Please refer to
the ``meta`` keyword argument below if you expect that the function will not
succeed when operating on 0-d arrays.
Parameters
----------
func : callable
Function to apply to every block in the array.
args : dask arrays or other objects
dtype : np.dtype, optional
The ``dtype`` of the output array. It is recommended to provide this.
If not provided, will be inferred by applying the function to a small
set of fake data.
chunks : tuple, optional
Chunk shape of resulting blocks if the function does not preserve
shape. If not provided, the resulting array is assumed to have the same
block structure as the first input array.
drop_axis : number or iterable, optional
Dimensions lost by the function.
new_axis : number or iterable, optional
New dimensions created by the function. Note that these are applied
after ``drop_axis`` (if present).
token : string, optional
The key prefix to use for the output array. If not provided, will be
determined from the function name.
name : string, optional
The key name to use for the output array. Note that this fully
specifies the output key name, and must be unique. If not provided,
will be determined by a hash of the arguments.
meta : array-like, optional
The ``meta`` of the output array, when specified is expected to be an
array of the same type and dtype of that returned when calling ``.compute()``
on the array returned by this function. When not provided, ``meta`` will be
inferred by applying the function to a small set of fake data, usually a
0-d array. It's important to ensure that ``func`` can successfully complete
computation without raising exceptions when 0-d is passed to it, providing
``meta`` will be required otherwise. If the output type is known beforehand
(e.g., ``np.ndarray``, ``cupy.ndarray``), an empty array of such type dtype
can be passed, for example: ``meta=np.array((), dtype=np.int32)``.
**kwargs :
Other keyword arguments to pass to function. Values must be constants
(not dask.arrays)
See Also
--------
dask.array.blockwise : Generalized operation with control over block alignment.
Examples
--------
>>> import dask.array as da
>>> x = da.arange(6, chunks=3)
>>> x.map_blocks(lambda x: x * 2).compute()
array([ 0, 2, 4, 6, 8, 10])
The ``da.map_blocks`` function can also accept multiple arrays.
>>> d = da.arange(5, chunks=2)
>>> e = da.arange(5, chunks=2)
>>> f = da.map_blocks(lambda a, b: a + b**2, d, e)
>>> f.compute()
array([ 0, 2, 6, 12, 20])
If the function changes shape of the blocks then you must provide chunks
explicitly.
>>> y = x.map_blocks(lambda x: x[::2], chunks=((2, 2),))
You have a bit of freedom in specifying chunks. If all of the output chunk
sizes are the same, you can provide just that chunk size as a single tuple.
>>> a = da.arange(18, chunks=(6,))
>>> b = a.map_blocks(lambda x: x[:3], chunks=(3,))
If the function changes the dimension of the blocks you must specify the
created or destroyed dimensions.
>>> b = a.map_blocks(lambda x: x[None, :, None], chunks=(1, 6, 1),
... new_axis=[0, 2])
If ``chunks`` is specified but ``new_axis`` is not, then it is inferred to
add the necessary number of axes on the left.
Map_blocks aligns blocks by block positions without regard to shape. In the
following example we have two arrays with the same number of blocks but
with different shape and chunk sizes.
>>> x = da.arange(1000, chunks=(100,))
>>> y = da.arange(100, chunks=(10,))
The relevant attribute to match is numblocks.
>>> x.numblocks
(10,)
>>> y.numblocks
(10,)
If these match (up to broadcasting rules) then we can map arbitrary
functions across blocks
>>> def func(a, b):
... return np.array([a.max(), b.max()])
>>> da.map_blocks(func, x, y, chunks=(2,), dtype='i8')
dask.array<func, shape=(20,), dtype=int64, chunksize=(2,), chunktype=numpy.ndarray>
>>> _.compute()
array([ 99, 9, 199, 19, 299, 29, 399, 39, 499, 49, 599, 59, 699,
69, 799, 79, 899, 89, 999, 99])
    Your block function gets information about where it is in the array by
accepting a special ``block_info`` or ``block_id`` keyword argument.
>>> def func(block_info=None):
... pass
This will receive the following information:
>>> block_info # doctest: +SKIP
{0: {'shape': (1000,),
'num-chunks': (10,),
'chunk-location': (4,),
'array-location': [(400, 500)]},
None: {'shape': (1000,),
'num-chunks': (10,),
'chunk-location': (4,),
'array-location': [(400, 500)],
'chunk-shape': (100,),
'dtype': dtype('float64')}}
For each argument and keyword arguments that are dask arrays (the positions
of which are the first index), you will receive the shape of the full
array, the number of chunks of the full array in each dimension, the chunk
location (for example the fourth chunk over in the first dimension), and
the array location (for example the slice corresponding to ``40:50``). The
same information is provided for the output, with the key ``None``, plus
the shape and dtype that should be returned.
These features can be combined to synthesize an array from scratch, for
example:
>>> def func(block_info=None):
... loc = block_info[None]['array-location'][0]
... return np.arange(loc[0], loc[1])
>>> da.map_blocks(func, chunks=((4, 4),), dtype=np.float_)
dask.array<func, shape=(8,), dtype=float64, chunksize=(4,), chunktype=numpy.ndarray>
>>> _.compute()
array([0, 1, 2, 3, 4, 5, 6, 7])
``block_id`` is similar to ``block_info`` but contains only the ``chunk_location``:
>>> def func(block_id=None):
... pass
This will receive the following information:
>>> block_id # doctest: +SKIP
(4, 3)
You may specify the key name prefix of the resulting task in the graph with
the optional ``token`` keyword argument.
>>> x.map_blocks(lambda x: x + 1, name='increment')
dask.array<increment, shape=(1000,), dtype=int64, chunksize=(100,), chunktype=numpy.ndarray>
For functions that may not handle 0-d arrays, it's also possible to specify
``meta`` with an empty array matching the type of the expected result. In
the example below, ``func`` will result in an ``IndexError`` when computing
``meta``:
>>> da.map_blocks(lambda x: x[2], da.random.random(5), meta=np.array(()))
dask.array<lambda, shape=(5,), dtype=float64, chunksize=(5,), chunktype=numpy.ndarray>
Similarly, it's possible to specify a non-NumPy array to ``meta``, and provide
a ``dtype``:
>>> import cupy # doctest: +SKIP
>>> rs = da.random.RandomState(RandomState=cupy.random.RandomState) # doctest: +SKIP
>>> dt = np.float32
>>> da.map_blocks(lambda x: x[2], rs.random(5, dtype=dt), meta=cupy.array((), dtype=dt)) # doctest: +SKIP
dask.array<lambda, shape=(5,), dtype=float32, chunksize=(5,), chunktype=cupy.ndarray>
"""
if not callable(func):
msg = (
"First argument must be callable function, not %s\n"
"Usage: da.map_blocks(function, x)\n"
" or: da.map_blocks(function, x, y, z)"
)
raise TypeError(msg % type(func).__name__)
if token:
warnings.warn("The token= keyword to map_blocks has been moved to name=")
name = token
name = "%s-%s" % (name or funcname(func), tokenize(func, *args, **kwargs))
new_axes = {}
if isinstance(drop_axis, Number):
drop_axis = [drop_axis]
if isinstance(new_axis, Number):
new_axis = [new_axis] # TODO: handle new_axis
arrs = [a for a in args if isinstance(a, Array)]
argpairs = [
(a, tuple(range(a.ndim))[::-1]) if isinstance(a, Array) else (a, None)
for a in args
]
if arrs:
out_ind = tuple(range(max(a.ndim for a in arrs)))[::-1]
else:
out_ind = ()
original_kwargs = kwargs
if dtype is None and meta is None:
try:
meta = compute_meta(func, dtype, *args, **kwargs)
except Exception:
pass
dtype = apply_infer_dtype(func, args, original_kwargs, "map_blocks")
if drop_axis:
if any(i < 0 for i in drop_axis):
raise ValueError(f"Expected drop_axis to be non-negative; got {drop_axis}.")
out_ind = tuple(x for i, x in enumerate(out_ind) if i not in drop_axis)
if new_axis is None and chunks is not None and len(out_ind) < len(chunks):
new_axis = range(len(chunks) - len(out_ind))
if new_axis:
# new_axis = [x + len(drop_axis) for x in new_axis]
out_ind = list(out_ind)
for ax in sorted(new_axis):
n = len(out_ind) + len(drop_axis)
out_ind.insert(ax, n)
if chunks is not None:
new_axes[n] = chunks[ax]
else:
new_axes[n] = 1
out_ind = tuple(out_ind)
if max(new_axis) > max(out_ind):
raise ValueError("New_axis values do not fill in all dimensions")
if chunks is not None:
if len(chunks) != len(out_ind):
raise ValueError(
"Provided chunks have {0} dims, expected {1} "
"dims.".format(len(chunks), len(out_ind))
)
adjust_chunks = dict(zip(out_ind, chunks))
else:
adjust_chunks = None
out = blockwise(
func,
out_ind,
*concat(argpairs),
name=name,
new_axes=new_axes,
dtype=dtype,
concatenate=True,
align_arrays=False,
adjust_chunks=adjust_chunks,
meta=meta,
**kwargs,
)
extra_argpairs = []
extra_names = []
# If func has block_id as an argument, construct an array of block IDs and
# prepare to inject it.
if has_keyword(func, "block_id"):
block_id_name = "block-id-" + out.name
block_id_dsk = {
(block_id_name,) + block_id: block_id
for block_id in product(*(range(len(c)) for c in out.chunks))
}
block_id_array = Array(
block_id_dsk,
block_id_name,
chunks=tuple((1,) * len(c) for c in out.chunks),
dtype=np.object_,
)
extra_argpairs.append((block_id_array, out_ind))
extra_names.append("block_id")
# If func has block_info as an argument, construct an array of block info
# objects and prepare to inject it.
if has_keyword(func, "block_info"):
starts = {}
num_chunks = {}
shapes = {}
for i, (arg, in_ind) in enumerate(argpairs):
if in_ind is not None:
shapes[i] = arg.shape
if drop_axis:
# We concatenate along dropped axes, so we need to treat them
# as if there is only a single chunk.
starts[i] = [
(
cached_cumsum(arg.chunks[j], initial_zero=True)
if ind in out_ind
else [0, arg.shape[j]]
)
for j, ind in enumerate(in_ind)
]
num_chunks[i] = tuple(len(s) - 1 for s in starts[i])
else:
starts[i] = [
cached_cumsum(c, initial_zero=True) for c in arg.chunks
]
num_chunks[i] = arg.numblocks
out_starts = [cached_cumsum(c, initial_zero=True) for c in out.chunks]
block_info_name = "block-info-" + out.name
block_info_dsk = {}
for block_id in product(*(range(len(c)) for c in out.chunks)):
# Get position of chunk, indexed by axis labels
location = {out_ind[i]: loc for i, loc in enumerate(block_id)}
info = {}
for i, shape in shapes.items():
# Compute chunk key in the array, taking broadcasting into
# account. We don't directly know which dimensions are
# broadcast, but any dimension with only one chunk can be
# treated as broadcast.
arr_k = tuple(
location.get(ind, 0) if num_chunks[i][j] > 1 else 0
for j, ind in enumerate(argpairs[i][1])
)
info[i] = {
"shape": shape,
"num-chunks": num_chunks[i],
"array-location": [
(starts[i][ij][j], starts[i][ij][j + 1])
for ij, j in enumerate(arr_k)
],
"chunk-location": arr_k,
}
info[None] = {
"shape": out.shape,
"num-chunks": out.numblocks,
"array-location": [
(out_starts[ij][j], out_starts[ij][j + 1])
for ij, j in enumerate(block_id)
],
"chunk-location": block_id,
"chunk-shape": tuple(
out.chunks[ij][j] for ij, j in enumerate(block_id)
),
"dtype": dtype,
}
block_info_dsk[(block_info_name,) + block_id] = info
block_info = Array(
block_info_dsk,
block_info_name,
chunks=tuple((1,) * len(c) for c in out.chunks),
dtype=np.object_,
)
extra_argpairs.append((block_info, out_ind))
extra_names.append("block_info")
if extra_argpairs:
# Rewrite the Blockwise layer. It would be nice to find a way to
# avoid doing it twice, but it's currently needed to determine
# out.chunks from the first pass. Since it constructs a Blockwise
# rather than an expanded graph, it shouldn't be too expensive.
out = blockwise(
_pass_extra_kwargs,
out_ind,
func,
None,
tuple(extra_names),
None,
*concat(extra_argpairs),
*concat(argpairs),
name=out.name,
dtype=out.dtype,
concatenate=True,
align_arrays=False,
adjust_chunks=dict(zip(out_ind, out.chunks)),
meta=meta,
**kwargs,
)
return out
|
42,009 | def test_interpolate_zmap() -> None:
contour_point_num = 2
zmap = {0 + 0j: 1, 1 + 1j: 4}
interpolated = {1 + 0j: 2.5, 1 + 1j: 4, 0 + 0j: 1, 0 + 1j: 2.5}
zmap = _interpolate_zmap(zmap, contour_point_num) # type: ignore
for coord, value in zmap.items():
expected_at_coord = interpolated.get(coord)
assert value == expected_at_coord
| def test_interpolate_zmap() -> None:
contour_point_num = 2
zmap = {0 + 0j: 1, 1 + 1j: 4}
interpolated = {1 + 0j: 2.5, 1 + 1j: 4, 0 + 0j: 1, 0 + 1j: 2.5}
_interpolate_zmap(zmap, contour_point_num) # type: ignore
for coord, value in zmap.items():
expected_at_coord = interpolated.get(coord)
assert value == expected_at_coord
|
13,999 | def show_versions(as_json=False):
"""
Print system information and installed module versions.
Example
-------
> python -c "import geopandas as gpd; gpd.show_versions()"
"""
sys_info = get_sys_info()
deps = [
# (MODULE_NAME, f(mod) -> mod version)
("pandas", lambda mod: mod.__version__),
("pytest", lambda mod: mod.__version__),
("pip", lambda mod: mod.__version__),
("setuptools", lambda mod: mod.__version__),
("Cython", lambda mod: mod.__version__),
("numpy", lambda mod: mod.version.version),
("conda-forge", lambda mod: mod.version.version),
("shapely", lambda mod: mod.__version__),
("fiona", lambda mod: mod.__version__),
("pyproj", lambda mod: mod.__version__),
("six", lambda mod: mod.__version__),
("rtree", lambda mod: mod.__version__),
]
deps_blob = list()
for (modname, ver_f) in deps:
try:
if modname in sys.modules:
mod = sys.modules[modname]
else:
mod = importlib.import_module(modname)
ver = ver_f(mod)
deps_blob.append((modname, ver))
except ImportError:
deps_blob.append((modname, None))
if (as_json):
try:
import json
except ImportError:
import simplejson as json
j = dict(system=dict(sys_info), dependencies=dict(deps_blob))
if as_json is True:
print(j)
else:
with codecs.open(as_json, "wb", encoding='utf8') as f:
json.dump(j, f, indent=2)
else:
print("\nINSTALLED VERSIONS")
print("------------------")
for k, stat in sys_info:
print("{k}: {stat}".format(k=k, stat=stat))
print("")
for k, stat in deps_blob:
print("{k}: {stat}".format(k=k, stat=stat))
| def show_versions(as_json=False):
"""
Print system information and installed module versions.
Example
-------
> python -c "import geopandas; geopandas.show_versions()"
"""
sys_info = get_sys_info()
deps = [
# (MODULE_NAME, f(mod) -> mod version)
("pandas", lambda mod: mod.__version__),
("pytest", lambda mod: mod.__version__),
("pip", lambda mod: mod.__version__),
("setuptools", lambda mod: mod.__version__),
("Cython", lambda mod: mod.__version__),
("numpy", lambda mod: mod.version.version),
("conda-forge", lambda mod: mod.version.version),
("shapely", lambda mod: mod.__version__),
("fiona", lambda mod: mod.__version__),
("pyproj", lambda mod: mod.__version__),
("six", lambda mod: mod.__version__),
("rtree", lambda mod: mod.__version__),
]
deps_blob = list()
for (modname, ver_f) in deps:
try:
if modname in sys.modules:
mod = sys.modules[modname]
else:
mod = importlib.import_module(modname)
ver = ver_f(mod)
deps_blob.append((modname, ver))
except ImportError:
deps_blob.append((modname, None))
if (as_json):
try:
import json
except ImportError:
import simplejson as json
j = dict(system=dict(sys_info), dependencies=dict(deps_blob))
if as_json is True:
print(j)
else:
with codecs.open(as_json, "wb", encoding='utf8') as f:
json.dump(j, f, indent=2)
else:
print("\nINSTALLED VERSIONS")
print("------------------")
for k, stat in sys_info:
print("{k}: {stat}".format(k=k, stat=stat))
print("")
for k, stat in deps_blob:
print("{k}: {stat}".format(k=k, stat=stat))
|
26,560 | def show_dag(args):
dag = get_dag(args)
dot = graphviz.Digraph(args.dag_id, graph_attr={
'rankdir': 'LR',
'labelloc': "t",
'label': args.dag_id
})
for task in dag.tasks:
dot.node(task.task_id)
for downstream_task_id in task.downstream_task_ids:
dot.edge(task.task_id, downstream_task_id)
if args.save:
filename, _, format = args.save.rpartition('.')
dot.render(filename=filename, format=format, cleanup=True)
print("File {} saved".format(args.save))
elif args.imgcat:
data = dot.pipe(format='png')
try:
proc = subprocess.Popen("imgcat", stdout=subprocess.PIPE, stdin=subprocess.PIPE)
except OSError as e:
if e.errno == errno.ENOENT:
raise AirflowException(
"Failed to execute. Make sure the imgcat executables are on your systems \'PATH\'"
)
else:
raise
out, err = proc.communicate(data)
if out:
print(out.decode('utf-8'))
if err:
print(err.decode('utf-8'))
else:
print(dot.source)
| def show_dag(args):
dag = get_dag(args)
dot = graphviz.Digraph(args.dag_id, graph_attr={
'rankdir': 'LR',
'labelloc': "t",
'label': args.dag_id
})
for task in dag.tasks:
dot.node(task.task_id)
for downstream_task_id in task.downstream_task_ids:
dot.edge(task.task_id, downstream_task_id)
if args.save:
filename, _, fileformat = args.save.rpartition('.')
        dot.render(filename=filename, format=fileformat, cleanup=True)
print("File {} saved".format(args.save))
elif args.imgcat:
data = dot.pipe(format='png')
try:
proc = subprocess.Popen("imgcat", stdout=subprocess.PIPE, stdin=subprocess.PIPE)
except OSError as e:
if e.errno == errno.ENOENT:
raise AirflowException(
"Failed to execute. Make sure the imgcat executables are on your systems \'PATH\'"
)
else:
raise
out, err = proc.communicate(data)
if out:
print(out.decode('utf-8'))
if err:
print(err.decode('utf-8'))
else:
print(dot.source)
|
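The rename in the modified version avoids shadowing the built-in format(); a tiny standalone illustration of the rpartition split it relies on:

save_path = 'mydag.png'
filename, _, fileformat = save_path.rpartition('.')
print(filename, fileformat)  # -> mydag png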
54,286 | def dedupe_parameters(parameters):
duplicates = {}
for p in parameters:
duplicates.setdefault(p['name'], []).append(p)
for pname in duplicates.keys():
parameter_list = duplicates[pname]
if len(parameter_list) == 1:
continue
elif any(p != parameter_list[0] for p in parameter_list[1:]):
for p in parameter_list:
log.warning(p)
raise RuntimeError(
'cannot import workspace due to incompatible parameter configurations for {0:s}.'.format(
pname
)
)
# no errors raised, de-dupe and return
return list({v['name']: v for v in parameters}.values())
| def dedupe_parameters(parameters):
duplicates = {}
for p in parameters:
duplicates.setdefault(p['name'], []).append(p)
    for parname in duplicates.keys():
        parameter_list = duplicates[parname]
if len(parameter_list) == 1:
continue
elif any(p != parameter_list[0] for p in parameter_list[1:]):
for p in parameter_list:
log.warning(p)
raise RuntimeError(
'cannot import workspace due to incompatible parameter configurations for {0:s}.'.format(
pname
)
)
# no errors raised, de-dupe and return
return list({v['name']: v for v in parameters}.values())
|
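A minimal sketch of the de-duplication step above on toy parameter dicts (names and bounds invented): exact duplicates collapse via the name-keyed dict comprehension, while a conflicting redefinition of the same name is the case that triggers the RuntimeError.

params = [
    {"name": "mu", "bounds": [[0, 10]]},
    {"name": "mu", "bounds": [[0, 10]]},     # exact duplicate: harmless
    {"name": "sigma", "bounds": [[1, 5]]},
]
deduped = list({p["name"]: p for p in params}.values())
print(deduped)   # one 'mu' entry and one 'sigma' entry remain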
3,649 | def requires_memory(free_bytes):
"""Decorator to skip a test if not enough memory is available"""
import pytest
env_var = 'NPY_AVAILABLE_MEM'
env_value = os.environ.get(env_var)
if env_value is not None:
try:
mem_free = _parse_size(env_value)
except ValueError as exc:
raise ValueError('Invalid environment variable {}: {!s}'.format(
env_var, exc))
msg = ('{0} GB memory required, but environment variable '
'NPY_AVAILABLE_MEM={1} set'.format(
free_bytes/1e9, env_value))
else:
mem_free = _get_mem_available()
if mem_free is None:
msg = ("Could not determine available memory; set NPY_AVAILABLE_MEM "
"environment variable (e.g. NPY_AVAILABLE_MEM=16GB) to run "
"the test.")
mem_free = -1
else:
msg = '{0} GB memory required, but {1} GB available'.format(
free_bytes/1e9, mem_free/1e9)
return pytest.mark.skipif(mem_free < free_bytes, reason=msg)
| def requires_memory(free_bytes):
"""Decorator to skip a test if not enough memory is available"""
import pytest
env_var = 'NPY_AVAILABLE_MEM'
env_value = os.environ.get(env_var)
if env_value is not None:
try:
mem_free = _parse_size(env_value)
except ValueError as exc:
raise ValueError('Invalid environment variable {}: {!s}'.format(
env_var, exc))
msg = ('{0} GB memory required, but environment variable '
'NPY_AVAILABLE_MEM="{1} GB" set'.format(
free_bytes/1e9, env_value))
else:
mem_free = _get_mem_available()
if mem_free is None:
msg = ("Could not determine available memory; set NPY_AVAILABLE_MEM "
"environment variable (e.g. NPY_AVAILABLE_MEM=16GB) to run "
"the test.")
mem_free = -1
else:
msg = '{0} GB memory required, but {1} GB available'.format(
free_bytes/1e9, mem_free/1e9)
return pytest.mark.skipif(mem_free < free_bytes, reason=msg)
|
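The decorator above ultimately returns a pytest.mark.skipif marker; the fragment below shows that mechanism in isolation with invented numbers (4 GB free vs. 8 GB required), independent of _parse_size and _get_mem_available.

import pytest

mem_free, free_bytes = 4e9, 8e9   # pretend only 4 GB of the required 8 GB is free
needs_memory = pytest.mark.skipif(
    mem_free < free_bytes,
    reason='{0} GB memory required, but {1} GB available'.format(
        free_bytes / 1e9, mem_free / 1e9))

@needs_memory
def test_big_buffer():
    assert True   # skipped whenever the condition above is true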
48,602 | def sdc_pandas_series_operator_binop(self, other):
"""
Pandas Series operator :attr:`pandas.Series.binop` implementation
Note: Currently implemented for numeric Series only.
Differs from Pandas in returning Series with fixed dtype :obj:`float64`
.. only:: developer
**Test**: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_op1*
python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_op2*
python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_operator_binop*
Parameters
----------
series: :obj:`pandas.Series`
Input series
other: :obj:`pandas.Series` or :obj:`scalar`
Series or scalar value to be used as a second argument of binary operation
Returns
-------
:obj:`pandas.Series`
The result of the operation
"""
_func_name = 'Operator binop().'
ty_checker = TypeChecker('Operator binop().')
ty_checker.check(self, SeriesType)
if not isinstance(other, (SeriesType, types.Number)):
ty_checker.raise_exc(other, 'pandas.series or scalar', 'other')
if isinstance(other, SeriesType):
none_or_numeric_indexes = ((isinstance(self.index, types.NoneType) or check_index_is_numeric(self))
and (isinstance(other.index, types.NoneType) or check_index_is_numeric(other)))
series_data_comparable = check_types_comparable(self.data, other.data)
series_indexes_comparable = check_types_comparable(self.index, other.index) or none_or_numeric_indexes
if isinstance(other, SeriesType) and not series_data_comparable:
raise TypingError('{} Not supported for series with not-comparable data. \
Given: self.data={}, other.data={}'.format(_func_name, self.data, other.data))
if isinstance(other, SeriesType) and not series_indexes_comparable:
raise TypingError('{} Not implemented for series with not-comparable indexes. \
Given: self.index={}, other.index={}'.format(_func_name, self.index, other.index))
# specializations for numeric series - TODO: support arithmetic operation on StringArrays
if (isinstance(other, types.Number)):
def _series_operator_binop_scalar_impl(self, other):
result_data = self._data.astype(numpy.float64) + numpy.float64(other)
return pandas.Series(result_data, index=self._index, name=self._name)
return _series_operator_binop_scalar_impl
elif (isinstance(other, SeriesType)):
# optimization for series with default indexes, that can be aligned differently
if (isinstance(self.index, types.NoneType) and isinstance(other.index, types.NoneType)):
def _series_operator_binop_none_indexes_impl(self, other):
if (len(self._data) == len(other._data)):
result_data = self._data.astype(numpy.float64)
result_data = result_data + other._data.astype(numpy.float64)
return pandas.Series(result_data)
else:
left_size, right_size = len(self._data), len(other._data)
min_data_size = min(left_size, right_size)
max_data_size = max(left_size, right_size)
result_data = numpy.empty(max_data_size, dtype=numpy.float64)
if (left_size == min_data_size):
result_data[:min_data_size] = self._data
result_data[min_data_size:] = numpy.nan
result_data = result_data + other._data.astype(numpy.float64)
else:
result_data[:min_data_size] = other._data
result_data[min_data_size:] = numpy.nan
result_data = self._data.astype(numpy.float64) + result_data
return pandas.Series(result_data, self._index)
return _series_operator_binop_none_indexes_impl
else:
# for numeric indexes find common dtype to be used when creating joined index
if none_or_numeric_indexes:
ty_left_index_dtype = types.int64 if isinstance(self.index, types.NoneType) else self.index.dtype
ty_right_index_dtype = types.int64 if isinstance(other.index, types.NoneType) else other.index.dtype
numba_index_common_dtype = find_common_dtype_from_numpy_dtypes(
[ty_left_index_dtype, ty_right_index_dtype], [])
def _series_operator_binop_common_impl(self, other):
left_index, right_index = self.index, other.index
# check if indexes are equal and series don't have to be aligned
if sdc_check_indexes_equal(left_index, right_index):
result_data = self._data.astype(numpy.float64)
result_data = result_data + other._data.astype(numpy.float64)
if none_or_numeric_indexes == True: # noqa
result_index = left_index.astype(numba_index_common_dtype)
else:
result_index = self._index
return pandas.Series(result_data, index=result_index)
# TODO: replace below with core join(how='outer', return_indexers=True) when implemented
joined_index, left_indexer, right_indexer = sdc_join_series_indexes(left_index, right_index)
joined_index_range = numpy.arange(len(joined_index))
left_values = numpy.asarray(
[self._data[left_indexer[i]] for i in joined_index_range],
numpy.float64
)
left_values[left_indexer == -1] = numpy.nan
right_values = numpy.asarray(
[other._data[right_indexer[i]] for i in joined_index_range],
numpy.float64
)
right_values[right_indexer == -1] = numpy.nan
result_data = left_values + right_values
return pandas.Series(result_data, joined_index)
return _series_operator_binop_common_impl
return None
| def sdc_pandas_series_operator_binop(self, other):
"""
Pandas Series operator :attr:`pandas.Series.binop` implementation
Note: Currently implemented for numeric Series only.
Differs from Pandas in returning Series with fixed dtype :obj:`float64`
.. only:: developer
**Test**: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_op1*
python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_op2*
python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_operator_binop*
Parameters
----------
series: :obj:`pandas.Series`
Input series
other: :obj:`pandas.Series` or :obj:`scalar`
Series or scalar value to be used as a second argument of binary operation
Returns
-------
:obj:`pandas.Series`
The result of the operation
"""
_func_name = 'Operator binop().'
ty_checker = TypeChecker('Operator binop().')
if not isinstance(self, SeriesType):
return None
if not isinstance(other, (SeriesType, types.Number)):
ty_checker.raise_exc(other, 'pandas.series or scalar', 'other')
if isinstance(other, SeriesType):
none_or_numeric_indexes = ((isinstance(self.index, types.NoneType) or check_index_is_numeric(self))
and (isinstance(other.index, types.NoneType) or check_index_is_numeric(other)))
series_data_comparable = check_types_comparable(self.data, other.data)
series_indexes_comparable = check_types_comparable(self.index, other.index) or none_or_numeric_indexes
if isinstance(other, SeriesType) and not series_data_comparable:
raise TypingError('{} Not supported for series with not-comparable data. \
Given: self.data={}, other.data={}'.format(_func_name, self.data, other.data))
if isinstance(other, SeriesType) and not series_indexes_comparable:
raise TypingError('{} Not implemented for series with not-comparable indexes. \
Given: self.index={}, other.index={}'.format(_func_name, self.index, other.index))
# specializations for numeric series - TODO: support arithmetic operation on StringArrays
if (isinstance(other, types.Number)):
def _series_operator_binop_scalar_impl(self, other):
result_data = self._data.astype(numpy.float64) + numpy.float64(other)
return pandas.Series(result_data, index=self._index, name=self._name)
return _series_operator_binop_scalar_impl
elif (isinstance(other, SeriesType)):
# optimization for series with default indexes, that can be aligned differently
if (isinstance(self.index, types.NoneType) and isinstance(other.index, types.NoneType)):
def _series_operator_binop_none_indexes_impl(self, other):
if (len(self._data) == len(other._data)):
result_data = self._data.astype(numpy.float64)
result_data = result_data + other._data.astype(numpy.float64)
return pandas.Series(result_data)
else:
left_size, right_size = len(self._data), len(other._data)
min_data_size = min(left_size, right_size)
max_data_size = max(left_size, right_size)
result_data = numpy.empty(max_data_size, dtype=numpy.float64)
if (left_size == min_data_size):
result_data[:min_data_size] = self._data
result_data[min_data_size:] = numpy.nan
result_data = result_data + other._data.astype(numpy.float64)
else:
result_data[:min_data_size] = other._data
result_data[min_data_size:] = numpy.nan
result_data = self._data.astype(numpy.float64) + result_data
return pandas.Series(result_data, self._index)
return _series_operator_binop_none_indexes_impl
else:
# for numeric indexes find common dtype to be used when creating joined index
if none_or_numeric_indexes:
ty_left_index_dtype = types.int64 if isinstance(self.index, types.NoneType) else self.index.dtype
ty_right_index_dtype = types.int64 if isinstance(other.index, types.NoneType) else other.index.dtype
numba_index_common_dtype = find_common_dtype_from_numpy_dtypes(
[ty_left_index_dtype, ty_right_index_dtype], [])
def _series_operator_binop_common_impl(self, other):
left_index, right_index = self.index, other.index
# check if indexes are equal and series don't have to be aligned
if sdc_check_indexes_equal(left_index, right_index):
result_data = self._data.astype(numpy.float64)
result_data = result_data + other._data.astype(numpy.float64)
if none_or_numeric_indexes == True: # noqa
result_index = left_index.astype(numba_index_common_dtype)
else:
result_index = self._index
return pandas.Series(result_data, index=result_index)
# TODO: replace below with core join(how='outer', return_indexers=True) when implemented
joined_index, left_indexer, right_indexer = sdc_join_series_indexes(left_index, right_index)
joined_index_range = numpy.arange(len(joined_index))
left_values = numpy.asarray(
[self._data[left_indexer[i]] for i in joined_index_range],
numpy.float64
)
left_values[left_indexer == -1] = numpy.nan
right_values = numpy.asarray(
[other._data[right_indexer[i]] for i in joined_index_range],
numpy.float64
)
right_values[right_indexer == -1] = numpy.nan
result_data = left_values + right_values
return pandas.Series(result_data, joined_index)
return _series_operator_binop_common_impl
return None
|
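For reference, the alignment semantics that the overload above reproduces (outer join of indexes, missing positions filled with NaN, float64 result) can be seen directly in plain pandas with toy data:

import pandas as pd

left = pd.Series([1, 2, 3], index=[0, 1, 2])
right = pd.Series([10, 20], index=[1, 2])
print(left + right)   # position 0 has no partner on the right -> NaN; dtype is float64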
22,468 | def lint_inputs(tool_xml, lint_ctx):
"""Lint parameters in a tool's inputs block."""
datasource = is_datasource(tool_xml)
inputs = tool_xml.findall("./inputs//param")
num_inputs = 0
for param in inputs:
num_inputs += 1
param_attrib = param.attrib
if "name" not in param_attrib and "argument" not in param_attrib:
lint_ctx.error("Found param input with no name specified.")
continue
param_name = _parse_name(param_attrib.get("name"), param_attrib.get("argument"))
if "type" not in param_attrib:
lint_ctx.error(f"Param input [{param_name}] input with no type specified.")
continue
param_type = param_attrib["type"]
if not is_valid_cheetah_placeholder(param_name):
lint_ctx.warn(f"Param input [{param_name}] is not a valid Cheetah placeholder.")
if param_type == "data":
if "format" not in param_attrib:
lint_ctx.warn(f"Param input [{param_name}] with no format specified - 'data' format will be assumed.")
elif param_type == "select":
# get dynamic/statically defined options
dynamic_options = param.get("dynamic_options", None)
options = param.findall("./options")
filters = param.findall("./options/filter")
select_options = param.findall('./option')
if dynamic_options is not None:
lint_ctx.warn(f"Select parameter [{param_name}] uses deprecated 'dynamic_options' attribute.")
# check if options are defined by exactly one possibility
if (dynamic_options is not None) + (len(options) > 0) + (len(select_options) > 0) != 1:
lint_ctx.error(f"Select parameter [{param_name}] options have to be defined by either 'option' children elements, a 'options' element or the 'dynamic_options' attribute.")
# lint dynamic options
if len(options) == 1:
filters = options[0].findall("./filter")
# lint filters
filter_adds_options = False
for f in filters:
ftype = f.get("type", None)
if ftype is None:
lint_ctx.error(f"Select parameter [{param_name}] contains filter without type.")
continue
if ftype not in FILTER_TYPES:
lint_ctx.error(f"Select parameter [{param_name}] contains filter with unknown type '{ftype}'.")
continue
if ftype in ['add_value', 'data_meta']:
filter_adds_options = True
# TODO more linting of filters
from_file = options[0].get("from_file", None)
from_parameter = options[0].get("from_parameter", None)
from_dataset = options[0].get("from_dataset", None)
from_data_table = options[0].get("from_data_table", None)
if (from_file is None and from_parameter is None
and from_dataset is None and from_data_table is None
and not filter_adds_options):
lint_ctx.error(f"Select parameter [{param_name}] options tag defines no options. Use 'from_dataset', 'from_data_table', or a filter that adds values.")
if from_file is not None:
lint_ctx.warn(f"Select parameter [{param_name}] options uses deprecated 'from_file' attribute.")
if from_parameter is not None:
lint_ctx.warn(f"Select parameter [{param_name}] options uses deprecated 'from_parameter' attribute.")
if from_dataset is not None and from_data_table is not None:
lint_ctx.error(f"Select parameter [{param_name}] options uses 'from_dataset' and 'from_data_table' attribute.")
if options[0].get("meta_file_key", None) is not None and from_dataset is None:
lint_ctx.error(f"Select parameter [{param_name}] 'meta_file_key' is only compatible with 'from_dataset'.")
if options[0].get("options_filter_attribute", None) is not None:
lint_ctx.warn(f"Select parameter [{param_name}] options uses deprecated 'options_filter_attribute' attribute.")
if options[0].get("transform_lines", None) is not None:
lint_ctx.warn(f"Select parameter [{param_name}] options uses deprecated 'transform_lines' attribute.")
elif len(options) > 1:
lint_ctx.error(f"Select parameter [{param_name}] contains multiple options elements")
# lint statically defined options
if any(['value' not in option.attrib for option in select_options]):
lint_ctx.error(f"Select parameter [{param_name}] has option without value")
if len(set([option.text.strip() for option in select_options if option.text is not None])) != len(select_options):
lint_ctx.error(f"Select parameter [{param_name}] has multiple options with the same text content")
if len(set([option.attrib.get("value") for option in select_options])) != len(select_options):
lint_ctx.error(f"Select parameter [{param_name}] has multiple options with the same value")
multiple = string_as_bool(param_attrib.get("multiple", "false"))
optional = string_as_bool(param_attrib.get("optional", param_attrib.get("multiple", "false")))
if param_attrib.get("display") == "checkboxes":
if not multiple:
lint_ctx.error(f'Select [{param_name}] `display="checkboxes"` is incompatible with `multiple="false"`, remove the `display` attribute')
if not optional:
lint_ctx.error(f'Select [{param_name}] `display="checkboxes"` is incompatible with `optional="false"`, remove the `display` attribute')
if param_attrib.get("display") == "radio":
if multiple:
lint_ctx.error(f'Select [{param_name}] display="radio" is incompatible with multiple="true"')
if optional:
lint_ctx.error(f'Select [{param_name}] display="radio" is incompatible with optional="true"')
# TODO: Validate type, much more...
# lint validators
validators = param.findall("./validator")
for validator in validators:
vtype = validator.attrib['type']
if param_type in PARAMETER_VALIDATOR_TYPE_COMPATIBILITY:
if vtype not in PARAMETER_VALIDATOR_TYPE_COMPATIBILITY[param_type]:
lint_ctx.error(f"Parameter [{param_name}]: validator with an incompatible type '{vtype}'")
for attrib in ATTRIB_VALIDATOR_COMPATIBILITY:
if attrib in validator.attrib and vtype not in ATTRIB_VALIDATOR_COMPATIBILITY[attrib]:
lint_ctx.error(f"Parameter [{param_name}]: attribute '{attrib}' is incompatible with validator of type '{vtype}'")
if vtype == "expression" and validator.text is None:
lint_ctx.error(f"Parameter [{param_name}]: expression validator without content")
if vtype not in ["expression", "regex"] and validator.text is not None:
lint_ctx.warn(f"Parameter [{param_name}]: '{vtype}' validators are not expected to contain text (found '{validator.text}')")
if vtype in ["in_range", "length", "dataset_metadata_in_range"] and ("min" not in validator.attrib and "max" not in validator.attrib):
lint_ctx.error(f"Parameter [{param_name}]: '{vtype}' validators need to define the 'min' or 'max' attribute(s)")
if vtype in ["metadata"] and ("check" not in validator.attrib and "skip" not in validator.attrib):
lint_ctx.error(f"Parameter [{param_name}]: '{vtype}' validators need to define the 'check' or 'skip' attribute(s) {validator.attrib}")
if vtype in ["value_in_data_table", "value_not_in_data_table", "dataset_metadata_in_data_table", "dataset_metadata_not_in_data_table"] and "table_name" not in validator.attrib:
lint_ctx.error(f"Parameter [{param_name}]: '{vtype}' validators need to define the 'table_name' attribute")
conditional_selects = tool_xml.findall("./inputs//conditional")
for conditional in conditional_selects:
conditional_name = conditional.get('name')
if not conditional_name:
lint_ctx.error("Conditional without a name")
if conditional.get("value_from"):
# Probably only the upload tool use this, no children elements
continue
first_param = conditional.find("param")
if first_param is None:
lint_ctx.error(f"Conditional [{conditional_name}] has no child <param>")
continue
first_param_type = first_param.get('type')
if first_param_type not in ['select', 'boolean']:
lint_ctx.warn(f'Conditional [{conditional_name}] first param should have type="select" /> or type="boolean"')
continue
if first_param_type == 'select':
select_options = _find_with_attribute(first_param, 'option', 'value')
option_ids = [option.get('value') for option in select_options]
else: # boolean
option_ids = [
first_param.get('truevalue', 'true'),
first_param.get('falsevalue', 'false')
]
if string_as_bool(first_param.get('optional', False)):
lint_ctx.warn(f"Conditional [{conditional_name}] test parameter cannot be optional")
whens = conditional.findall('./when')
if any('value' not in when.attrib for when in whens):
lint_ctx.error(f"Conditional [{conditional_name}] when without value")
when_ids = [w.get('value') for w in whens]
for option_id in option_ids:
if option_id not in when_ids:
lint_ctx.warn(f"Conditional [{conditional_name}] no <when /> block found for {first_param_type} option '{option_id}'")
for when_id in when_ids:
if when_id not in option_ids:
if first_param_type == 'select':
lint_ctx.warn(f"Conditional [{conditional_name}] no <option /> found for when block '{when_id}'")
else:
lint_ctx.warn(f"Conditional [{conditional_name}] no truevalue/falsevalue found for when block '{when_id}'")
if datasource:
for datasource_tag in ('display', 'uihints'):
if not any([param.tag == datasource_tag for param in inputs]):
lint_ctx.info(f"{datasource_tag} tag usually present in data sources")
if num_inputs:
lint_ctx.info(f"Found {num_inputs} input parameters.")
else:
if datasource:
lint_ctx.info("No input parameters, OK for data sources")
else:
lint_ctx.warn("Found no input parameters.")
| def lint_inputs(tool_xml, lint_ctx):
"""Lint parameters in a tool's inputs block."""
datasource = is_datasource(tool_xml)
inputs = tool_xml.findall("./inputs//param")
num_inputs = 0
for param in inputs:
num_inputs += 1
param_attrib = param.attrib
if "name" not in param_attrib and "argument" not in param_attrib:
lint_ctx.error("Found param input with no name specified.")
continue
param_name = _parse_name(param_attrib.get("name"), param_attrib.get("argument"))
if "type" not in param_attrib:
lint_ctx.error(f"Param input [{param_name}] input with no type specified.")
continue
param_type = param_attrib["type"]
if not is_valid_cheetah_placeholder(param_name):
lint_ctx.warn(f"Param input [{param_name}] is not a valid Cheetah placeholder.")
if param_type == "data":
if "format" not in param_attrib:
lint_ctx.warn(f"Param input [{param_name}] with no format specified - 'data' format will be assumed.")
elif param_type == "select":
# get dynamic/statically defined options
dynamic_options = param.get("dynamic_options", None)
options = param.findall("./options")
filters = param.findall("./options/filter")
select_options = param.findall('./option')
if dynamic_options is not None:
lint_ctx.warn(f"Select parameter [{param_name}] uses deprecated 'dynamic_options' attribute.")
# check if options are defined by exactly one possibility
if (dynamic_options is not None) + (len(options) > 0) + (len(select_options) > 0) != 1:
lint_ctx.error(f"Select parameter [{param_name}] options have to be defined by either 'option' children elements, a 'options' element or the 'dynamic_options' attribute.")
# lint dynamic options
if len(options) == 1:
filters = options[0].findall("./filter")
# lint filters
filter_adds_options = False
for f in filters:
ftype = f.get("type", None)
if ftype is None:
lint_ctx.error(f"Select parameter [{param_name}] contains filter without type.")
continue
if ftype not in FILTER_TYPES:
lint_ctx.error(f"Select parameter [{param_name}] contains filter with unknown type '{ftype}'.")
continue
if ftype in ['add_value', 'data_meta']:
filter_adds_options = True
# TODO more linting of filters
from_file = options[0].get("from_file", None)
from_parameter = options[0].get("from_parameter", None)
from_dataset = options[0].get("from_dataset", None)
from_data_table = options[0].get("from_data_table", None)
if (from_file is None and from_parameter is None
and from_dataset is None and from_data_table is None
and not filter_adds_options):
lint_ctx.error(f"Select parameter [{param_name}] options tag defines no options. Use 'from_dataset', 'from_data_table', or a filter that adds values.")
if from_file is not None:
lint_ctx.warn(f"Select parameter [{param_name}] options uses deprecated 'from_file' attribute.")
if from_parameter is not None:
lint_ctx.warn(f"Select parameter [{param_name}] options uses deprecated 'from_parameter' attribute.")
if from_dataset is not None and from_data_table is not None:
lint_ctx.error(f"Select parameter [{param_name}] options uses 'from_dataset' and 'from_data_table' attribute.")
if options[0].get("meta_file_key", None) is not None and from_dataset is None:
lint_ctx.error(f"Select parameter [{param_name}] 'meta_file_key' is only compatible with 'from_dataset'.")
if options[0].get("options_filter_attribute", None) is not None:
lint_ctx.warn(f"Select parameter [{param_name}] options uses deprecated 'options_filter_attribute' attribute.")
if options[0].get("transform_lines", None) is not None:
lint_ctx.warn(f"Select parameter [{param_name}] options uses deprecated 'transform_lines' attribute.")
elif len(options) > 1:
lint_ctx.error(f"Select parameter [{param_name}] contains multiple options elements")
# lint statically defined options
if any(['value' not in option.attrib for option in select_options]):
lint_ctx.error(f"Select parameter [{param_name}] has option without value")
if len(set([option.text.strip() for option in select_options if option.text is not None])) != len(select_options):
lint_ctx.error(f"Select parameter [{param_name}] has multiple options with the same text content")
if len(set([option.attrib.get("value") for option in select_options])) != len(select_options):
lint_ctx.error(f"Select parameter [{param_name}] has multiple options with the same value")
multiple = string_as_bool(param_attrib.get("multiple", "false"))
optional = string_as_bool(param_attrib.get("optional", multiple))
if param_attrib.get("display") == "checkboxes":
if not multiple:
lint_ctx.error(f'Select [{param_name}] `display="checkboxes"` is incompatible with `multiple="false"`, remove the `display` attribute')
if not optional:
lint_ctx.error(f'Select [{param_name}] `display="checkboxes"` is incompatible with `optional="false"`, remove the `display` attribute')
if param_attrib.get("display") == "radio":
if multiple:
lint_ctx.error(f'Select [{param_name}] display="radio" is incompatible with multiple="true"')
if optional:
lint_ctx.error(f'Select [{param_name}] display="radio" is incompatible with optional="true"')
# TODO: Validate type, much more...
# lint validators
validators = param.findall("./validator")
for validator in validators:
vtype = validator.attrib['type']
if param_type in PARAMETER_VALIDATOR_TYPE_COMPATIBILITY:
if vtype not in PARAMETER_VALIDATOR_TYPE_COMPATIBILITY[param_type]:
lint_ctx.error(f"Parameter [{param_name}]: validator with an incompatible type '{vtype}'")
for attrib in ATTRIB_VALIDATOR_COMPATIBILITY:
if attrib in validator.attrib and vtype not in ATTRIB_VALIDATOR_COMPATIBILITY[attrib]:
lint_ctx.error(f"Parameter [{param_name}]: attribute '{attrib}' is incompatible with validator of type '{vtype}'")
if vtype == "expression" and validator.text is None:
lint_ctx.error(f"Parameter [{param_name}]: expression validator without content")
if vtype not in ["expression", "regex"] and validator.text is not None:
lint_ctx.warn(f"Parameter [{param_name}]: '{vtype}' validators are not expected to contain text (found '{validator.text}')")
if vtype in ["in_range", "length", "dataset_metadata_in_range"] and ("min" not in validator.attrib and "max" not in validator.attrib):
lint_ctx.error(f"Parameter [{param_name}]: '{vtype}' validators need to define the 'min' or 'max' attribute(s)")
if vtype in ["metadata"] and ("check" not in validator.attrib and "skip" not in validator.attrib):
lint_ctx.error(f"Parameter [{param_name}]: '{vtype}' validators need to define the 'check' or 'skip' attribute(s) {validator.attrib}")
if vtype in ["value_in_data_table", "value_not_in_data_table", "dataset_metadata_in_data_table", "dataset_metadata_not_in_data_table"] and "table_name" not in validator.attrib:
lint_ctx.error(f"Parameter [{param_name}]: '{vtype}' validators need to define the 'table_name' attribute")
conditional_selects = tool_xml.findall("./inputs//conditional")
for conditional in conditional_selects:
conditional_name = conditional.get('name')
if not conditional_name:
lint_ctx.error("Conditional without a name")
if conditional.get("value_from"):
# Probably only the upload tool use this, no children elements
continue
first_param = conditional.find("param")
if first_param is None:
lint_ctx.error(f"Conditional [{conditional_name}] has no child <param>")
continue
first_param_type = first_param.get('type')
if first_param_type not in ['select', 'boolean']:
lint_ctx.warn(f'Conditional [{conditional_name}] first param should have type="select" /> or type="boolean"')
continue
if first_param_type == 'select':
select_options = _find_with_attribute(first_param, 'option', 'value')
option_ids = [option.get('value') for option in select_options]
else: # boolean
option_ids = [
first_param.get('truevalue', 'true'),
first_param.get('falsevalue', 'false')
]
if string_as_bool(first_param.get('optional', False)):
lint_ctx.warn(f"Conditional [{conditional_name}] test parameter cannot be optional")
whens = conditional.findall('./when')
if any('value' not in when.attrib for when in whens):
lint_ctx.error(f"Conditional [{conditional_name}] when without value")
when_ids = [w.get('value') for w in whens]
for option_id in option_ids:
if option_id not in when_ids:
lint_ctx.warn(f"Conditional [{conditional_name}] no <when /> block found for {first_param_type} option '{option_id}'")
for when_id in when_ids:
if when_id not in option_ids:
if first_param_type == 'select':
lint_ctx.warn(f"Conditional [{conditional_name}] no <option /> found for when block '{when_id}'")
else:
lint_ctx.warn(f"Conditional [{conditional_name}] no truevalue/falsevalue found for when block '{when_id}'")
if datasource:
for datasource_tag in ('display', 'uihints'):
if not any([param.tag == datasource_tag for param in inputs]):
lint_ctx.info(f"{datasource_tag} tag usually present in data sources")
if num_inputs:
lint_ctx.info(f"Found {num_inputs} input parameters.")
else:
if datasource:
lint_ctx.info("No input parameters, OK for data sources")
else:
lint_ctx.warn("Found no input parameters.")
|
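The "exactly one way to define select options" rule above can be tried in isolation with ElementTree. The XML fragment is made up and the check is a stripped-down sketch of that single rule, not the Galaxy linter itself.

import xml.etree.ElementTree as ET

TOOL_XML = """
<tool>
  <inputs>
    <param name="mode" type="select"/>
  </inputs>
</tool>
"""
root = ET.fromstring(TOOL_XML)
for param in root.findall("./inputs//param"):
    if param.get("type") != "select":
        continue
    ways = ((param.get("dynamic_options") is not None)
            + (len(param.findall("./options")) > 0)
            + (len(param.findall("./option")) > 0))
    if ways != 1:
        print(f"select parameter [{param.get('name')}] defines options {ways} ways; expected exactly 1")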
5,365 | def test_uptodate_with_pkgs_with_changes():
"""
Test pkg.uptodate with simulated changes
"""
pkgs = {
"pkga": {"old": "1.0.1", "new": "2.0.1"},
"pkgb": {"old": "1.0.2", "new": "2.0.2"},
"pkgc": {"old": "1.0.3", "new": "2.0.3"},
}
list_upgrades = MagicMock(
return_value={pkgname: pkgver["new"] for pkgname, pkgver in pkgs.items()}
)
upgrade = MagicMock(return_value=pkgs)
version = MagicMock(side_effect=lambda pkgname, **_: pkgs[pkgname]["old"])
with patch.dict(
pkg.__salt__,
{
"pkg.list_upgrades": list_upgrades,
"pkg.upgrade": upgrade,
"pkg.version": version,
},
):
# Run state with test=false
with patch.dict(pkg.__opts__, {"test": False}):
ret = pkg.uptodate("dummy", test=True, pkgs=[pkgname for pkgname in pkgs],)
assert ret["result"]
assert ret["changes"] == pkgs
# Run state with test=true
with patch.dict(pkg.__opts__, {"test": True}):
ret = pkg.uptodate("dummy", test=True, pkgs=[pkgname for pkgname in pkgs],)
assert ret["result"] is None
assert ret["changes"] == pkgs
| def test_uptodate_with_pkgs_with_changes(pkgs):
"""
Test pkg.uptodate with simulated changes
"""
pkgs = {
"pkga": {"old": "1.0.1", "new": "2.0.1"},
"pkgb": {"old": "1.0.2", "new": "2.0.2"},
"pkgc": {"old": "1.0.3", "new": "2.0.3"},
}
list_upgrades = MagicMock(
return_value={pkgname: pkgver["new"] for pkgname, pkgver in pkgs.items()}
)
upgrade = MagicMock(return_value=pkgs)
version = MagicMock(side_effect=lambda pkgname, **_: pkgs[pkgname]["old"])
with patch.dict(
pkg.__salt__,
{
"pkg.list_upgrades": list_upgrades,
"pkg.upgrade": upgrade,
"pkg.version": version,
},
):
# Run state with test=false
with patch.dict(pkg.__opts__, {"test": False}):
ret = pkg.uptodate("dummy", test=True, pkgs=[pkgname for pkgname in pkgs],)
assert ret["result"]
assert ret["changes"] == pkgs
# Run state with test=true
with patch.dict(pkg.__opts__, {"test": True}):
ret = pkg.uptodate("dummy", test=True, pkgs=[pkgname for pkgname in pkgs],)
assert ret["result"] is None
assert ret["changes"] == pkgs
|
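The mocking pattern in the test above hinges on MagicMock's return_value and side_effect; a self-contained illustration with fake package data (no Salt required):

from unittest.mock import MagicMock

pkgs = {"pkga": {"old": "1.0.1", "new": "2.0.1"}}
version = MagicMock(side_effect=lambda pkgname, **_: pkgs[pkgname]["old"])
list_upgrades = MagicMock(return_value={k: v["new"] for k, v in pkgs.items()})

print(version("pkga"))       # -> '1.0.1' (computed by the side_effect callable)
print(list_upgrades())       # -> {'pkga': '2.0.1'} (fixed return_value)
print(version.call_count)    # the mock also records how it was called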
30,947 | def main() -> None:
"""
PARSE AND VALIDATE INTEGRATION PARAMS
"""
# Commands dictionary
commands: Dict[str, Callable] = {
'gsuite-custom-user-schema-update': custom_user_schema_update_command,
'gsuite-custom-user-schema-create': custom_user_schema_create_command,
'gsuite-datatransfer-list': datatransfer_list_command,
'gsuite-role-assignment-create': role_assignment_create_command,
'gsuite-role-assignment-list': role_assignment_list_command,
'gsuite-user-create': user_create_command,
'gsuite-mobile-update': mobile_update_command,
'gsuite-mobile-delete': mobile_delete_command,
'gsuite-user-alias-add': user_alias_add_command,
'gsuite-group-create': group_create_command,
'gsuite-role-create': role_create_command,
'gsuite-token-revoke': token_revoke_command,
'gsuite-datatransfer-request-create': datatransfer_request_create_command,
'gsuite-user-delete': user_delete_command,
'gsuite-user-update': user_update_command
}
command = demisto.command()
demisto.info(f'Command being called is {command}')
try:
global ADMIN_EMAIL
params = demisto.params()
service_account_dict = GSuiteClient.safe_load_non_strict_json(params.get('user_service_account_json'))
verify_certificate = not params.get('insecure', False)
proxy = params.get('proxy', False)
headers = {
'Content-Type': 'application/json'
}
# prepare client class object
gsuite_client = GSuiteClient(service_account_dict,
base_url='https://www.googleapis.com/', verify=verify_certificate, proxy=proxy,
headers=headers)
# Trim the arguments
args = GSuiteClient.strip_dict(demisto.args())
ADMIN_EMAIL = args.get('admin_email') if args.get('admin_email') else params.get('admin_email')
# Validation of ADMIN_EMAIL
if ADMIN_EMAIL and not is_email_valid(ADMIN_EMAIL):
raise ValueError(MESSAGES['INVALID_ADMIN_EMAIL'])
# This is the call made when pressing the integration Test button.
if demisto.command() == 'test-module':
result = test_function(gsuite_client)
demisto.results(result)
elif command in commands:
return_results(commands[command](gsuite_client, args))
# Log exceptions
except Exception as e:
demisto.error(traceback.format_exc())
return_error(f'Error: {str(e)}')
| def main() -> None:
"""
PARSE AND VALIDATE INTEGRATION PARAMS
"""
# Commands dictionary
commands: Dict[str, Callable] = {
'gsuite-custom-user-schema-update': custom_user_schema_update_command,
'gsuite-custom-user-schema-create': custom_user_schema_create_command,
'gsuite-datatransfer-list': datatransfer_list_command,
'gsuite-role-assignment-create': role_assignment_create_command,
'gsuite-role-assignment-list': role_assignment_list_command,
'gsuite-user-create': user_create_command,
'gsuite-mobile-update': mobile_update_command,
'gsuite-mobile-delete': mobile_delete_command,
'gsuite-user-alias-add': user_alias_add_command,
'gsuite-group-create': group_create_command,
'gsuite-role-create': role_create_command,
'gsuite-token-revoke': token_revoke_command,
'gsuite-datatransfer-request-create': datatransfer_request_create_command,
'gsuite-user-delete': user_delete_command,
'gsuite-user-update': user_update_command
}
command = demisto.command()
demisto.info(f'Command being called is {command}')
try:
global ADMIN_EMAIL
params = demisto.params()
service_account_dict = GSuiteClient.safe_load_non_strict_json(params.get('user_service_account_json'))
verify_certificate = not params.get('insecure', False)
proxy = params.get('proxy', False)
headers = {
'Content-Type': 'application/json'
}
# prepare client class object
gsuite_client = GSuiteClient(service_account_dict,
base_url='https://www.googleapis.com/', verify=verify_certificate, proxy=proxy,
headers=headers)
# Trim the arguments
args = GSuiteClient.strip_dict(demisto.args())
ADMIN_EMAIL = args.get('admin_email') if args.get('admin_email') else params.get('admin_email')
# Validation of ADMIN_EMAIL
if ADMIN_EMAIL and not is_email_valid(ADMIN_EMAIL):
raise ValueError(MESSAGES['INVALID_ADMIN_EMAIL'])
# This is the call made when pressing the integration Test button.
if demisto.command() == 'test-module':
result = test_module(gsuite_client)
demisto.results(result)
elif command in commands:
return_results(commands[command](gsuite_client, args))
# Log exceptions
except Exception as e:
demisto.error(traceback.format_exc())
return_error(f'Error: {str(e)}')
|
2,039 | def _k_init(X, n_clusters, x_squared_norms, random_state, n_local_trials=None):
"""Init n_clusters seeds according to k-means++
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
The data to pick seeds for. To avoid memory copy, the input data
should be double precision (dtype=np.float64).
n_clusters : int
The number of seeds to choose
x_squared_norms : ndarray of shape (n_samples,)
Squared Euclidean norm of each data point.
random_state : RandomState instance
The generator used to initialize the centers.
See :term:`Glossary <random_state>`.
n_local_trials : int, default=None
The number of seeding trials for each center (except the first),
of which the one reducing inertia the most is greedily chosen.
Set to None to make the number of trials depend logarithmically
on the number of seeds (2+log(k)); this is the default.
Returns
-------
centers : array, shape (n_clusters, n_features)
        The initial centers for k-means.
indices : list, length (n_clusters)
The index location of the chosen centers in the data array X. For a
given index and center, X[index] = center.
Notes
-----
    Selects initial cluster centers for k-means clustering in a smart way
    to speed up convergence. See: Arthur, D. and Vassilvitskii, S.
"k-means++: the advantages of careful seeding". ACM-SIAM symposium
on Discrete algorithms. 2007
Version ported from http://www.stanford.edu/~darthur/kMeansppTest.zip,
which is the implementation used in the aforementioned paper.
"""
n_samples, n_features = X.shape
centers = np.empty((n_clusters, n_features), dtype=X.dtype)
assert x_squared_norms is not None, 'x_squared_norms None in _k_init'
# Set the number of local seeding trials if none is given
if n_local_trials is None:
# This is what Arthur/Vassilvitskii tried, but did not report
# specific results for other than mentioning in the conclusion
# that it helped.
n_local_trials = 2 + int(np.log(n_clusters))
# Pick first center randomly and track index of point
center_id = random_state.randint(n_samples)
indices = np.empty(n_clusters)
if sp.issparse(X):
centers[0] = X[center_id].toarray()
else:
centers[0] = X[center_id]
indices[0] = center_id
# Initialize list of closest distances and calculate current potential
closest_dist_sq = euclidean_distances(
centers[0, np.newaxis], X, Y_norm_squared=x_squared_norms,
squared=True)
current_pot = closest_dist_sq.sum()
# Pick the remaining n_clusters-1 points
for c in range(1, n_clusters):
# Choose center candidates by sampling with probability proportional
# to the squared distance to the closest existing center
rand_vals = random_state.random_sample(n_local_trials) * current_pot
candidate_ids = np.searchsorted(stable_cumsum(closest_dist_sq),
rand_vals)
# XXX: numerical imprecision can result in a candidate_id out of range
np.clip(candidate_ids, None, closest_dist_sq.size - 1,
out=candidate_ids)
# Compute distances to center candidates
distance_to_candidates = euclidean_distances(
X[candidate_ids], X, Y_norm_squared=x_squared_norms, squared=True)
# update closest distances squared and potential for each candidate
np.minimum(closest_dist_sq, distance_to_candidates,
out=distance_to_candidates)
candidates_pot = distance_to_candidates.sum(axis=1)
# Decide which candidate is the best
best_candidate = np.argmin(candidates_pot)
current_pot = candidates_pot[best_candidate]
closest_dist_sq = distance_to_candidates[best_candidate]
best_candidate = candidate_ids[best_candidate]
# Permanently add best center candidate found in local tries
if sp.issparse(X):
centers[c] = X[best_candidate].toarray()
else:
centers[c] = X[best_candidate]
indices[c] = best_candidate
indices = [int(x) for x in indices.tolist()]
return centers, indices
| def _k_init(X, n_clusters, x_squared_norms, random_state, n_local_trials=None):
"""Init n_clusters seeds according to k-means++
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
The data to pick seeds for. To avoid memory copy, the input data
should be double precision (dtype=np.float64).
n_clusters : int
The number of seeds to choose
x_squared_norms : ndarray of shape (n_samples,)
Squared Euclidean norm of each data point.
random_state : RandomState instance
The generator used to initialize the centers.
See :term:`Glossary <random_state>`.
n_local_trials : int, default=None
The number of seeding trials for each center (except the first),
of which the one reducing inertia the most is greedily chosen.
Set to None to make the number of trials depend logarithmically
on the number of seeds (2+log(k)); this is the default.
Returns
-------
    centers : ndarray of shape (n_clusters, n_features)
        The initial centers for k-means.
indices : list, length (n_clusters)
The index location of the chosen centers in the data array X. For a
given index and center, X[index] = center.
Notes
-----
    Selects initial cluster centers for k-means clustering in a smart way
    to speed up convergence. See: Arthur, D. and Vassilvitskii, S.
"k-means++: the advantages of careful seeding". ACM-SIAM symposium
on Discrete algorithms. 2007
Version ported from http://www.stanford.edu/~darthur/kMeansppTest.zip,
which is the implementation used in the aforementioned paper.
"""
n_samples, n_features = X.shape
centers = np.empty((n_clusters, n_features), dtype=X.dtype)
assert x_squared_norms is not None, 'x_squared_norms None in _k_init'
# Set the number of local seeding trials if none is given
if n_local_trials is None:
# This is what Arthur/Vassilvitskii tried, but did not report
# specific results for other than mentioning in the conclusion
# that it helped.
n_local_trials = 2 + int(np.log(n_clusters))
# Pick first center randomly and track index of point
center_id = random_state.randint(n_samples)
indices = np.empty(n_clusters)
if sp.issparse(X):
centers[0] = X[center_id].toarray()
else:
centers[0] = X[center_id]
indices[0] = center_id
# Initialize list of closest distances and calculate current potential
closest_dist_sq = euclidean_distances(
centers[0, np.newaxis], X, Y_norm_squared=x_squared_norms,
squared=True)
current_pot = closest_dist_sq.sum()
# Pick the remaining n_clusters-1 points
for c in range(1, n_clusters):
# Choose center candidates by sampling with probability proportional
# to the squared distance to the closest existing center
rand_vals = random_state.random_sample(n_local_trials) * current_pot
candidate_ids = np.searchsorted(stable_cumsum(closest_dist_sq),
rand_vals)
# XXX: numerical imprecision can result in a candidate_id out of range
np.clip(candidate_ids, None, closest_dist_sq.size - 1,
out=candidate_ids)
# Compute distances to center candidates
distance_to_candidates = euclidean_distances(
X[candidate_ids], X, Y_norm_squared=x_squared_norms, squared=True)
# update closest distances squared and potential for each candidate
np.minimum(closest_dist_sq, distance_to_candidates,
out=distance_to_candidates)
candidates_pot = distance_to_candidates.sum(axis=1)
# Decide which candidate is the best
best_candidate = np.argmin(candidates_pot)
current_pot = candidates_pot[best_candidate]
closest_dist_sq = distance_to_candidates[best_candidate]
best_candidate = candidate_ids[best_candidate]
# Permanently add best center candidate found in local tries
if sp.issparse(X):
centers[c] = X[best_candidate].toarray()
else:
centers[c] = X[best_candidate]
indices[c] = best_candidate
indices = [int(x) for x in indices.tolist()]
return centers, indices
|
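The candidate-sampling step above (pick points with probability proportional to the squared distance to the nearest chosen center) is an inverse-CDF draw built from cumsum and searchsorted. The numpy-only sketch below uses made-up distances and np.cumsum in place of scikit-learn's stable_cumsum.

import numpy as np

rng = np.random.RandomState(0)
closest_dist_sq = np.array([0.1, 4.0, 0.4, 2.5])     # pretend squared distances to nearest center
current_pot = closest_dist_sq.sum()

rand_vals = rng.random_sample(3) * current_pot       # three local trials
candidate_ids = np.searchsorted(np.cumsum(closest_dist_sq), rand_vals)
np.clip(candidate_ids, None, closest_dist_sq.size - 1, out=candidate_ids)
print(candidate_ids)   # indices drawn roughly in proportion to closest_dist_sq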
35,589 | def densenet201(pretrained: bool = False, progress: bool = True, **kwargs) -> DenseNet:
r"""Densenet-201 model from
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient,
but slower. Default: *False*. See `"paper" <https://arxiv.org/pdf/1707.06990.pdf>`_
"""
return _densenet('densenet201', 32, (6, 12, 48, 32), 64, pretrained, progress,
**kwargs)
| def densenet201(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> DenseNet:
r"""Densenet-201 model from
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient,
but slower. Default: *False*. See `"paper" <https://arxiv.org/pdf/1707.06990.pdf>`_
"""
return _densenet('densenet201', 32, (6, 12, 48, 32), 64, pretrained, progress,
**kwargs)
|
59,509 | def get_channel_waveform(sched: Schedule,
chan: PulseChannel,
backend: Union[None, IBMQBackend] = None,
qubit_index: Union[None, int] = None,
chan_freq: Union[None, float] = None,
dt: float = 2e-9 / 9,
apply_carrier_wave: bool = False):
"""Returns the waveforms on a PulseChannel.
Args:
sched: The pulse Schedule object.
chan: The PulseChannel on which the waveform is to be returned.
backend: An IBMQBackend from which the qubit frequency and dt
are to be extracted.
qubit_index: An integer indicating the qubit index.
chan_freq: A float indicating the channel wave frequency. Not necessary if
both backend and qubit_index are specified.
        dt: Qubit drive channel timestep in seconds. Defaults to 2/9 ns.
apply_carrier_wave: Whether the carrier wave is applied to the waveforms.
Returns:
chan_waveform: A complex-valued array of the waveform on the
given PulseChannel.
"""
# Check consistency of arguments
if not isinstance(chan, PulseChannel):
raise TypeError("The channel must be a DriveChannel, "
"ControlChannel or a MeasureChannel")
if apply_carrier_wave:
if backend is not None and qubit_index is not None:
if isinstance(chan, MeasureChannel):
chan_freq = backend.defaults().meas_freq_est[qubit_index]
else:
chan_freq = backend.defaults().qubit_freq_est[qubit_index]
else:
assert chan_freq is not None
# Flatten the Schedule and transform it into an iterator of
# InstructionTuples
sched_trans = target_qobj_transform(sched)
chan_events = ChannelEvents.load_program(sched_trans, chan)
waveform_inst_tups = chan_events.get_waveforms()
if backend is not None:
dt = backend.configuration().dt
    # Build the channel waveform
chan_waveform = np.zeros((sched_trans.duration,), dtype=complex)
for inst_tup in waveform_inst_tups:
if isinstance(inst_tup.inst, Play):
# Unpack the time points and phase and frequency in
# the current frame
t0 = inst_tup.t0
tf = t0 + inst_tup.inst.duration
t_array = np.arange(t0, tf) * dt
phase = inst_tup.frame.phase
freq = inst_tup.frame.freq
# Apply phase and frequency shifts and optionally carrier wave
pulse_waveform = inst_tup.inst.pulse.get_waveform().samples
pulse_waveform *= np.exp(1j * phase)
pulse_waveform *= np.exp(1j * freq * t_array)
if apply_carrier_wave:
pulse_waveform *= np.exp(1j * chan_freq * t_array)
chan_waveform[t0:tf] += pulse_waveform
return chan_waveform | def get_channel_waveform(sched: Schedule,
chan: PulseChannel,
backend: Union[None, IBMQBackend] = None,
qubit_index: Union[None, int] = None,
chan_freq: Union[None, float] = None,
dt: float = 2e-9 / 9,
apply_carrier_wave: bool = False):
"""Returns the waveforms on a PulseChannel.
Args:
sched: The pulse Schedule object.
chan: The PulseChannel on which the waveform is to be returned.
backend: An IBMQBackend from which the qubit frequency and dt
are to be extracted.
qubit_index: An integer indicating the qubit index.
chan_freq: A float indicating the channel wave frequency. Not necessary if
both backend and qubit_index are specified.
        dt: Qubit drive channel timestep in seconds. Defaults to 2/9 ns.
apply_carrier_wave: Whether the carrier wave is applied to the waveforms.
Returns:
chan_waveform: A complex-valued array of the waveform on the
return chan_waveform
given PulseChannel.
"""
# Check consistency of arguments
if not isinstance(chan, PulseChannel):
raise TypeError("The channel must be a DriveChannel, "
"ControlChannel or a MeasureChannel")
if apply_carrier_wave:
if backend is not None and qubit_index is not None:
if isinstance(chan, MeasureChannel):
chan_freq = backend.defaults().meas_freq_est[qubit_index]
else:
chan_freq = backend.defaults().qubit_freq_est[qubit_index]
else:
assert chan_freq is not None
# Flatten the Schedule and transform it into an iterator of
# InstructionTuples
sched_trans = target_qobj_transform(sched)
chan_events = ChannelEvents.load_program(sched_trans, chan)
waveform_inst_tups = chan_events.get_waveforms()
if backend is not None:
dt = backend.configuration().dt
    # Build the channel waveform
chan_waveform = np.zeros((sched_trans.duration,), dtype=complex)
for inst_tup in waveform_inst_tups:
if isinstance(inst_tup.inst, Play):
# Unpack the time points and phase and frequency in
# the current frame
t0 = inst_tup.t0
tf = t0 + inst_tup.inst.duration
t_array = np.arange(t0, tf) * dt
phase = inst_tup.frame.phase
freq = inst_tup.frame.freq
# Apply phase and frequency shifts and optionally carrier wave
pulse_waveform = inst_tup.inst.pulse.get_waveform().samples
pulse_waveform *= np.exp(1j * phase)
pulse_waveform *= np.exp(1j * freq * t_array)
if apply_carrier_wave:
pulse_waveform *= np.exp(1j * chan_freq * t_array)
chan_waveform[t0:tf] += pulse_waveform
return chan_waveform |
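The per-instruction frame handling above amounts to multiplying the complex samples by a phase factor and a frequency factor over the instruction's time window; a numpy-only sketch with invented numbers (no Qiskit needed), mirroring the code's exp(1j * freq * t) convention:

import numpy as np

dt = 2e-9 / 9                                # drive-channel timestep in seconds
t0, duration = 0, 8
t_array = np.arange(t0, t0 + duration) * dt
samples = np.ones(duration, dtype=complex)   # stand-in for pulse.get_waveform().samples

phase, freq = np.pi / 4, 5.0e9               # frame phase [rad] and frequency [Hz]
shifted = samples * np.exp(1j * phase) * np.exp(1j * freq * t_array)
print(np.round(shifted, 3))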
44,069 | def fragment_graph(graph: MultiDiGraph) -> Tuple[Tuple[MultiDiGraph], MultiDiGraph]:
"""
Fragments a cut graph into a collection of subgraphs as well as returning
the communication/quotient graph.
Args:
graph (MultiDiGraph): directed multigraph containing measure and prepare
nodes at cut locations
Returns:
subgraphs, communication_graph (Tuple[Tuple[MultiDiGraph], MultiDiGraph]):
the subgraphs of the cut graph and the communication graph where each
node represents a fragment and edges denote the flow of qubits between
fragments
**Example**
Consider the following circuit with the manually-placed wire cuts:
.. code-block:: python
from pennylane.transforms import qcut
wire_cut_0 = qml.WireCut(wires=0)
wire_cut_1 = qml.WireCut(wires=1)
multi_wire_cut = qml.WireCut(wires=[0, 1])
with qml.tape.QuantumTape() as tape:
qml.RX(0.4, wires=0)
qml.apply(wire_cut_0)
qml.RY(0.5, wires=0)
qml.apply(wire_cut_1)
qml.CNOT(wires=[0, 1])
qml.apply(multi_wire_cut)
qml.RZ(0.6, wires=1)
qml.expval(qml.PauliZ(0))
We can find the corresponding graph, remove all the wire cut nodes, and
find the subgraphs and communication graph by using:
>>> graph = qcut.tape_to_graph(tape)
>>> qcut.replace_wire_cut_nodes(graph)
>>> qcut.fragment_graph(graph)
((<networkx.classes.multidigraph.MultiDiGraph object at 0x7fb3b2311940>,
<networkx.classes.multidigraph.MultiDiGraph object at 0x7fb3b2311c10>,
<networkx.classes.multidigraph.MultiDiGraph object at 0x7fb3b23e2820>,
<networkx.classes.multidigraph.MultiDiGraph object at 0x7fb3b23e27f0>),
<networkx.classes.multidigraph.MultiDiGraph object at 0x7fb3b23e26a0>)
"""
edges = list(graph.edges)
cut_edges = []
for node1, node2, _ in edges:
if isinstance(node1, MeasureNode):
assert isinstance(node2, PrepareNode)
cut_edges.append((node1, node2))
graph.remove_edge(node1, node2)
subgraph_nodes = weakly_connected_components(graph)
subgraphs = tuple(graph.subgraph(n) for n in subgraph_nodes)
communication_graph = MultiDiGraph()
communication_graph.add_nodes_from(range(len(subgraphs)))
for node1, node2 in cut_edges:
for i, subgraph in enumerate(subgraphs):
if subgraph.has_node(node1):
start_fragment = i
if subgraph.has_node(node2):
end_fragment = i
communication_graph.add_edge(start_fragment, end_fragment, pair=(node1, node2))
return subgraphs, communication_graph
| def fragment_graph(graph: MultiDiGraph) -> Tuple[Tuple[MultiDiGraph], MultiDiGraph]:
"""
Fragments a cut graph into a collection of subgraphs as well as returning
the communication/quotient graph.
Args:
graph (MultiDiGraph): directed multigraph containing measure and prepare
nodes at cut locations
Returns:
Tuple[Tuple[MultiDiGraph], MultiDiGraph]:
the subgraphs of the cut graph and the communication graph where each
node represents a fragment and edges denote the flow of qubits between
fragments
**Example**
Consider the following circuit with the manually-placed wire cuts:
.. code-block:: python
from pennylane.transforms import qcut
wire_cut_0 = qml.WireCut(wires=0)
wire_cut_1 = qml.WireCut(wires=1)
multi_wire_cut = qml.WireCut(wires=[0, 1])
with qml.tape.QuantumTape() as tape:
qml.RX(0.4, wires=0)
qml.apply(wire_cut_0)
qml.RY(0.5, wires=0)
qml.apply(wire_cut_1)
qml.CNOT(wires=[0, 1])
qml.apply(multi_wire_cut)
qml.RZ(0.6, wires=1)
qml.expval(qml.PauliZ(0))
We can find the corresponding graph, remove all the wire cut nodes, and
find the subgraphs and communication graph by using:
>>> graph = qcut.tape_to_graph(tape)
>>> qcut.replace_wire_cut_nodes(graph)
>>> qcut.fragment_graph(graph)
((<networkx.classes.multidigraph.MultiDiGraph object at 0x7fb3b2311940>,
<networkx.classes.multidigraph.MultiDiGraph object at 0x7fb3b2311c10>,
<networkx.classes.multidigraph.MultiDiGraph object at 0x7fb3b23e2820>,
<networkx.classes.multidigraph.MultiDiGraph object at 0x7fb3b23e27f0>),
<networkx.classes.multidigraph.MultiDiGraph object at 0x7fb3b23e26a0>)
"""
edges = list(graph.edges)
cut_edges = []
for node1, node2, _ in edges:
if isinstance(node1, MeasureNode):
assert isinstance(node2, PrepareNode)
cut_edges.append((node1, node2))
graph.remove_edge(node1, node2)
subgraph_nodes = weakly_connected_components(graph)
subgraphs = tuple(graph.subgraph(n) for n in subgraph_nodes)
communication_graph = MultiDiGraph()
communication_graph.add_nodes_from(range(len(subgraphs)))
for node1, node2 in cut_edges:
for i, subgraph in enumerate(subgraphs):
if subgraph.has_node(node1):
start_fragment = i
if subgraph.has_node(node2):
end_fragment = i
communication_graph.add_edge(start_fragment, end_fragment, pair=(node1, node2))
return subgraphs, communication_graph
|
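Fragment discovery above reduces to networkx's weakly connected components once the cut edges are removed; a toy run with plain string nodes standing in for the operation/MeasureNode/PrepareNode objects:

import networkx as nx

g = nx.MultiDiGraph()
g.add_edges_from([("rx0", "measure0"), ("prepare0", "ry0")])   # cut edge already removed
fragments = [g.subgraph(nodes) for nodes in nx.weakly_connected_components(g)]
print([sorted(f.nodes) for f in fragments])   # two fragments, two nodes each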
35,213 | def _nvcc_gencode_options(cuda_version):
"""Returns NVCC GPU code generation options."""
if sys.argv == ['setup.py', 'develop']:
return []
envcfg = os.getenv('CUPY_NVCC_GENERATE_CODE', None)
if envcfg is not None and envcfg != 'current':
return ['--generate-code={}'.format(arch)
for arch in envcfg.split(';') if len(arch) > 0]
if envcfg == 'current' and build.get_compute_capabilities() is not None:
ccs = build.get_compute_capabilities()
arch_list = [
f'compute_{cc}' if cc < 60 else (f'compute_{cc}', f'sm_{cc}')
for cc in ccs]
else:
# The arch_list specifies virtual architectures, such as 'compute_61',
# and real architectures, such as 'sm_61', for which the CUDA
# input files are to be compiled.
#
# The syntax of an entry of the list is
#
# entry ::= virtual_arch | (virtual_arch, real_arch)
#
# where virtual_arch is a string which means a virtual architecture and
# real_arch is a string which means a real architecture.
#
        # If a virtual architecture is supplied, NVCC generates a PTX code for
        # the virtual architecture. If a pair of a virtual architecture and a
# real architecture is supplied, NVCC generates a PTX code for the
# virtual architecture as well as a cubin code for the real one.
#
# For example, making NVCC generate a PTX code for 'compute_60' virtual
# architecture, the arch_list has an entry of 'compute_60'.
#
# arch_list = ['compute_60']
#
# For another, making NVCC generate a PTX code for 'compute_61' virtual
# architecture and a cubin code for 'sm_61' real architecture, the
# arch_list has an entry of ('compute_61', 'sm_61').
#
# arch_list = [('compute_61', 'sm_61')]
#
# See the documentation of each CUDA version for the list of supported
# architectures:
#
# https://docs.nvidia.com/cuda/cuda-compiler-driver-nvcc/index.html#options-for-steering-gpu-code-generation
if cuda_version >= 11040:
arch_list = ['compute_35',
'compute_50',
('compute_60', 'sm_60'),
('compute_61', 'sm_61'),
('compute_70', 'sm_70'),
('compute_75', 'sm_75'),
('compute_80', 'sm_80'),
('compute_86', 'sm_86'),
'compute_86']
elif cuda_version >= 11000:
arch_list = ['compute_35',
'compute_50',
('compute_60', 'sm_60'),
('compute_61', 'sm_61'),
('compute_70', 'sm_70'),
('compute_75', 'sm_75'),
('compute_80', 'sm_80'),
'compute_80']
elif cuda_version >= 10000:
arch_list = ['compute_30',
'compute_50',
('compute_60', 'sm_60'),
('compute_61', 'sm_61'),
('compute_70', 'sm_70'),
('compute_75', 'sm_75'),
'compute_70']
elif cuda_version >= 9020:
arch_list = ['compute_30',
'compute_50',
('compute_60', 'sm_60'),
('compute_61', 'sm_61'),
('compute_70', 'sm_70'),
'compute_70']
else:
# This should not happen.
assert False
options = []
for arch in arch_list:
if type(arch) is tuple:
virtual_arch, real_arch = arch
options.append('--generate-code=arch={},code={}'.format(
virtual_arch, real_arch))
else:
options.append('--generate-code=arch={},code={}'.format(
arch, arch))
return options
| def _nvcc_gencode_options(cuda_version):
"""Returns NVCC GPU code generation options."""
if sys.argv == ['setup.py', 'develop']:
return []
envcfg = os.getenv('CUPY_NVCC_GENERATE_CODE', None)
if envcfg is not None and envcfg != 'current':
return ['--generate-code={}'.format(arch)
for arch in envcfg.split(';') if len(arch) > 0]
if envcfg == 'current' and build.get_compute_capabilities() is not None:
ccs = build.get_compute_capabilities()
arch_list = [
f'compute_{cc}' if cc < 60 else (f'compute_{cc}', f'sm_{cc}')
for cc in ccs]
else:
# The arch_list specifies virtual architectures, such as 'compute_61',
# and real architectures, such as 'sm_61', for which the CUDA
# input files are to be compiled.
#
# The syntax of an entry of the list is
#
# entry ::= virtual_arch | (virtual_arch, real_arch)
#
# where virtual_arch is a string which means a virtual architecture and
# real_arch is a string which means a real architecture.
#
        # If a virtual architecture is supplied, NVCC generates a PTX code
        # for the virtual architecture. If a pair of a virtual architecture and a
# real architecture is supplied, NVCC generates a PTX code for the
# virtual architecture as well as a cubin code for the real one.
#
# For example, making NVCC generate a PTX code for 'compute_60' virtual
# architecture, the arch_list has an entry of 'compute_60'.
#
# arch_list = ['compute_60']
#
# For another, making NVCC generate a PTX code for 'compute_61' virtual
# architecture and a cubin code for 'sm_61' real architecture, the
# arch_list has an entry of ('compute_61', 'sm_61').
#
# arch_list = [('compute_61', 'sm_61')]
#
# See the documentation of each CUDA version for the list of supported
# architectures:
#
# https://docs.nvidia.com/cuda/cuda-compiler-driver-nvcc/index.html#options-for-steering-gpu-code-generation
if cuda_version >= 11040:
arch_list = ['compute_35',
'compute_50',
('compute_60', 'sm_60'),
('compute_61', 'sm_61'),
('compute_70', 'sm_70'),
('compute_75', 'sm_75'),
('compute_80', 'sm_80'),
('compute_86', 'sm_86'),
'compute_80']
elif cuda_version >= 11000:
arch_list = ['compute_35',
'compute_50',
('compute_60', 'sm_60'),
('compute_61', 'sm_61'),
('compute_70', 'sm_70'),
('compute_75', 'sm_75'),
('compute_80', 'sm_80'),
'compute_80']
elif cuda_version >= 10000:
arch_list = ['compute_30',
'compute_50',
('compute_60', 'sm_60'),
('compute_61', 'sm_61'),
('compute_70', 'sm_70'),
('compute_75', 'sm_75'),
'compute_70']
elif cuda_version >= 9020:
arch_list = ['compute_30',
'compute_50',
('compute_60', 'sm_60'),
('compute_61', 'sm_61'),
('compute_70', 'sm_70'),
'compute_70']
else:
# This should not happen.
assert False
options = []
for arch in arch_list:
if type(arch) is tuple:
virtual_arch, real_arch = arch
options.append('--generate-code=arch={},code={}'.format(
virtual_arch, real_arch))
else:
options.append('--generate-code=arch={},code={}'.format(
arch, arch))
return options
|
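A minimal standalone sketch of how the loop at the end of _nvcc_gencode_options above expands arch_list entries into NVCC flags; the two-entry arch_list here is a made-up example, not taken from the row.

# Expand a small, hypothetical arch_list into --generate-code flags,
# mirroring the final loop of _nvcc_gencode_options above.
arch_list = ['compute_60', ('compute_61', 'sm_61')]

options = []
for arch in arch_list:
    if isinstance(arch, tuple):
        # virtual/real pair: PTX for the virtual arch plus a cubin for the real one
        virtual_arch, real_arch = arch
        options.append('--generate-code=arch={},code={}'.format(virtual_arch, real_arch))
    else:
        # virtual arch only: PTX output
        options.append('--generate-code=arch={},code={}'.format(arch, arch))

print(options)
# ['--generate-code=arch=compute_60,code=compute_60',
#  '--generate-code=arch=compute_61,code=sm_61']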
25,003 | def test_fall_back_on_base_config() -> None:
"""Test that we correctly fall back on the base config."""
# A file under the current dir should fall back to the highest level
# For pylint this is ./pylintrc
runner = Run([__name__], exit=False)
assert id(runner.linter.config) == id(runner.linter._base_config)
# When the file is a directory that does not have any of its parents in
# linter._directory_namespaces it should default to the base config
with tempfile.TemporaryDirectory() as tmpdir:
with open(Path(tmpdir) / "test.py", "w", encoding="utf-8") as f:
f.write("1")
Run([str(Path(tmpdir) / "test.py")], exit=False)
assert id(runner.linter.config) == id(runner.linter._base_config)
| def test_fall_back_on_base_config(tmpdir: LocalPath) -> None:
"""Test that we correctly fall back on the base config."""
# A file under the current dir should fall back to the highest level
# For pylint this is ./pylintrc
runner = Run([__name__], exit=False)
assert id(runner.linter.config) == id(runner.linter._base_config)
# When the file is a directory that does not have any of its parents in
# linter._directory_namespaces it should default to the base config
with tempfile.TemporaryDirectory() as tmpdir:
with open(Path(tmpdir) / "test.py", "w", encoding="utf-8") as f:
f.write("1")
Run([str(Path(tmpdir) / "test.py")], exit=False)
assert id(runner.linter.config) == id(runner.linter._base_config)
|
852 | def test_issue_14811():
assert limit(((1 + (Rational(2,3) ** (x + 1))) ** (2 ** x)) / (2 ** (Rational(4,3) ** (x - 1))), x, oo) is oo
| def test_issue_14811():
assert limit(((1 + ((S(2)/3)**(x + 1)))**(2**x))/(2**((S(4)/3)**(x - 1))), x, oo) == oo
|
6,802 | def clear_timeline_references(link_doctype, link_name):
frappe.db.sql("""delete from `tabCommunication Link`
where `tabCommunication Link`.link_doctype='{0}' and `tabCommunication Link`.link_name=%s""".format(link_doctype), (link_name)) # nosec
| def clear_timeline_references(link_doctype, link_name):
frappe.db.sql("""delete from `tabCommunication Link`
where `tabCommunication Link`.link_doctype=%s and `tabCommunication Link`.link_name=%s""", (link_doctype, link_name))
|
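The change in this row binds link_doctype as a query parameter instead of interpolating it into the SQL string. A rough, self-contained illustration of the same idea, using the standard-library sqlite3 module and a made-up table rather than frappe.db.sql and tabCommunication Link:

import sqlite3

# In-memory stand-in for the real table; names here are illustrative only.
conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE communication_link (link_doctype TEXT, link_name TEXT)")
conn.execute("INSERT INTO communication_link VALUES ('Contact', 'CRM-0001')")

def clear_links(link_doctype, link_name):
    # Both values are bound parameters, so user-supplied input cannot
    # alter the structure of the DELETE statement.
    conn.execute(
        "DELETE FROM communication_link WHERE link_doctype = ? AND link_name = ?",
        (link_doctype, link_name),
    )

clear_links("Contact", "CRM-0001")
print(conn.execute("SELECT COUNT(*) FROM communication_link").fetchone())  # (0,)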
13,002 | def get_user_country_context(
destination_address: Optional[
Union["account_types.AddressInput", "account_models.Address"]
] = None,
company_address: Optional["account_models.Address"] = None,
) -> str:
"""Get country of the current user to use for tax and stock related calculations.
User's country context is determined from the provided `destination_address` (which
may represent user's current location determined by the client or a shipping
address provided in the checkout). If `destination_address` is not given, the
default company address from the shop settings is assumed. If this address is not
set, fallback to the `DEFAULT_COUNTRY` setting.
"""
if destination_address and destination_address.country:
if isinstance(destination_address, Country):
return destination_address.country.code
else:
return destination_address.country
elif company_address and company_address.country:
return company_address.country
else:
return settings.DEFAULT_COUNTRY
| def get_user_country_context(
destination_address: Optional[
Union["account_types.AddressInput", "account_models.Address"]
] = None,
company_address: Optional["account_models.Address"] = None,
) -> str:
"""Get country of the current user to use for tax and stock related calculations.
User's country context is determined from the provided `destination_address` (which
may represent user's current location determined by the client or a shipping
address provided in the checkout). If `destination_address` is not given, the
default company address from the shop settings is assumed. If this address is not
set, fallback to the `DEFAULT_COUNTRY` setting.
"""
if destination_address and destination_address.country:
if isinstance(destination_address.country, Country):
return destination_address.country.code
else:
return destination_address.country
elif company_address and company_address.country:
return company_address.country
else:
return settings.DEFAULT_COUNTRY
|
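A tiny sketch of why the modified isinstance check matters, using dummy stand-in classes rather than Saleor's real Address/Country types: it is the country attribute, not the address object itself, that may be a Country instance exposing a .code.

class Country:
    def __init__(self, code):
        self.code = code

class Address:
    def __init__(self, country):
        self.country = country  # may be a Country object or a plain string code

def country_code(address):
    # Mirrors the fixed branch: inspect the type of address.country.
    if isinstance(address.country, Country):
        return address.country.code
    return address.country

print(country_code(Address(Country("PL"))))  # PL
print(country_code(Address("US")))           # US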
55,480 | def test___repr___empty():
modin_s = pd.Series()
pandas_s = pandas.Series()
assert repr(modin_s) == repr(pandas_s)
| def test___repr___empty():
modin_s, pandas_s = pd.Series(), pandas.Series()
assert repr(modin_s) == repr(pandas_s)
|
32,352 | def start_fetchfile_command():
malop_id = demisto.getArg('malopGUID')
user_name = demisto.getArg('userName')
response = get_file_guids(malop_id)
for filename, file_guid in list(response.items()):
api_response = start_fetchfile(file_guid, user_name)
try:
if api_response['status'] == "SUCCESS":
demisto.results("Successfully started fetching file for the given malop")
except Exception:
raise Exception("Failed to start fetch file process")
| def start_fetchfile_command():
malop_id = demisto.getArg('malopGUID')
user_name = demisto.getArg('userName')
response = get_file_guids(malop_id)
for filename, file_guid in list(response.items()):
api_response = start_fetchfile(file_guid, user_name)
try:
if api_response['status'] == "SUCCESS":
return CommandResults(readable_output="Successfully started fetching file for the given malop")
except Exception:
raise Exception("Failed to start fetch file process")
|
44,068 | def fragment_graph(graph: MultiDiGraph) -> Tuple[Tuple[MultiDiGraph], MultiDiGraph]:
"""
Fragments a cut graph into a collection of subgraphs as well as returning
the communication/quotient graph.
Args:
graph (MultiDiGraph): directed multigraph containing measure and prepare
nodes at cut locations
Returns:
subgraphs, communication_graph (Tuple[Tuple[MultiDiGraph], MultiDiGraph]):
the subgraphs of the cut graph and the communication graph where each
node represents a fragment and edges denote the flow of qubits between
fragments
**Example**
Consider the following circuit with the manually-placed wire cuts:
.. code-block:: python
from pennylane.transforms import qcut
wire_cut_0 = qml.WireCut(wires=0)
wire_cut_1 = qml.WireCut(wires=1)
multi_wire_cut = qml.WireCut(wires=[0, 1])
with qml.tape.QuantumTape() as tape:
qml.RX(0.4, wires=0)
qml.apply(wire_cut_0)
qml.RY(0.5, wires=0)
qml.apply(wire_cut_1)
qml.CNOT(wires=[0, 1])
qml.apply(multi_wire_cut)
qml.RZ(0.6, wires=1)
qml.expval(qml.PauliZ(0))
We can find the corresponding graph, remove all the wire cut nodes, and
find the subgraphs and communication graph by using:
>>> graph = qcut.tape_to_graph(tape)
>>> qcut.replace_wire_cut_nodes(graph)
>>> qcut.fragment_graph(graph)
((<networkx.classes.multidigraph.MultiDiGraph object at 0x7fb3b2311940>,
<networkx.classes.multidigraph.MultiDiGraph object at 0x7fb3b2311c10>,
<networkx.classes.multidigraph.MultiDiGraph object at 0x7fb3b23e2820>,
<networkx.classes.multidigraph.MultiDiGraph object at 0x7fb3b23e27f0>),
<networkx.classes.multidigraph.MultiDiGraph object at 0x7fb3b23e26a0>)
"""
edges = list(graph.edges)
cut_edges = []
for node1, node2, _ in edges:
if isinstance(node1, MeasureNode):
assert isinstance(node2, PrepareNode)
cut_edges.append((node1, node2))
graph.remove_edge(node1, node2)
subgraph_nodes = weakly_connected_components(graph)
subgraphs = tuple(graph.subgraph(n) for n in subgraph_nodes)
communication_graph = MultiDiGraph()
communication_graph.add_nodes_from(range(len(subgraphs)))
for node1, node2 in cut_edges:
for i, subgraph in enumerate(subgraphs):
if subgraph.has_node(node1):
start_fragment = i
if subgraph.has_node(node2):
end_fragment = i
communication_graph.add_edge(start_fragment, end_fragment, pair=(node1, node2))
return subgraphs, communication_graph
| def fragment_graph(graph: MultiDiGraph) -> Tuple[Tuple[MultiDiGraph], MultiDiGraph]:
"""
Fragments a cut graph into a collection of subgraphs as well as returning
the communication/quotient graph.
Args:
graph (MultiDiGraph): directed multigraph containing measure and prepare
nodes at cut locations
Returns:
subgraphs, communication_graph (Tuple[Tuple[MultiDiGraph], MultiDiGraph]):
the subgraphs of the cut graph and the communication graph where each
node represents a fragment and edges denote the flow of qubits between
fragments
**Example**
Consider the following circuit with the manually-placed wire cuts:
.. code-block:: python
from pennylane.transforms import qcut
wire_cut_0 = qml.WireCut(wires=0)
wire_cut_1 = qml.WireCut(wires=1)
multi_wire_cut = qml.WireCut(wires=[0, 1])
with qml.tape.QuantumTape() as tape:
qml.RX(0.4, wires=0)
qml.apply(wire_cut_0)
qml.RY(0.5, wires=0)
qml.apply(wire_cut_1)
qml.CNOT(wires=[0, 1])
qml.apply(multi_wire_cut)
qml.RZ(0.6, wires=1)
qml.expval(qml.PauliZ(0))
We can find the corresponding graph, remove all the wire cut nodes, and
find the subgraphs and communication graph by using:
>>> graph = qcut.tape_to_graph(tape)
>>> qcut.replace_wire_cut_nodes(graph)
>>> qcut.fragment_graph(graph)
((<networkx.classes.multidigraph.MultiDiGraph object at 0x7fb3b2311940>,
<networkx.classes.multidigraph.MultiDiGraph object at 0x7fb3b2311c10>,
<networkx.classes.multidigraph.MultiDiGraph object at 0x7fb3b23e2820>,
<networkx.classes.multidigraph.MultiDiGraph object at 0x7fb3b23e27f0>),
<networkx.classes.multidigraph.MultiDiGraph object at 0x7fb3b23e26a0>)
"""
edges = list(graph.edges)
cut_edges = []
for node1, node2, _ in edges:
if isinstance(node1, MeasureNode):
assert isinstance(node2, PrepareNode)
cut_edges.append((node1, node2))
graph.remove_edge(node1, node2)
subgraph_nodes = weakly_connected_components(graph)
subgraphs = tuple(graph.subgraph(n) for n in subgraph_nodes)
communication_graph = MultiDiGraph()
communication_graph.add_nodes_from(range(len(subgraphs)))
for node1, node2 in cut_edges:
for i, subgraph in enumerate(subgraphs):
if subgraph.has_node(node1):
start_fragment = i
if subgraph.has_node(node2):
end_fragment = i
communication_graph.add_edge(start_fragment, end_fragment, pair=(node1, node2))
return subgraphs, communication_graph
|
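A self-contained sketch of the NetworkX pattern used by fragment_graph above, with plain integer nodes instead of PennyLane measure/prepare nodes: split the cut graph into weakly connected subgraphs and record each removed cut edge in a small communication graph.

import networkx as nx

# Toy directed multigraph: two clusters that were joined by a single cut edge.
graph = nx.MultiDiGraph()
graph.add_edges_from([(0, 1), (1, 2), (3, 4)])
cut_edges = [(2, 3)]  # the edge removed at the cut; deliberately not in `graph`

subgraph_nodes = nx.weakly_connected_components(graph)
subgraphs = tuple(graph.subgraph(n) for n in subgraph_nodes)

communication_graph = nx.MultiDiGraph()
communication_graph.add_nodes_from(range(len(subgraphs)))
for node1, node2 in cut_edges:
    for i, subgraph in enumerate(subgraphs):
        if subgraph.has_node(node1):
            start_fragment = i
        if subgraph.has_node(node2):
            end_fragment = i
    communication_graph.add_edge(start_fragment, end_fragment, pair=(node1, node2))

print([sorted(g.nodes) for g in subgraphs])        # [[0, 1, 2], [3, 4]]
print(list(communication_graph.edges(data=True)))  # [(0, 1, {'pair': (2, 3)})]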
45,872 | def distort_points(points: torch.Tensor, K: torch.Tensor, dist: torch.Tensor) -> torch.Tensor:
r"""Distortion of a set of 2D points based on the lens distortion model.
    Radial :math:`(k_1, k_2, k_3, k_4, k_5, k_6)`,
tangential :math:`(p_1, p_2)`, thin prism :math:`(s_1, s_2, s_3, s_4)`, and tilt :math:`(\tau_x, \tau_y)` distortion models are considered in this function.
Args:
points: Input image points with shape :math:`(*, N, 2)`.
K: Intrinsic camera matrix with shape :math:`(*, 3, 3)`.
dist: Distortion coefficients
:math:`(k_1,k_2,p_1,p_2[,k_3[,k_4,k_5,k_6[,s_1,s_2,s_3,s_4[,\tau_x,\tau_y]]]])`. This is
a vector with 4, 5, 8, 12 or 14 elements with shape :math:`(*, n)`
Returns:
        Distorted 2D points with shape :math:`(*, N, 2)`.
"""
assert points.dim() >= 2 and points.shape[-1] == 2
assert K.shape[-2:] == (3, 3)
assert dist.shape[-1] in [4, 5, 8, 12, 14]
if dist.shape[-1] < 14:
dist = torch.nn.functional.pad(dist, [0, 14 - dist.shape[-1]])
# Convert 2D points from pixels to normalized camera coordinates
    cx: torch.Tensor = K[..., 0:1, 2] # principal point in x (Bx1)
    cy: torch.Tensor = K[..., 1:2, 2] # principal point in y (Bx1)
fx: torch.Tensor = K[..., 0:1, 0] # focal in x (Bx1)
fy: torch.Tensor = K[..., 1:2, 1] # focal in y (Bx1)
# This is equivalent to K^-1 [u,v,1]^T
x: torch.Tensor = (points[..., 0] - cx) / fx # (BxN - Bx1)/Bx1 -> BxN or (N,)
y: torch.Tensor = (points[..., 1] - cy) / fy # (BxN - Bx1)/Bx1 -> BxN or (N,)
# Distort points
r2 = x * x + y * y
rad_poly = (1 + dist[..., 0:1] * r2 + dist[..., 1:2] * r2 * r2 + dist[..., 4:5] * r2 ** 3) / (
1 + dist[..., 5:6] * r2 + dist[..., 6:7] * r2 * r2 + dist[..., 7:8] * r2 ** 3
)
xd = (
x * rad_poly
+ 2 * dist[..., 2:3] * x * y
+ dist[..., 3:4] * (r2 + 2 * x * x)
+ dist[..., 8:9] * r2
+ dist[..., 9:10] * r2 * r2
)
yd = (
y * rad_poly
+ dist[..., 2:3] * (r2 + 2 * y * y)
+ 2 * dist[..., 3:4] * x * y
+ dist[..., 10:11] * r2
+ dist[..., 11:12] * r2 * r2
)
# Compensate for tilt distortion
if torch.any(dist[..., 12] != 0) or torch.any(dist[..., 13] != 0):
tilt = tiltProjection(dist[..., 12], dist[..., 13])
# Transposed untilt points (instead of [x,y,1]^T, we obtain [x,y,1])
pointsUntilt = torch.stack([xd, yd, torch.ones(xd.shape, device=xd.device, dtype=xd.dtype)], -1) @ tilt.transpose(-2, -1)
xd = pointsUntilt[..., 0] / pointsUntilt[..., 2]
yd = pointsUntilt[..., 1] / pointsUntilt[..., 2]
    # Convert points from normalized camera coordinates to pixel coordinates
x = fx * xd + cx
y = fy * yd + cy
return torch.stack([x, y], -1)
| def distort_points(points: torch.Tensor, K: torch.Tensor, dist: torch.Tensor) -> torch.Tensor:
r"""Distortion of a set of 2D points based on the lens distortion model.
    Radial :math:`(k_1, k_2, k_3, k_4, k_5, k_6)`,
tangential :math:`(p_1, p_2)`, thin prism :math:`(s_1, s_2, s_3, s_4)`, and tilt :math:`(\tau_x, \tau_y)` distortion models are considered in this function.
Args:
points: Input image points with shape :math:`(*, N, 2)`.
K: Intrinsic camera matrix with shape :math:`(*, 3, 3)`.
dist: Distortion coefficients
:math:`(k_1,k_2,p_1,p_2[,k_3[,k_4,k_5,k_6[,s_1,s_2,s_3,s_4[,\tau_x,\tau_y]]]])`. This is
a vector with 4, 5, 8, 12 or 14 elements with shape :math:`(*, n)`
Returns:
        Distorted 2D points with shape :math:`(*, N, 2)`.
"""
assert points.dim() >= 2 and points.shape[-1] == 2
assert K.shape[-2:] == (3, 3)
assert dist.shape[-1] in [4, 5, 8, 12, 14]
if dist.shape[-1] < 14:
dist = torch.nn.functional.pad(dist, [0, 14 - dist.shape[-1]])
# Convert 2D points from pixels to normalized camera coordinates
    cx: torch.Tensor = K[..., 0:1, 2] # principal point in x (Bx1)
    cy: torch.Tensor = K[..., 1:2, 2] # principal point in y (Bx1)
fx: torch.Tensor = K[..., 0:1, 0] # focal in x (Bx1)
fy: torch.Tensor = K[..., 1:2, 1] # focal in y (Bx1)
# This is equivalent to K^-1 [u,v,1]^T
x: torch.Tensor = (points[..., 0] - cx) / fx # (BxN - Bx1)/Bx1 -> BxN or (N,)
y: torch.Tensor = (points[..., 1] - cy) / fy # (BxN - Bx1)/Bx1 -> BxN or (N,)
# Distort points
r2 = x * x + y * y
rad_poly = (1 + dist[..., 0:1] * r2 + dist[..., 1:2] * r2 * r2 + dist[..., 4:5] * r2 ** 3) / (
1 + dist[..., 5:6] * r2 + dist[..., 6:7] * r2 * r2 + dist[..., 7:8] * r2 ** 3
)
xd = (
x * rad_poly
+ 2 * dist[..., 2:3] * x * y
+ dist[..., 3:4] * (r2 + 2 * x * x)
+ dist[..., 8:9] * r2
+ dist[..., 9:10] * r2 * r2
)
yd = (
y * rad_poly
+ dist[..., 2:3] * (r2 + 2 * y * y)
+ 2 * dist[..., 3:4] * x * y
+ dist[..., 10:11] * r2
+ dist[..., 11:12] * r2 * r2
)
# Compensate for tilt distortion
if torch.any(dist[..., 12] != 0) or torch.any(dist[..., 13] != 0):
tilt = tiltProjection(dist[..., 12], dist[..., 13])
# Transposed untilt points (instead of [x,y,1]^T, we obtain [x,y,1])
pointsUntilt = torch.stack([xd, yd, torch.ones(xd.shape, device=xd.device, dtype=xd.dtype)], -1) @ tilt.transpose(-2, -1)
xd = pointsUntilt[..., 0] / pointsUntilt[..., 2]
yd = pointsUntilt[..., 1] / (pointsUntilt[..., 2] + eps)
    # Convert points from normalized camera coordinates to pixel coordinates
x = fx * xd + cx
y = fy * yd + cy
return torch.stack([x, y], -1)
|
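The full model above handles radial, tangential, thin-prism and tilt terms; as a simplified, NumPy-only sketch of the same pipeline (pixels to normalized camera coordinates, a single k1 radial term, then back to pixels) with hypothetical intrinsics:

import numpy as np

fx, fy, cx, cy = 500.0, 500.0, 320.0, 240.0  # made-up pinhole intrinsics
k1 = -0.1                                    # single radial coefficient

points = np.array([[400.0, 300.0], [100.0, 50.0]])  # pixel coordinates, shape (N, 2)

# Equivalent of K^-1 [u, v, 1]^T for a pinhole camera.
x = (points[:, 0] - cx) / fx
y = (points[:, 1] - cy) / fy

r2 = x * x + y * y
radial = 1.0 + k1 * r2       # rad_poly reduced to its k1 term
xd, yd = x * radial, y * radial

# Back to pixel coordinates.
distorted = np.stack([fx * xd + cx, fy * yd + cy], axis=-1)
print(distorted)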
31,583 | def ip_search_command(client, args):
page = int(args.get('page', 1))
query = args.get('query', None)
params = {
"page": page
}
body = {
"query": query
}
res = client.query(query_type="ip_search", params=params, body=body)
records = res.get('records')
record_count = res.get('record_count')
md = tableToMarkdown(f"IP DSL Search Results ({record_count} record(s)):", records)
command_results = CommandResults(
outputs_prefix=f"SecurityTrails.IP.Search",
outputs_key_field="ip",
outputs=records,
readable_output=md
)
return_results(command_results)
create_standard_ip_context(
ip_data=[{
"Address": x.get('ip'),
"Hostname": x.get('ptr'),
"Ports": ", ".join([str(y['port']) for y in x.get('ports')])
} for x in records])
| def ip_search_command(client, args):
page = int(args.get('page', 1))
query = args.get('query', None)
params = {
"page": page
}
body = {
"query": query
}
res = client.query(query_type="ip_search", params=params, body=body)
records = res.get('records')
record_count = res.get('record_count')
md = tableToMarkdown(f"IP DSL Search Results ({record_count} record(s)):", records)
command_results = CommandResults(
outputs_prefix="SecurityTrails.IP.Search",
outputs_key_field="ip",
outputs=records,
readable_output=md
)
return_results(command_results)
create_standard_ip_context(
ip_data=[{
"Address": x.get('ip'),
"Hostname": x.get('ptr'),
"Ports": ", ".join([str(y['port']) for y in x.get('ports')])
} for x in records])
|
44,047 | def generate_symmetries(qubit_op, num_qubits):
"""Get generators of symmetries, taus, for a given Hamiltonian.
Args:
qubit_op (Hamiltonian): Hamiltonian for which symmetries are to be generated to perform tapering.
num_qubits (int): number of wires required to define the Hamiltonian.
Returns:
generators (list): list of generators of symmetries, taus, for the Hamiltonian.
.. code-block::
>>> symbols, coordinates = (['H', 'H'], np.array([0., 0., -0.66140414, 0., 0., 0.66140414]))
>>> H, qubits = qml.qchem.molecular_hamiltonian(symbols, coordinates)
>>> generators = generate_symmetries(H, qubits)
[(1.0) [Z0 Z1], (1.0) [Z0 Z2], (1.0) [Z0 Z3]]
"""
# Generate binary matrix for qubit_op
E = _binary_matrix(qubit_op.ops, num_qubits)
# Get reduced row echelon form of binary matrix E
E_rref = _reduced_row_echelon(E)
E_reduced = E_rref[~np.all(E_rref == 0, axis=1)] # remove all-zero rows
# Get kernel (i.e., nullspace) for trimmed binary matrix using gaussian elimination
nullspace = _kernel(E_reduced)
# Get generators tau from the calculated nullspace
generators = generate_taus(nullspace, num_qubits)
# Get unitaries from the calculated nullspace
pauli_x = generate_paulis(generators, num_qubits)
return generators, pauli_x
| def generate_symmetries(qubit_op, num_qubits):
"""Get generators of symmetries, taus, for a given Hamiltonian.
Args:
qubit_op (Hamiltonian): Hamiltonian for which symmetries are to be generated to perform tapering.
num_qubits (int): number of wires required to define the Hamiltonian.
Returns:
generators (list): list of tau symmetry generators for the Hamiltonian.
.. code-block::
>>> symbols, coordinates = (['H', 'H'], np.array([0., 0., -0.66140414, 0., 0., 0.66140414]))
>>> H, qubits = qml.qchem.molecular_hamiltonian(symbols, coordinates)
>>> generators = generate_symmetries(H, qubits)
[(1.0) [Z0 Z1], (1.0) [Z0 Z2], (1.0) [Z0 Z3]]
"""
# Generate binary matrix for qubit_op
E = _binary_matrix(qubit_op.ops, num_qubits)
# Get reduced row echelon form of binary matrix E
E_rref = _reduced_row_echelon(E)
E_reduced = E_rref[~np.all(E_rref == 0, axis=1)] # remove all-zero rows
# Get kernel (i.e., nullspace) for trimmed binary matrix using gaussian elimination
nullspace = _kernel(E_reduced)
# Get generators tau from the calculated nullspace
generators = generate_taus(nullspace, num_qubits)
# Get unitaries from the calculated nullspace
pauli_x = generate_paulis(generators, num_qubits)
return generators, pauli_x
|
13,473 | def auto_update(enable=True):
# TODO: Add openSUSE zypper equivalent
service = "yum-cron"
fo, npath = mkstemp()
updated = False
with open(YCFILE) as ifo, open(npath, "w") as tfo:
for line in ifo.readlines():
if re.match("apply_updates = ", line) is not None:
if enable:
tfo.write("apply_updates = yes\n")
else:
tfo.write("apply_updates = no\n")
updated = True
else:
tfo.write(line)
if not updated:
raise Exception(
"apply_updates directive missing in {}, assuming its "
"is corrupt. No change made.".format(YCFILE)
)
shutil.move(npath, YCFILE)
if enable:
systemctl(service, "enable")
systemctl(service, "start")
else:
systemctl(service, "stop")
systemctl(service, "disable")
| def auto_update(enable=True):
# TODO: Add openSUSE zypper equivalent
service = "yum-cron"
fo, npath = mkstemp()
updated = False
with open(YCFILE) as ifo, open(npath, "w") as tfo:
for line in ifo.readlines():
if re.match("apply_updates = ", line) is not None:
if enable:
tfo.write("apply_updates = yes\n")
else:
tfo.write("apply_updates = no\n")
updated = True
else:
tfo.write(line)
if not updated:
raise Exception(
"apply_updates directive missing in {}, assuming it "
"is corrupt. No change made.".format(YCFILE)
)
shutil.move(npath, YCFILE)
if enable:
systemctl(service, "enable")
systemctl(service, "start")
else:
systemctl(service, "stop")
systemctl(service, "disable")
|
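The row above rewrites one directive of a config file through a temporary file and then moves it over the original. A standard-library-only sketch of that pattern, with hypothetical file and directive names (and the temp-file descriptor closed via os.fdopen rather than left open):

import os
import re
import shutil
from tempfile import mkstemp

def set_directive(path, enable, directive="apply_updates"):
    # Rewrite a single `key = value` line via a temp file, then replace the original.
    fd, tmp_path = mkstemp()
    updated = False
    with os.fdopen(fd, "w") as tfo, open(path) as ifo:
        for line in ifo:
            if re.match(directive + " = ", line):
                tfo.write("{} = {}\n".format(directive, "yes" if enable else "no"))
                updated = True
            else:
                tfo.write(line)
    if not updated:
        os.unlink(tmp_path)
        raise Exception("{} directive missing in {}; no change made.".format(directive, path))
    shutil.move(tmp_path, path)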
28,307 | def do1d(
param_set: _BaseParameter, start: float, stop: float,
num_points: int, delay: float,
*param_meas: ParamMeasT,
enter_actions: ActionsT = (),
exit_actions: ActionsT = (),
write_period: Optional[float] = None,
measurement_name: str = "",
exp: Optional[Experiment] = None,
do_plot: Optional[bool] = None,
use_threads: Optional[bool] = None,
additional_setpoints: Sequence[ParamMeasT] = tuple(),
show_progress: Optional[None] = None,
) -> AxesTupleListWithDataSet:
"""
Perform a 1D scan of ``param_set`` from ``start`` to ``stop`` in
``num_points`` measuring param_meas at each step. In case param_meas is
an ArrayParameter this is effectively a 2d scan.
Args:
param_set: The QCoDeS parameter to sweep over
start: Starting point of sweep
stop: End point of sweep
num_points: Number of points in sweep
delay: Delay after setting parameter before measurement is performed
*param_meas: Parameter(s) to measure at each step or functions that
will be called at each step. The function should take no arguments.
The parameters and functions are called in the order they are
supplied.
enter_actions: A list of functions taking no arguments that will be
called before the measurements start
exit_actions: A list of functions taking no arguments that will be
        called after the measurement ends
write_period: The time after which the data is actually written to the
database.
additional_setpoints: A list of setpoint parameters to be registered in
the measurement but not scanned.
measurement_name: Name of the measurement. This will be passed down to
the dataset produced by the measurement. If not given, a default
value of 'results' is used for the dataset.
exp: The experiment to use for this measurement.
do_plot: should png and pdf versions of the images be saved after the
        run. If None the setting will be read from ``qcodesrc.json``
use_threads: If True measurements from each instrument will be done on
separate threads. If you are measuring from several instruments
this may give a significant speedup.
show_progress: should a progress bar be displayed during the
        measurement. If None the setting will be read from ``qcodesrc.json``
Returns:
The QCoDeS dataset.
"""
if do_plot is None:
do_plot = config.dataset.dond_plot
if show_progress is None:
show_progress = config.dataset.dond_show_progress
meas = Measurement(name=measurement_name, exp=exp)
meas._extra_log_info = f"{measurement_name} using do1d"
all_setpoint_params = (param_set,) + tuple(s for s in additional_setpoints)
measured_parameters = tuple(
param for param in param_meas if isinstance(param, _BaseParameter)
)
try:
loop_shape = tuple(1 for _ in additional_setpoints) + (num_points,)
shapes: Shapes = detect_shape_of_measurement(
measured_parameters,
loop_shape
)
except TypeError:
LOG.exception(
f"Could not detect shape of {measured_parameters} "
f"falling back to unknown shape.")
shapes = None
_register_parameters(meas, all_setpoint_params)
_register_parameters(meas, param_meas, setpoints=all_setpoint_params,
shapes=shapes)
_set_write_period(meas, write_period)
_register_actions(meas, enter_actions, exit_actions)
original_delay = param_set.post_delay
param_set.post_delay = delay
# do1D enforces a simple relationship between measured parameters
# and set parameters. For anything more complicated this should be
# reimplemented from scratch
with _catch_keyboard_interrupts() as interrupted, meas.run() as datasaver:
dataset = datasaver.dataset
additional_setpoints_data = process_params_meas(additional_setpoints)
setpoints = np.linspace(start, stop, num_points)
# flush to prevent unflushed print's to visually interrupt tqdm bar
# updates
sys.stdout.flush()
sys.stderr.flush()
for set_point in tqdm(setpoints, disable=not show_progress):
param_set.set(set_point)
datasaver.add_result(
(param_set, set_point),
*process_params_meas(param_meas, use_threads=use_threads),
*additional_setpoints_data
)
param_set.post_delay = original_delay
return _handle_plotting(dataset, do_plot, interrupted())
| def do1d(
param_set: _BaseParameter, start: float, stop: float,
num_points: int, delay: float,
*param_meas: ParamMeasT,
enter_actions: ActionsT = (),
exit_actions: ActionsT = (),
write_period: Optional[float] = None,
measurement_name: str = "",
exp: Optional[Experiment] = None,
do_plot: Optional[bool] = None,
use_threads: Optional[bool] = None,
additional_setpoints: Sequence[ParamMeasT] = tuple(),
show_progress: Optional[None] = None,
) -> AxesTupleListWithDataSet:
"""
Perform a 1D scan of ``param_set`` from ``start`` to ``stop`` in
``num_points`` measuring param_meas at each step. In case param_meas is
an ArrayParameter this is effectively a 2d scan.
Args:
param_set: The QCoDeS parameter to sweep over
start: Starting point of sweep
stop: End point of sweep
num_points: Number of points in sweep
delay: Delay after setting parameter before measurement is performed
*param_meas: Parameter(s) to measure at each step or functions that
will be called at each step. The function should take no arguments.
The parameters and functions are called in the order they are
supplied.
enter_actions: A list of functions taking no arguments that will be
called before the measurements start
exit_actions: A list of functions taking no arguments that will be
        called after the measurement ends
write_period: The time after which the data is actually written to the
database.
additional_setpoints: A list of setpoint parameters to be registered in
the measurement but not scanned.
measurement_name: Name of the measurement. This will be passed down to
the dataset produced by the measurement. If not given, a default
value of 'results' is used for the dataset.
exp: The experiment to use for this measurement.
do_plot: should png and pdf versions of the images be saved after the
        run. If None the setting will be read from ``qcodesrc.json``
use_threads: If True measurements from each instrument will be done on
separate threads. If you are measuring from several instruments
this may give a significant speedup.
show_progress: should a progress bar be displayed during the
        measurement. If None the setting will be read from ``qcodesrc.json``
Returns:
The QCoDeS dataset.
"""
if do_plot is None:
do_plot = config.dataset.dond_plot
if show_progress is None:
show_progress = config.dataset.dond_show_progress
meas = Measurement(name=measurement_name, exp=exp)
meas._extra_log_info = f"Using 'qcodes.utils.modulename.do1d'"
all_setpoint_params = (param_set,) + tuple(s for s in additional_setpoints)
measured_parameters = tuple(
param for param in param_meas if isinstance(param, _BaseParameter)
)
try:
loop_shape = tuple(1 for _ in additional_setpoints) + (num_points,)
shapes: Shapes = detect_shape_of_measurement(
measured_parameters,
loop_shape
)
except TypeError:
LOG.exception(
f"Could not detect shape of {measured_parameters} "
f"falling back to unknown shape.")
shapes = None
_register_parameters(meas, all_setpoint_params)
_register_parameters(meas, param_meas, setpoints=all_setpoint_params,
shapes=shapes)
_set_write_period(meas, write_period)
_register_actions(meas, enter_actions, exit_actions)
original_delay = param_set.post_delay
param_set.post_delay = delay
# do1D enforces a simple relationship between measured parameters
# and set parameters. For anything more complicated this should be
# reimplemented from scratch
with _catch_keyboard_interrupts() as interrupted, meas.run() as datasaver:
dataset = datasaver.dataset
additional_setpoints_data = process_params_meas(additional_setpoints)
setpoints = np.linspace(start, stop, num_points)
# flush to prevent unflushed print's to visually interrupt tqdm bar
# updates
sys.stdout.flush()
sys.stderr.flush()
for set_point in tqdm(setpoints, disable=not show_progress):
param_set.set(set_point)
datasaver.add_result(
(param_set, set_point),
*process_params_meas(param_meas, use_threads=use_threads),
*additional_setpoints_data
)
param_set.post_delay = original_delay
return _handle_plotting(dataset, do_plot, interrupted())
|
8,222 | def test_unit(hmi_synoptic):
# Check that the deafult unit of Mx/cm**2 is correctly replaced with a
# FITS standard unit
assert hmi_synoptic.unit == u.G
hmi_synoptic.meta['bunit'] = 'm'
assert hmi_synoptic.unit == u.m
| def test_unit(hmi_synoptic):
# Check that the default unit of Mx/cm**2 is correctly replaced with a
# FITS standard unit
assert hmi_synoptic.unit == u.G
hmi_synoptic.meta['bunit'] = 'm'
assert hmi_synoptic.unit == u.m
|
52,932 | def _sanitize_rst(string):
"""Use regex to remove at least some sphinx directives."""
# :class:`a.b.c <thing here>`, :ref:`abc <thing here>` --> thing here
p, e = r'(\s|^):[^:\s]+:`', r'`(\W|$)'
string = re.sub(p + r'\S+\s*<([^>`]+)>' + e, r'\1\2\3', string)
# :class:`~a.b.c` --> c
string = re.sub(p + r'~([^`]+)' + e, _regroup, string)
# :class:`a.b.c` --> a.b.c
string = re.sub(p + r'([^`]+)' + e, r'\1\2\3', string)
# ``whatever thing`` --> whatever thing
p = r'(\s|^)`'
string = re.sub(p + r'`([^`]+)`' + e, r'\1\2\3', string)
# `whatever thing` --> whatever thing
string = re.sub(p + r'([^`]+)' + e, r'\1\2\3', string)
# **string** --> string
string = re.sub(r'\*\*([^\*]*)\*\*', r'\1', string)
# *string* --> string
string = re.sub(r'\*([^\*]*)\*', r'\1', string)
# `link text <url>`_ --> link text
string = re.sub(r'`(.*) <.*>`\_', r'\1', string)
# :term:`the term` --> the term
string = re.sub(r':term:`(.*)`', r'\1', string)
# :ref:`the ref` --> the ref
string = re.sub(r':ref:`(.*)`', r'\1', string)
return string
| def _sanitize_rst(string):
"""Use regex to remove at least some sphinx directives."""
# :class:`a.b.c <thing here>`, :ref:`abc <thing here>` --> thing here
p, e = r'(\s|^):[^:\s]+:`', r'`(\W|$)'
string = re.sub(p + r'\S+\s*<([^>`]+)>' + e, r'\1\2\3', string)
# :class:`~a.b.c` --> c
string = re.sub(p + r'~([^`]+)' + e, _regroup, string)
# :class:`a.b.c` --> a.b.c
string = re.sub(p + r'([^`]+)' + e, r'\1\2\3', string)
# ``whatever thing`` --> whatever thing
p = r'(\s|^)`'
string = re.sub(p + r'`([^`]+)`' + e, r'\1\2\3', string)
# `whatever thing` --> whatever thing
string = re.sub(p + r'([^`]+)' + e, r'\1\2\3', string)
# **string** --> string
string = re.sub(r'\*\*([^\*]*)\*\*', r'\1', string)
# *string* --> string
string = re.sub(r'\*([^\*]*)\*', r'\1', string)
# `link text <url>`_ --> link text
string = re.sub(r'`(.*) <.*>`\_', r'\1', string)
# :role:`the thing` --> the thing
string = re.sub(r':[a-z]+:`([^`]+)`', r'\1', string)
return string
|
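The last change in this row folds the :term:/:ref: substitutions into one generic role pattern. A quick standalone check of that pattern, copied from the modified column:

import re

samples = [
    "See :term:`the term` for details.",
    "Uses :ref:`the ref` and :func:`numpy.mean`.",
]
for s in samples:
    # :role:`the thing` --> the thing
    print(re.sub(r':[a-z]+:`([^`]+)`', r'\1', s))
# See the term for details.
# Uses the ref and numpy.mean.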
826 | def test_diagonal_difference():
m = Matrix(3, 3, range(9))
diag_diff = m.diagonal_difference()
assert diag_diff == 2
| def test_diagonal_difference():
m = Matrix(3, 3, range(9))
diag_diff = m.diagonal_difference()
assert diag_diff == 0
|
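The corrected expected value in this row can be checked by hand, assuming diagonal_difference compares the usual primary and secondary diagonal sums: for a 3x3 matrix filled with range(9), both diagonals sum to 12.

# Arithmetic check of the corrected expectation, without the Matrix class.
rows = [[0, 1, 2],
        [3, 4, 5],
        [6, 7, 8]]  # 3x3 matrix filled with range(9)

main_diag = sum(rows[i][i] for i in range(3))      # 0 + 4 + 8 = 12
anti_diag = sum(rows[i][2 - i] for i in range(3))  # 2 + 4 + 6 = 12
print(abs(main_diag - anti_diag))                  # 0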
6,945 | def pdf_to_base64(filename):
from frappe.utils.file_manager import get_file_path
if not filename.startswith('/files') or '..' in filename:
return
file_path = get_file_path(filename)
if not file_path:
return
with open(file_path, 'rb') as pdf_file:
base64_string = base64.b64encode(pdf_file.read())
return base64_string
| def pdf_to_base64(filename):
from frappe.utils.file_manager import get_file_path
if '../' in filename:
return
file_path = get_file_path(filename)
if not file_path:
return
with open(file_path, 'rb') as pdf_file:
base64_string = base64.b64encode(pdf_file.read())
return base64_string
|
2,660 | def test_change_n_init_future_warning():
km = KMeans(n_init=1)
with warnings.catch_warnings():
warnings.filterwarnings("error")
km.fit(X)
msg = "The default value of n_init will change from 10 to 5 in 1.3"
km = KMeans()
with pytest.warns(FutureWarning, match=msg):
km.fit(X)
| def test_change_n_init_future_warning():
km = KMeans(n_init=1)
with warnings.catch_warnings():
warnings.filterwarnings("error", FutureWarning)
km.fit(X)
msg = "The default value of n_init will change from 10 to 5 in 1.3"
km = KMeans()
with pytest.warns(FutureWarning, match=msg):
km.fit(X)
|
45,296 | def read_sql(
sql,
con,
index_col=None,
coerce_float=True,
params=None,
parse_dates=None,
columns=None,
chunksize=None,
partition_column: Optional[str] = None,
lower_bound: Optional[int] = None,
upper_bound: Optional[int] = None,
max_sessions: Optional[int] = None,
) -> DataFrame:
"""
General documentation in `modin.pandas.read_sql`.
Experimental feature is simultaneous reading from a sql file.
Parameters
----------
sql : str or SQLAlchemy Selectable (select or text object)
SQL query to be executed or a table name.
con : SQLAlchemy connectable, str, or sqlite3 connection
Using SQLAlchemy makes it possible to use any DB supported by that
library. If a DBAPI2 object, only sqlite3 is supported. The user is responsible
for engine disposal and connection closure for the SQLAlchemy
connectable; str connections are closed automatically. See
`here <https://docs.sqlalchemy.org/en/13/core/connections.html>`_.
index_col : str or list of str, optional
Column(s) to set as index(MultiIndex).
coerce_float : bool, default: True
Attempts to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets.
params : list, tuple or dict, optional
List of parameters to pass to execute method. The syntax used to pass
parameters is database driver dependent. Check your database driver
documentation for which of the five syntax styles, described in PEP 249’s
paramstyle, is supported. Eg. for psycopg2, uses %(name)s so use params=
{‘name’ : ‘value’}.
parse_dates : list or dict, optional
- List of column names to parse as dates.
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times, or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps.
- Dict of ``{column_name: arg dict}``, where the arg dict corresponds
to the keyword arguments of :func:`pandas.to_datetime`
Especially useful with databases without native Datetime support,
such as SQLite.
columns : list, optional
List of column names to select from SQL table (only used when reading
a table).
chunksize : int, optional
If specified, return an iterator where `chunksize` is the
number of rows to include in each chunk.
partition_column : str, optional
        Column used to share the data between the workers (MUST be an INTEGER column).
lower_bound : int, optional
The minimum value to be requested from the partition_column.
upper_bound : int, optional
The maximum value to be requested from the partition_column.
max_sessions : int, optional
The maximum number of simultaneous connections allowed to use.
Returns
-------
Modin DataFrame.
"""
Engine.subscribe(_update_engine)
assert IsExperimental.get(), "This only works in experimental mode"
_, _, _, kwargs = inspect.getargvalues(inspect.currentframe())
return DataFrame(query_compiler=EngineDispatcher.read_sql(**kwargs))
| def read_sql(
sql,
con,
index_col=None,
coerce_float=True,
params=None,
parse_dates=None,
columns=None,
chunksize=None,
partition_column: Optional[str] = None,
lower_bound: Optional[int] = None,
upper_bound: Optional[int] = None,
max_sessions: Optional[int] = None,
) -> DataFrame:
"""
General documentation is available in `modin.pandas.read_sql`.
Experimental feature is simultaneous reading from a sql file.
Parameters
----------
sql : str or SQLAlchemy Selectable (select or text object)
SQL query to be executed or a table name.
con : SQLAlchemy connectable, str, or sqlite3 connection
Using SQLAlchemy makes it possible to use any DB supported by that
library. If a DBAPI2 object, only sqlite3 is supported. The user is responsible
for engine disposal and connection closure for the SQLAlchemy
connectable; str connections are closed automatically. See
`here <https://docs.sqlalchemy.org/en/13/core/connections.html>`_.
index_col : str or list of str, optional
Column(s) to set as index(MultiIndex).
coerce_float : bool, default: True
Attempts to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets.
params : list, tuple or dict, optional
List of parameters to pass to execute method. The syntax used to pass
parameters is database driver dependent. Check your database driver
documentation for which of the five syntax styles, described in PEP 249’s
paramstyle, is supported. Eg. for psycopg2, uses %(name)s so use params=
{‘name’ : ‘value’}.
parse_dates : list or dict, optional
- List of column names to parse as dates.
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times, or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps.
- Dict of ``{column_name: arg dict}``, where the arg dict corresponds
to the keyword arguments of :func:`pandas.to_datetime`
Especially useful with databases without native Datetime support,
such as SQLite.
columns : list, optional
List of column names to select from SQL table (only used when reading
a table).
chunksize : int, optional
If specified, return an iterator where `chunksize` is the
number of rows to include in each chunk.
partition_column : str, optional
        Column used to share the data between the workers (MUST be an INTEGER column).
lower_bound : int, optional
The minimum value to be requested from the partition_column.
upper_bound : int, optional
The maximum value to be requested from the partition_column.
max_sessions : int, optional
The maximum number of simultaneous connections allowed to use.
Returns
-------
Modin DataFrame.
"""
Engine.subscribe(_update_engine)
assert IsExperimental.get(), "This only works in experimental mode"
_, _, _, kwargs = inspect.getargvalues(inspect.currentframe())
return DataFrame(query_compiler=EngineDispatcher.read_sql(**kwargs))
|
28,051 | def convert(reports: List[Report]) -> List[str]:
""" Convert the given reports to CodeChecker baseline format.
Returns a list of sorted unique report hashes.
"""
return sorted(set([r.report_hash for r in reports]))
| def convert(reports: List[Report]) -> List[str]:
""" Convert the given reports to CodeChecker baseline format.
Returns a list of sorted unique report hashes.
"""
return sorted(set(r.report_hash for r in reports))
|
899 | def unrad(eq, *syms, **flags):
"""
Remove radicals with symbolic arguments and return (eq, cov),
None, or raise an error.
Explanation
===========
None is returned if there are no radicals to remove.
NotImplementedError is raised if there are radicals and they cannot be
removed or if the relationship between the original symbols and the
change of variable needed to rewrite the system as a polynomial cannot
be solved.
Otherwise the tuple, ``(eq, cov)``, is returned where:
*eq*, ``cov``
*eq* is an equation without radicals (in the symbol(s) of
interest) whose solutions are a superset of the solutions to the
original expression. *eq* might be rewritten in terms of a new
variable; the relationship to the original variables is given by
``cov`` which is a list containing ``v`` and ``v**p - b`` where
``p`` is the power needed to clear the radical and ``b`` is the
radical now expressed as a polynomial in the symbols of interest.
For example, for sqrt(2 - x) the tuple would be
``(c, c**2 - 2 + x)``. The solutions of *eq* will contain
solutions to the original equation (if there are any).
*syms*
An iterable of symbols which, if provided, will limit the focus of
radical removal: only radicals with one or more of the symbols of
interest will be cleared. All free symbols are used if *syms* is not
set.
*flags* are used internally for communication during recursive calls.
Two options are also recognized:
``take``, when defined, is interpreted as a single-argument function
that returns True if a given Pow should be handled.
Radicals can be removed from an expression if:
* All bases of the radicals are the same; a change of variables is
done in this case.
* If all radicals appear in one term of the expression.
* There are only four terms with sqrt() factors or there are less than
four terms having sqrt() factors.
* There are only two terms with radicals.
Examples
========
>>> from sympy.solvers.solvers import unrad
>>> from sympy.abc import x
>>> from sympy import sqrt, Rational, root
>>> unrad(sqrt(x)*x**Rational(1, 3) + 2)
(x**5 - 64, [])
>>> unrad(sqrt(x) + root(x + 1, 3))
(-x**3 + x**2 + 2*x + 1, [])
>>> eq = sqrt(x) + root(x, 3) - 2
>>> unrad(eq)
(_p**3 + _p**2 - 2, [_p, _p**6 - x])
"""
from sympy import Equality as Eq
uflags = dict(check=False, simplify=False)
def _cov(p, e):
if cov:
# XXX - uncovered
oldp, olde = cov
if Poly(e, p).degree(p) in (1, 2):
cov[:] = [p, olde.subs(oldp, _solve(e, p, **uflags)[0])]
else:
raise NotImplementedError
else:
cov[:] = [p, e]
def _canonical(eq, cov):
if cov:
# change symbol to vanilla so no solutions are eliminated
p, e = cov
rep = {p: Dummy(p.name)}
eq = eq.xreplace(rep)
cov = [p.xreplace(rep), e.xreplace(rep)]
# remove constants and powers of factors since these don't change
# the location of the root; XXX should factor or factor_terms be used?
eq = factor_terms(_mexpand(eq.as_numer_denom()[0], recursive=True), clear=True)
if eq.is_Mul:
args = []
for f in eq.args:
if f.is_number:
continue
if f.is_Pow:
args.append(f.base)
else:
args.append(f)
eq = Mul(*args) # leave as Mul for more efficient solving
# make the sign canonical
margs = list(Mul.make_args(eq))
changed = False
for i, m in enumerate(margs):
if m.could_extract_minus_sign():
margs[i] = -m
changed = True
if changed:
eq = Mul(*margs, evaluate=False)
return eq, cov
def _Q(pow):
# return leading Rational of denominator of Pow's exponent
c = pow.as_base_exp()[1].as_coeff_Mul()[0]
if not c.is_Rational:
return S.One
return c.q
# define the _take method that will determine whether a term is of interest
def _take(d):
# return True if coefficient of any factor's exponent's den is not 1
for pow in Mul.make_args(d):
if not pow.is_Pow:
continue
if _Q(pow) == 1:
continue
if pow.free_symbols & syms:
return True
return False
_take = flags.setdefault('_take', _take)
if not isinstance(eq, Eq):
eq = eq.rewrite(Add)
elif not isinstance(eq, Expr):
return
cov, nwas, rpt = [flags.setdefault(k, v) for k, v in
sorted(dict(cov=[], n=None, rpt=0).items())]
# preconditioning
eq = powdenest(factor_terms(eq, radical=True, clear=True))
eq = eq.as_numer_denom()[0]
eq = _mexpand(eq, recursive=True)
if eq.is_number:
return
# see if there are radicals in symbols of interest
syms = set(syms) or eq.free_symbols # _take uses this
poly = eq.as_poly()
gens = [g for g in poly.gens if _take(g)]
if not gens:
return
# recast poly in terms of eigen-gens
poly = eq.as_poly(*gens)
# - an exponent has a symbol of interest (don't handle)
if any(g.exp.has(*syms) for g in gens):
return
def _rads_bases_lcm(poly):
# if all the bases are the same or all the radicals are in one
# term, `lcm` will be the lcm of the denominators of the
# exponents of the radicals
lcm = 1
rads = set()
bases = set()
for g in poly.gens:
q = _Q(g)
if q != 1:
rads.add(g)
lcm = ilcm(lcm, q)
bases.add(g.base)
return rads, bases, lcm
rads, bases, lcm = _rads_bases_lcm(poly)
covsym = Dummy('p', nonnegative=True)
# only keep in syms symbols that actually appear in radicals;
# and update gens
newsyms = set()
for r in rads:
newsyms.update(syms & r.free_symbols)
if newsyms != syms:
syms = newsyms
gens = [g for g in gens if g.free_symbols & syms]
# get terms together that have common generators
drad = dict(list(zip(rads, list(range(len(rads))))))
rterms = {(): []}
args = Add.make_args(poly.as_expr())
for t in args:
if _take(t):
common = set(t.as_poly().gens).intersection(rads)
key = tuple(sorted([drad[i] for i in common]))
else:
key = ()
rterms.setdefault(key, []).append(t)
others = Add(*rterms.pop(()))
rterms = [Add(*rterms[k]) for k in rterms.keys()]
# the output will depend on the order terms are processed, so
# make it canonical quickly
rterms = list(reversed(list(ordered(rterms))))
ok = False # we don't have a solution yet
depth = sqrt_depth(eq)
if len(rterms) == 1 and not (rterms[0].is_Add and lcm > 2):
eq = rterms[0]**lcm - ((-others)**lcm)
ok = True
else:
if len(rterms) == 1 and rterms[0].is_Add:
rterms = list(rterms[0].args)
if len(bases) == 1:
b = bases.pop()
if len(syms) > 1:
x = b.free_symbols
else:
x = syms
x = list(ordered(x))[0]
try:
inv = _solve(covsym**lcm - b, x, **uflags)
if not inv:
raise NotImplementedError
eq = poly.as_expr().subs(b, covsym**lcm).subs(x, inv[0])
_cov(covsym, covsym**lcm - b)
return _canonical(eq, cov)
except NotImplementedError:
pass
if len(rterms) == 2:
if not others:
eq = rterms[0]**lcm - (-rterms[1])**lcm
ok = True
elif not log(lcm, 2).is_Integer:
# the lcm-is-power-of-two case is handled below
r0, r1 = rterms
if flags.get('_reverse', False):
r1, r0 = r0, r1
i0 = _rads0, _bases0, lcm0 = _rads_bases_lcm(r0.as_poly())
i1 = _rads1, _bases1, lcm1 = _rads_bases_lcm(r1.as_poly())
for reverse in range(2):
if reverse:
i0, i1 = i1, i0
r0, r1 = r1, r0
_rads1, _, lcm1 = i1
_rads1 = Mul(*_rads1)
t1 = _rads1**lcm1
c = covsym**lcm1 - t1
for x in syms:
try:
sol = _solve(c, x, **uflags)
if not sol:
raise NotImplementedError
neweq = r0.subs(x, sol[0]) + covsym*r1/_rads1 + \
others
tmp = unrad(neweq, covsym)
if tmp:
eq, newcov = tmp
if newcov:
newp, newc = newcov
_cov(newp, c.subs(covsym,
_solve(newc, covsym, **uflags)[0]))
else:
_cov(covsym, c)
else:
eq = neweq
_cov(covsym, c)
ok = True
break
except NotImplementedError:
if reverse:
raise NotImplementedError(
'no successful change of variable found')
else:
pass
if ok:
break
elif len(rterms) == 3:
# two cube roots and another with order less than 5
# (so an analytical solution can be found) or a base
# that matches one of the cube root bases
info = [_rads_bases_lcm(i.as_poly()) for i in rterms]
RAD = 0
BASES = 1
LCM = 2
if info[0][LCM] != 3:
info.append(info.pop(0))
rterms.append(rterms.pop(0))
elif info[1][LCM] != 3:
info.append(info.pop(1))
rterms.append(rterms.pop(1))
if info[0][LCM] == info[1][LCM] == 3:
if info[1][BASES] != info[2][BASES]:
info[0], info[1] = info[1], info[0]
rterms[0], rterms[1] = rterms[1], rterms[0]
if info[1][BASES] == info[2][BASES]:
eq = rterms[0]**3 + (rterms[1] + rterms[2] + others)**3
ok = True
elif info[2][LCM] < 5:
# a*root(A, 3) + b*root(B, 3) + others = c
a, b, c, d, A, B = [Dummy(i) for i in 'abcdAB']
# zz represents the unraded expression into which the
# specifics for this case are substituted
zz = (c - d)*(A**3*a**9 + 3*A**2*B*a**6*b**3 -
3*A**2*a**6*c**3 + 9*A**2*a**6*c**2*d - 9*A**2*a**6*c*d**2 +
3*A**2*a**6*d**3 + 3*A*B**2*a**3*b**6 + 21*A*B*a**3*b**3*c**3 -
63*A*B*a**3*b**3*c**2*d + 63*A*B*a**3*b**3*c*d**2 -
21*A*B*a**3*b**3*d**3 + 3*A*a**3*c**6 - 18*A*a**3*c**5*d +
45*A*a**3*c**4*d**2 - 60*A*a**3*c**3*d**3 + 45*A*a**3*c**2*d**4 -
18*A*a**3*c*d**5 + 3*A*a**3*d**6 + B**3*b**9 - 3*B**2*b**6*c**3 +
9*B**2*b**6*c**2*d - 9*B**2*b**6*c*d**2 + 3*B**2*b**6*d**3 +
3*B*b**3*c**6 - 18*B*b**3*c**5*d + 45*B*b**3*c**4*d**2 -
60*B*b**3*c**3*d**3 + 45*B*b**3*c**2*d**4 - 18*B*b**3*c*d**5 +
3*B*b**3*d**6 - c**9 + 9*c**8*d - 36*c**7*d**2 + 84*c**6*d**3 -
126*c**5*d**4 + 126*c**4*d**5 - 84*c**3*d**6 + 36*c**2*d**7 -
9*c*d**8 + d**9)
def _t(i):
b = Mul(*info[i][RAD])
return cancel(rterms[i]/b), Mul(*info[i][BASES])
aa, AA = _t(0)
bb, BB = _t(1)
cc = -rterms[2]
dd = others
eq = zz.xreplace(dict(zip(
(a, A, b, B, c, d),
(aa, AA, bb, BB, cc, dd))))
ok = True
# handle power-of-2 cases
if not ok:
if log(lcm, 2).is_Integer and (not others and
len(rterms) == 4 or len(rterms) < 4):
def _norm2(a, b):
return a**2 + b**2 + 2*a*b
if len(rterms) == 4:
# (r0+r1)**2 - (r2+r3)**2
r0, r1, r2, r3 = rterms
eq = _norm2(r0, r1) - _norm2(r2, r3)
ok = True
elif len(rterms) == 3:
# (r1+r2)**2 - (r0+others)**2
r0, r1, r2 = rterms
eq = _norm2(r1, r2) - _norm2(r0, others)
ok = True
elif len(rterms) == 2:
# r0**2 - (r1+others)**2
r0, r1 = rterms
eq = r0**2 - _norm2(r1, others)
ok = True
new_depth = sqrt_depth(eq) if ok else depth
rpt += 1 # XXX how many repeats with others unchanging is enough?
if not ok or (
nwas is not None and len(rterms) == nwas and
new_depth is not None and new_depth == depth and
rpt > 3):
raise NotImplementedError('Cannot remove all radicals')
flags.update(dict(cov=cov, n=len(rterms), rpt=rpt))
neq = unrad(eq, *syms, **flags)
if neq:
eq, cov = neq
eq, cov = _canonical(eq, cov)
return eq, cov
| def unrad(eq, *syms, **flags):
"""
Remove radicals with symbolic arguments and return (eq, cov),
None, or raise an error.
Explanation
===========
None is returned if there are no radicals to remove.
NotImplementedError is raised if there are radicals and they cannot be
removed or if the relationship between the original symbols and the
change of variable needed to rewrite the system as a polynomial cannot
be solved.
Otherwise the tuple, ``(eq, cov)``, is returned where:
*eq*, ``cov``
*eq* is an equation without radicals (in the symbol(s) of
interest) whose solutions are a superset of the solutions to the
original expression. *eq* might be rewritten in terms of a new
variable; the relationship to the original variables is given by
``cov`` which is a list containing ``v`` and ``v**p - b`` where
``p`` is the power needed to clear the radical and ``b`` is the
radical now expressed as a polynomial in the symbols of interest.
For example, for sqrt(2 - x) the tuple would be
``(c, c**2 - 2 + x)``. The solutions of *eq* will contain
solutions to the original equation (if there are any).
*syms*
An iterable of symbols which, if provided, will limit the focus of
radical removal: only radicals with one or more of the symbols of
interest will be cleared. All free symbols are used if *syms* is not
set.
*flags* are used internally for communication during recursive calls.
Two options are also recognized:
``take``, when defined, is interpreted as a single-argument function
that returns True if a given Pow should be handled.
Radicals can be removed from an expression if:
* All bases of the radicals are the same; a change of variables is
done in this case.
* If all radicals appear in one term of the expression.
* There are only four terms with sqrt() factors or there are less than
four terms having sqrt() factors.
* There are only two terms with radicals.
Examples
========
>>> from sympy.solvers.solvers import unrad
>>> from sympy.abc import x
>>> from sympy import sqrt, Rational, root
>>> unrad(sqrt(x)*x**Rational(1, 3) + 2)
(x**5 - 64, [])
>>> unrad(sqrt(x) + root(x + 1, 3))
(-x**3 + x**2 + 2*x + 1, [])
>>> eq = sqrt(x) + root(x, 3) - 2
>>> unrad(eq)
(_p**3 + _p**2 - 2, [_p, _p**6 - x])
"""
from sympy import Equality as Eq
uflags = dict(check=False, simplify=False)
def _cov(p, e):
if cov:
# XXX - uncovered
oldp, olde = cov
if Poly(e, p).degree(p) in (1, 2):
cov[:] = [p, olde.subs(oldp, _solve(e, p, **uflags)[0])]
else:
raise NotImplementedError
else:
cov[:] = [p, e]
def _canonical(eq, cov):
if cov:
# change symbol to vanilla so no solutions are eliminated
p, e = cov
rep = {p: Dummy(p.name)}
eq = eq.xreplace(rep)
cov = [p.xreplace(rep), e.xreplace(rep)]
# remove constants and powers of factors since these don't change
# the location of the root; XXX should factor or factor_terms be used?
eq = factor_terms(_mexpand(eq.as_numer_denom()[0], recursive=True), clear=True)
if eq.is_Mul:
args = []
for f in eq.args:
if f.is_number:
continue
if f.is_Pow:
args.append(f.base)
else:
args.append(f)
eq = Mul(*args) # leave as Mul for more efficient solving
# make the sign canonical
margs = list(Mul.make_args(eq))
changed = False
for i, m in enumerate(margs):
if m.could_extract_minus_sign():
margs[i] = -m
changed = True
if changed:
eq = Mul(*margs, evaluate=False)
return eq, cov
def _Q(pow):
# return leading Rational of denominator of Pow's exponent
c = pow.as_base_exp()[1].as_coeff_Mul()[0]
if not c.is_Rational:
return S.One
return c.q
# define the _take method that will determine whether a term is of interest
def _take(d):
# return True if coefficient of any factor's exponent's den is not 1
for pow in Mul.make_args(d):
if not pow.is_Pow:
continue
if _Q(pow) == 1:
continue
if pow.free_symbols & syms:
return True
return False
_take = flags.setdefault('_take', _take)
if isinstance(eq, Eq):
eq = eq.rewrite(Add)
elif not isinstance(eq, Expr):
return
cov, nwas, rpt = [flags.setdefault(k, v) for k, v in
sorted(dict(cov=[], n=None, rpt=0).items())]
# preconditioning
eq = powdenest(factor_terms(eq, radical=True, clear=True))
eq = eq.as_numer_denom()[0]
eq = _mexpand(eq, recursive=True)
if eq.is_number:
return
# see if there are radicals in symbols of interest
syms = set(syms) or eq.free_symbols # _take uses this
poly = eq.as_poly()
gens = [g for g in poly.gens if _take(g)]
if not gens:
return
# recast poly in terms of eigen-gens
poly = eq.as_poly(*gens)
# - an exponent has a symbol of interest (don't handle)
if any(g.exp.has(*syms) for g in gens):
return
def _rads_bases_lcm(poly):
# if all the bases are the same or all the radicals are in one
# term, `lcm` will be the lcm of the denominators of the
# exponents of the radicals
lcm = 1
rads = set()
bases = set()
for g in poly.gens:
q = _Q(g)
if q != 1:
rads.add(g)
lcm = ilcm(lcm, q)
bases.add(g.base)
return rads, bases, lcm
rads, bases, lcm = _rads_bases_lcm(poly)
covsym = Dummy('p', nonnegative=True)
# only keep in syms symbols that actually appear in radicals;
# and update gens
newsyms = set()
for r in rads:
newsyms.update(syms & r.free_symbols)
if newsyms != syms:
syms = newsyms
gens = [g for g in gens if g.free_symbols & syms]
# get terms together that have common generators
drad = dict(list(zip(rads, list(range(len(rads))))))
rterms = {(): []}
args = Add.make_args(poly.as_expr())
for t in args:
if _take(t):
common = set(t.as_poly().gens).intersection(rads)
key = tuple(sorted([drad[i] for i in common]))
else:
key = ()
rterms.setdefault(key, []).append(t)
others = Add(*rterms.pop(()))
rterms = [Add(*rterms[k]) for k in rterms.keys()]
# the output will depend on the order terms are processed, so
# make it canonical quickly
rterms = list(reversed(list(ordered(rterms))))
ok = False # we don't have a solution yet
depth = sqrt_depth(eq)
if len(rterms) == 1 and not (rterms[0].is_Add and lcm > 2):
eq = rterms[0]**lcm - ((-others)**lcm)
ok = True
else:
if len(rterms) == 1 and rterms[0].is_Add:
rterms = list(rterms[0].args)
if len(bases) == 1:
b = bases.pop()
if len(syms) > 1:
x = b.free_symbols
else:
x = syms
x = list(ordered(x))[0]
try:
inv = _solve(covsym**lcm - b, x, **uflags)
if not inv:
raise NotImplementedError
eq = poly.as_expr().subs(b, covsym**lcm).subs(x, inv[0])
_cov(covsym, covsym**lcm - b)
return _canonical(eq, cov)
except NotImplementedError:
pass
if len(rterms) == 2:
if not others:
eq = rterms[0]**lcm - (-rterms[1])**lcm
ok = True
elif not log(lcm, 2).is_Integer:
# the lcm-is-power-of-two case is handled below
r0, r1 = rterms
if flags.get('_reverse', False):
r1, r0 = r0, r1
i0 = _rads0, _bases0, lcm0 = _rads_bases_lcm(r0.as_poly())
i1 = _rads1, _bases1, lcm1 = _rads_bases_lcm(r1.as_poly())
for reverse in range(2):
if reverse:
i0, i1 = i1, i0
r0, r1 = r1, r0
_rads1, _, lcm1 = i1
_rads1 = Mul(*_rads1)
t1 = _rads1**lcm1
c = covsym**lcm1 - t1
for x in syms:
try:
sol = _solve(c, x, **uflags)
if not sol:
raise NotImplementedError
neweq = r0.subs(x, sol[0]) + covsym*r1/_rads1 + \
others
tmp = unrad(neweq, covsym)
if tmp:
eq, newcov = tmp
if newcov:
newp, newc = newcov
_cov(newp, c.subs(covsym,
_solve(newc, covsym, **uflags)[0]))
else:
_cov(covsym, c)
else:
eq = neweq
_cov(covsym, c)
ok = True
break
except NotImplementedError:
if reverse:
raise NotImplementedError(
'no successful change of variable found')
else:
pass
if ok:
break
elif len(rterms) == 3:
# two cube roots and another with order less than 5
# (so an analytical solution can be found) or a base
# that matches one of the cube root bases
info = [_rads_bases_lcm(i.as_poly()) for i in rterms]
RAD = 0
BASES = 1
LCM = 2
if info[0][LCM] != 3:
info.append(info.pop(0))
rterms.append(rterms.pop(0))
elif info[1][LCM] != 3:
info.append(info.pop(1))
rterms.append(rterms.pop(1))
if info[0][LCM] == info[1][LCM] == 3:
if info[1][BASES] != info[2][BASES]:
info[0], info[1] = info[1], info[0]
rterms[0], rterms[1] = rterms[1], rterms[0]
if info[1][BASES] == info[2][BASES]:
eq = rterms[0]**3 + (rterms[1] + rterms[2] + others)**3
ok = True
elif info[2][LCM] < 5:
# a*root(A, 3) + b*root(B, 3) + others = c
a, b, c, d, A, B = [Dummy(i) for i in 'abcdAB']
# zz represents the unraded expression into which the
# specifics for this case are substituted
zz = (c - d)*(A**3*a**9 + 3*A**2*B*a**6*b**3 -
3*A**2*a**6*c**3 + 9*A**2*a**6*c**2*d - 9*A**2*a**6*c*d**2 +
3*A**2*a**6*d**3 + 3*A*B**2*a**3*b**6 + 21*A*B*a**3*b**3*c**3 -
63*A*B*a**3*b**3*c**2*d + 63*A*B*a**3*b**3*c*d**2 -
21*A*B*a**3*b**3*d**3 + 3*A*a**3*c**6 - 18*A*a**3*c**5*d +
45*A*a**3*c**4*d**2 - 60*A*a**3*c**3*d**3 + 45*A*a**3*c**2*d**4 -
18*A*a**3*c*d**5 + 3*A*a**3*d**6 + B**3*b**9 - 3*B**2*b**6*c**3 +
9*B**2*b**6*c**2*d - 9*B**2*b**6*c*d**2 + 3*B**2*b**6*d**3 +
3*B*b**3*c**6 - 18*B*b**3*c**5*d + 45*B*b**3*c**4*d**2 -
60*B*b**3*c**3*d**3 + 45*B*b**3*c**2*d**4 - 18*B*b**3*c*d**5 +
3*B*b**3*d**6 - c**9 + 9*c**8*d - 36*c**7*d**2 + 84*c**6*d**3 -
126*c**5*d**4 + 126*c**4*d**5 - 84*c**3*d**6 + 36*c**2*d**7 -
9*c*d**8 + d**9)
def _t(i):
b = Mul(*info[i][RAD])
return cancel(rterms[i]/b), Mul(*info[i][BASES])
aa, AA = _t(0)
bb, BB = _t(1)
cc = -rterms[2]
dd = others
eq = zz.xreplace(dict(zip(
(a, A, b, B, c, d),
(aa, AA, bb, BB, cc, dd))))
ok = True
# handle power-of-2 cases
if not ok:
if log(lcm, 2).is_Integer and (not others and
len(rterms) == 4 or len(rterms) < 4):
def _norm2(a, b):
return a**2 + b**2 + 2*a*b
if len(rterms) == 4:
# (r0+r1)**2 - (r2+r3)**2
r0, r1, r2, r3 = rterms
eq = _norm2(r0, r1) - _norm2(r2, r3)
ok = True
elif len(rterms) == 3:
# (r1+r2)**2 - (r0+others)**2
r0, r1, r2 = rterms
eq = _norm2(r1, r2) - _norm2(r0, others)
ok = True
elif len(rterms) == 2:
# r0**2 - (r1+others)**2
r0, r1 = rterms
eq = r0**2 - _norm2(r1, others)
ok = True
new_depth = sqrt_depth(eq) if ok else depth
rpt += 1 # XXX how many repeats with others unchanging is enough?
if not ok or (
nwas is not None and len(rterms) == nwas and
new_depth is not None and new_depth == depth and
rpt > 3):
raise NotImplementedError('Cannot remove all radicals')
flags.update(dict(cov=cov, n=len(rterms), rpt=rpt))
neq = unrad(eq, *syms, **flags)
if neq:
eq, cov = neq
eq, cov = _canonical(eq, cov)
return eq, cov
|
13,417 | def test_14_system_dataset_can_be_moved_to_another_pool_successfully_when_all_services_running(request):
depends(request, ["second_pool"])
services = {i['service']: i for i in GET('/service').json()}
services_list = list(services.keys())
services_list.remove('s3')
for service in services_list:
results = POST("/service/start/", {"service": service})
assert results.status_code == 200, results.text
results = PUT("/systemdataset/", {'pool': 'first_pool'})
assert results.status_code == 200, results.text
assert isinstance(results.json(), int), results.text
job_status = wait_on_job(results.json(), 120)
assert job_status['state'] == 'SUCCESS', str(job_status['results'])
results = GET("/systemdataset/")
assert results.status_code == 200, results.text
assert isinstance(results.json(), dict), results.text
assert results.json()['pool'] == 'first_pool', results.text
assert results.json()['basename'] == 'first_pool/.system', results.text
for service in services_list:
if service != 'ssh':
results = POST("/service/stop/", {"service": service})
assert results.status_code == 200, results.text
| def test_14_verify_sysds_can_be_moved_while_services_are_running(request):
depends(request, ["second_pool"])
services = {i['service']: i for i in GET('/service').json()}
services_list = list(services.keys())
services_list.remove('s3')
for service in services_list:
results = POST("/service/start/", {"service": service})
assert results.status_code == 200, results.text
results = PUT("/systemdataset/", {'pool': 'first_pool'})
assert results.status_code == 200, results.text
assert isinstance(results.json(), int), results.text
job_status = wait_on_job(results.json(), 120)
assert job_status['state'] == 'SUCCESS', str(job_status['results'])
results = GET("/systemdataset/")
assert results.status_code == 200, results.text
assert isinstance(results.json(), dict), results.text
assert results.json()['pool'] == 'first_pool', results.text
assert results.json()['basename'] == 'first_pool/.system', results.text
for service in services_list:
if service != 'ssh':
results = POST("/service/stop/", {"service": service})
assert results.status_code == 200, results.text
|
41,143 | def validate_density_matrix(
density_matrix: np.ndarray,
*, # Force keyword arguments
qid_shape: Tuple[int, ...],
dtype: Optional[Type[np.number]] = None,
atol: float = 1e-7) -> None:
"""Checks that the given density matrix is valid.
Args:
density_matrix: The density matrix to validate.
qid_shape: The expected qid shape.
dtype: The expected dtype.
atol: Absolute numerical tolerance.
Raises:
ValueError: The density matrix does not have the correct dtype.
ValueError: The density matrix is not square and of size 2**num_qubits.
ValueError: The density matrix is not Hermitian.
ValueError: The density matrix does not have trace 1.
ValueError: The density matrix is not positive semidefinite.
"""
if dtype and density_matrix.dtype != dtype:
raise ValueError('Density matrix had dtype {} but expected {}'.format(
density_matrix.dtype, dtype))
if density_matrix.shape != (np.prod(qid_shape, dtype=int),) * 2:
raise ValueError(
'Density matrix was not square and of size 2**num_qubits, '
'instead was {}'.format(density_matrix.shape))
if not np.allclose(
density_matrix, np.transpose(np.conj(density_matrix)), atol=atol):
raise ValueError('The density matrix is not hermitian.')
if not np.isclose(np.trace(density_matrix), 1.0, atol=atol):
raise ValueError(
'Density matrix did not have trace 1 but instead {}'.format(
np.trace(density_matrix)))
if not np.all(np.linalg.eigvalsh(density_matrix) > -atol):
raise ValueError('The density matrix is not positive semidefinite.')
| def validate_density_matrix(
density_matrix: np.ndarray,
*, # Force keyword arguments
qid_shape: Tuple[int, ...],
dtype: Optional[Type[np.number]] = None,
atol: float = 1e-7) -> None:
"""Checks that the given density matrix is valid.
Args:
density_matrix: The density matrix to validate.
qid_shape: The expected qid shape.
dtype: The expected dtype.
atol: Absolute numerical tolerance.
Raises:
ValueError: The density matrix does not have the correct dtype.
ValueError: The density matrix is not square and of size 2**num_qubits.
ValueError: The density matrix is not Hermitian.
ValueError: The density matrix does not have trace 1.
ValueError: The density matrix is not positive semidefinite.
"""
if dtype and density_matrix.dtype != dtype:
raise ValueError('Density matrix had dtype {} but expected {}'.format(
density_matrix.dtype, dtype))
if density_matrix.shape != (np.prod(qid_shape, dtype=int),) * 2:
raise ValueError(
'Density matrix with qid_shape {qid_shape} was expected to be of size {s} x {s}, '
'instead was {}'.format(density_matrix.shape))
if not np.allclose(
density_matrix, np.transpose(np.conj(density_matrix)), atol=atol):
raise ValueError('The density matrix is not hermitian.')
if not np.isclose(np.trace(density_matrix), 1.0, atol=atol):
raise ValueError(
'Density matrix did not have trace 1 but instead {}'.format(
np.trace(density_matrix)))
if not np.all(np.linalg.eigvalsh(density_matrix) > -atol):
raise ValueError('The density matrix is not positive semidefinite.')
|
12,986 | def create_orders(how_many=10, unconfirmed=0):
discounts = fetch_discounts(timezone.now())
for i in range(how_many):
if unconfirmed <= how_many and i in range(i, unconfirmed):
order = create_fake_order(discounts, alter_status=OrderStatus.UNCONFIRMED)
else:
order = create_fake_order(discounts)
yield "Order: %s" % (order,)
| def create_orders(how_many=10, unconfirmed=0):
discounts = fetch_discounts(timezone.now())
for i in range(how_many):
if unconfirmed <= how_many and i < unconfirmed:
order = create_fake_order(discounts, alter_status=OrderStatus.UNCONFIRMED)
else:
order = create_fake_order(discounts)
yield "Order: %s" % (order,)
|
31,285 | def main():
try:
args = demisto.args()
last_seen_gte = args.get('from')
last_seen_lte = args.get('to')
limit = args.get('limit', '100')
get_endpoints_args = {'limit': limit}
get_endpoints_args = {}
if last_seen_gte:
get_endpoints_args['last_seen_gte'] = last_seen_gte
if last_seen_lte and last_seen_lte != '0001-01-01T00:00:00Z':
get_endpoints_args['last_seen_lte'] = last_seen_lte
res = demisto.executeCommand('xdr-get-endpoints', get_endpoints_args)
if isError(res[0]):
return_error(f'Error occurred while trying to get XDR endpoints: {res[0].get("Contents")}')
endpoints = res[0]['Contents']
connected_endpoints = 0
for endpoint in endpoints:
if endpoint.get('endpoint_status') == 'CONNECTED':
connected_endpoints = connected_endpoints + 1
return_outputs(str(connected_endpoints))
except Exception as ex:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute XDRConnectedEndpoints. Error: {str(ex)}')
| def main():
try:
args = demisto.args()
last_seen_gte = args.get('from')
last_seen_lte = args.get('to')
limit = args.get('limit', '100')
get_endpoints_args = {'limit': limit}
get_endpoints_args = {}
if last_seen_gte:
get_endpoints_args['last_seen_gte'] = last_seen_gte
if last_seen_lte and last_seen_lte != '0001-01-01T00:00:00Z':
get_endpoints_args['last_seen_lte'] = last_seen_lte
res = demisto.executeCommand('xdr-get-endpoints', get_endpoints_args)
if isError(res[0]):
return_error(f'Error occurred while trying to get XDR endpoints: {res[0].get("Contents")}')
endpoints = res[0]['Contents']
connected_endpoints = 0
for endpoint in endpoints:
if endpoint.get('endpoint_status') == 'CONNECTED':
connected_endpoints = connected_endpoints + 1
return_results(connected_endpoints)
except Exception as ex:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute XDRConnectedEndpoints. Error: {str(ex)}')
|
45,329 | def read(filename):
columns_names = [
"YEAR0",
"DATANUM",
"SERIAL",
"CBSERIAL",
"HHWT",
"CPI99",
"GQ",
"QGQ",
"PERNUM",
"PERWT",
"SEX",
"AGE",
"EDUC",
"EDUCD",
"INCTOT",
"SEX_HEAD",
"SEX_MOM",
"SEX_POP",
"SEX_SP",
"SEX_MOM2",
"SEX_POP2",
"AGE_HEAD",
"AGE_MOM",
"AGE_POP",
"AGE_SP",
"AGE_MOM2",
"AGE_POP2",
"EDUC_HEAD",
"EDUC_MOM",
"EDUC_POP",
"EDUC_SP",
"EDUC_MOM2",
"EDUC_POP2",
"EDUCD_HEAD",
"EDUCD_MOM",
"EDUCD_POP",
"EDUCD_SP",
"EDUCD_MOM2",
"EDUCD_POP2",
"INCTOT_HEAD",
"INCTOT_MOM",
"INCTOT_POP",
"INCTOT_SP",
"INCTOT_MOM2",
"INCTOT_POP2",
]
columns_types = [
"int64",
"int64",
"int64",
"float64",
"int64",
"float64",
"int64",
"float64",
"int64",
"int64",
"int64",
"int64",
"int64",
"int64",
"int64",
"float64",
"float64",
"float64",
"float64",
"float64",
"float64",
"float64",
"float64",
"float64",
"float64",
"float64",
"float64",
"float64",
"float64",
"float64",
"float64",
"float64",
"float64",
"float64",
"float64",
"float64",
"float64",
"float64",
"float64",
"float64",
"float64",
"float64",
"float64",
"float64",
"float64",
]
dtypes = {columns_names[i]: columns_types[i] for i in range(len(columns_names))}
df = pd.read_csv(
filename,
names=columns_names,
dtype=dtypes,
skiprows=1,
)
df.shape # to trigger real execution
df._query_compiler._modin_frame._partitions[0][
0
].frame_id = OmnisciServer().put_arrow_to_omnisci(
df._query_compiler._modin_frame._partitions[0][0].get()
) # to trigger real execution
return df
| def read(filename):
columns_names = [
*("YEAR0", "DATANUM", "SERIAL", "CBSERIAL", "HHWT", "CPI99", "GQ", "QGQ", "PERNUM", "PERWT", "SEX"),
"AGE",
"EDUC",
"EDUCD",
"INCTOT",
"SEX_HEAD",
"SEX_MOM",
"SEX_POP",
"SEX_SP",
"SEX_MOM2",
"SEX_POP2",
"AGE_HEAD",
"AGE_MOM",
"AGE_POP",
"AGE_SP",
"AGE_MOM2",
"AGE_POP2",
"EDUC_HEAD",
"EDUC_MOM",
"EDUC_POP",
"EDUC_SP",
"EDUC_MOM2",
"EDUC_POP2",
"EDUCD_HEAD",
"EDUCD_MOM",
"EDUCD_POP",
"EDUCD_SP",
"EDUCD_MOM2",
"EDUCD_POP2",
"INCTOT_HEAD",
"INCTOT_MOM",
"INCTOT_POP",
"INCTOT_SP",
"INCTOT_MOM2",
"INCTOT_POP2",
]
columns_types = [
"int64",
"int64",
"int64",
"float64",
"int64",
"float64",
"int64",
"float64",
"int64",
"int64",
"int64",
"int64",
"int64",
"int64",
"int64",
"float64",
"float64",
"float64",
"float64",
"float64",
"float64",
"float64",
"float64",
"float64",
"float64",
"float64",
"float64",
"float64",
"float64",
"float64",
"float64",
"float64",
"float64",
"float64",
"float64",
"float64",
"float64",
"float64",
"float64",
"float64",
"float64",
"float64",
"float64",
"float64",
"float64",
]
dtypes = {columns_names[i]: columns_types[i] for i in range(len(columns_names))}
df = pd.read_csv(
filename,
names=columns_names,
dtype=dtypes,
skiprows=1,
)
df.shape # to trigger real execution
df._query_compiler._modin_frame._partitions[0][
0
].frame_id = OmnisciServer().put_arrow_to_omnisci(
df._query_compiler._modin_frame._partitions[0][0].get()
) # to trigger real execution
return df
|
8,136 | def read_flare_file(file):
"""
Read RHESSI flare list .fits file into ``pandas.DataFrame``. TIME values are parsed with
format 'utime', which is the same as Unix timestamp but starts 9 years later.
FLAGS are assigned their respective label (FLAG ID) and returned as `dict`.
Parameters
----------
file : `str`
The URL or local filename of the hessi flare list.
Returns
-------
``pandas.DataFrame``
out : ``pandas.DataFrame`` containing the parsed flares.
Examples
--------
>>> from sunpy.instr.rhessi as read_flare_file as rff
>>> rff("https://hesperia.gsfc.nasa.gov/hessidata/dbase/hessi_flare_list_201802.fits")
References
----------
https://hesperia.gsfc.nasa.gov/rhessi3/data-access/rhessi-data/flare-list/index.html
"""
try:
fits = sunpy.io.fits.read(file)
except Exception:
raise RuntimeError("couldn't load file " + file)
results = []
for row in fits[3].data[0:20]:
result_row = {}
for k in fits_rec_keys:
if k.endswith('_TIME'):
result_row[k] = parse_time(row[k], format="utime")
result_row[k].format = "datetime" # for human readable display inside the DF
elif k == 'FLAGS':
flags = {}
for i, fid in zip(row[k], fits[2].data['FLAG_IDS'][0]):
flags[fid] = i
result_row[k] = flags
else:
result_row[k] = row[k]
results.append(result_row)
return pd.DataFrame(results)
| def read_flare_file(file):
"""
Read RHESSI flare list .fits file into ``pandas.DataFrame``. TIME values are parsed with
format 'utime', which is the same as Unix timestamp but starts 9 years later.
FLAGS are assigned their respective label (FLAG ID) and returned as `dict`.
Parameters
----------
file : `str`
The URL or local filename of the hessi flare list.
Returns
-------
``pandas.DataFrame``
out : ``pandas.DataFrame`` containing the parsed flares.
Examples
--------
>>> from sunpy.instr.rhessi import read_flare_file as rff
>>> rff("https://hesperia.gsfc.nasa.gov/hessidata/dbase/hessi_flare_list_201802.fits")
References
----------
https://hesperia.gsfc.nasa.gov/rhessi3/data-access/rhessi-data/flare-list/index.html
"""
try:
fits = sunpy.io.fits.read(file)
except Exception:
raise RuntimeError("couldn't load file " + file)
results = []
for row in fits[3].data[0:20]:
result_row = {}
for k in fits_rec_keys:
if k.endswith('_TIME'):
result_row[k] = parse_time(row[k], format="utime")
result_row[k].format = "datetime" # for human readable display inside the DF
elif k == 'FLAGS':
flags = {}
for i, fid in zip(row[k], fits[2].data['FLAG_IDS'][0]):
flags[fid] = i
result_row[k] = flags
else:
result_row[k] = row[k]
results.append(result_row)
return pd.DataFrame(results)
|
35,668 | def get_weight(name: str) -> WeightsEnum:
"""
Gets the weight enum value by its full name. Example: ``ResNet50_Weights.ImageNet1K_V1``
Args:
name (str): The name of the weight enum entry.
Returns:
WeightsEnum: The requested weight enum.
"""
try:
enum_name, value_name = name.split(".")
except ValueError:
raise ValueError(f"Invalid weight name provided: '{name}'.")
base_module_name = ".".join(sys.modules[__name__].__name__.split(".")[:-1])
base_module = importlib.import_module(base_module_name)
model_modules = [base_module] + [
x[1] for x in inspect.getmembers(base_module, inspect.ismodule) if x[1].__file__.endswith("__init__.py")
]
weights_enum = None
for m in model_modules:
potential_class = m.__dict__.get(enum_name, None)
if potential_class is not None and issubclass(potential_class, WeightsEnum):
weights_enum = potential_class
break
if weights_enum is None:
raise ValueError(
"The weight class for the specific method couldn't be retrieved. Make sure the typing info is correct."
)
return weights_enum.from_str(value_name)
| def get_weight(name: str) -> WeightsEnum:
"""
Gets the weight enum value by its full name. Example: ``"ResNet50_Weights.ImageNet1K_V1"``
Args:
name (str): The name of the weight enum entry.
Returns:
WeightsEnum: The requested weight enum.
"""
try:
enum_name, value_name = name.split(".")
except ValueError:
raise ValueError(f"Invalid weight name provided: '{name}'.")
base_module_name = ".".join(sys.modules[__name__].__name__.split(".")[:-1])
base_module = importlib.import_module(base_module_name)
model_modules = [base_module] + [
x[1] for x in inspect.getmembers(base_module, inspect.ismodule) if x[1].__file__.endswith("__init__.py")
]
weights_enum = None
for m in model_modules:
potential_class = m.__dict__.get(enum_name, None)
if potential_class is not None and issubclass(potential_class, WeightsEnum):
weights_enum = potential_class
break
if weights_enum is None:
raise ValueError(
"The weight class for the specific method couldn't be retrieved. Make sure the typing info is correct."
)
return weights_enum.from_str(value_name)
|
57,735 | def remove_key_from_list(list_name: str, key_name: str) -> str:
res = demisto.executeCommand('getList', {'listName': list_name})
if (
not isinstance(res, list)
or 'Contents' not in res[0]
or not isinstance(res[0]['Contents'], str)
or res[0]['Contents'] == 'Item not found (8)'
):
raise ValueError(f'Cannot retrieve list {list_name}')
list_data: Dict = {}
if len(res[0]['Contents']) > 0:
try:
list_data = json.loads(res[0]['Contents'])
except json.decoder.JSONDecodeError as e:
raise ValueError(f'List does not contain valid JSON data: {e}')
elem = list_data.pop(key_name, None)
if not elem:
return f'Key {key_name} not found in list {list_name}, cannot remove.'
demisto.executeCommand('setList', {'listName': list_name, 'listData': json.dumps(list_data)})
return f'Successfully removed key {key_name} from list {list_name}.'
| def remove_key_from_list(list_name: str, key_name: str) -> str:
res = demisto.executeCommand('getList', {'listName': list_name})
if (
not isinstance(res, list)
or 'Contents' not in res[0]
or not isinstance(res[0]['Contents'], str)
or res[0]['Contents'] == 'Item not found (8)'
):
raise ValueError(f'Cannot retrieve list {list_name}')
list_data: Dict = {}
data = res[0]['Contents']
if data:
try:
list_data = json.loads(data)
except json.decoder.JSONDecodeError as e:
raise ValueError(f'List does not contain valid JSON data: {e}')
elem = list_data.pop(key_name, None)
if not elem:
return f'Key {key_name} not found in list {list_name}, cannot remove.'
demisto.executeCommand('setList', {'listName': list_name, 'listData': json.dumps(list_data)})
return f'Successfully removed key {key_name} from list {list_name}.'
|
37,810 | def path_for_icon(icon_name: str, relative_to: Path | None = None) -> Path:
if relative_to is None:
from_path = Path.cwd()
else:
from_path = relative_to.parent
absolute_path = PROJECT_ROOT / "docs" / "data" / "readme_icons" / f"{icon_name}.svg"
return absolute_path.resolve().relative_to(from_path.resolve())
| def path_for_icon(icon_name: str, relative_to: Path | None = None) -> Path:
from_path = Path.cwd() if relative_to is None else relative_to.parent
absolute_path = PROJECT_ROOT / "docs" / "data" / "readme_icons" / f"{icon_name}.svg"
return absolute_path.resolve().relative_to(from_path.resolve())
|
30,750 | def get_remediation_data_command(client: Client, args: dict, no_output_mode: bool) -> List[Dict[str, Any]]:
"""Get SafeBreach remediation data.
Arguments:
client {Client} -- Client derives from BaseClient
args {dict} -- function arguments
no_output_mode {bool} -- if true, this function will insert data to the context,
otherwise, it will just returns the data.
Keyword Arguments:
Returns:
Dict -- Each key is a unique SafeBreach data type.
Each value is a list of the data.
"""
insight_id: Optional[int] = args.get('insightId')
response = client.get_remediation_data(insight_id)
insight: Any = get_insights_command(client, {'insightIds': [insight_id]}, False)
if insight:
insight = insight[0]
if response.status_code < 200 or response.status_code >= 300:
raise DemistoException(f'Failed to fetch remediation data for insight id {insight_id}')
sb_remediation_data = response.json().get('remediationData')
processed_data = extract_data(sb_remediation_data)
readable_output_list = generate_readable_output(processed_data)
vendor_remediation_data = list(filter(lambda o: o['value'],
[{'type': "Splunk", "value": get_splunk_remedation_query(response)}]))
# Demisto Context:
dbot_score_list = []
standard_context_dict = {}
secondary_standard_context_dict: Any = {}
secondary_standard_context_list = []
secondary_path = ''
# SafeBreach Context:
safebreach_context_list = []
safebreach_context = {
"Id": insight_id,
'RawRemediationData': processed_data,
'VendorRemediationData': vendor_remediation_data
}
safebreach_context_list.append(safebreach_context)
for item in processed_data:
if item['type'].startswith('Attack') or len(processed_data) == 0:
continue
standard_context_list: Any = []
demisto_standard_path = get_demisto_context_path(item['type']) # e.g URL(val.Data == obj.Data)
demisto_data_type = SAFEBREACH_TO_DEMISTO_MAPPER.get(item['type']) # SHA256,Port,Protocol,Data,Command,URI
if item['type'] in ['DropPaths', 'URIs', 'URI', 'Command']:
item["value"] = item["value"].encode('utf-8').decode('unicode_escape').encode('latin1').decode('utf-8')
if demisto_data_type:
dbot_score = {
"Indicator": item["value"],
'type': get_dbot_type(item['type'], item["value"]), # TODO: maybe change it to SB_Indicator?
"Vendor": "SafeBreach",
"Score": 3 # TODO: Change to is behaviroal set to defaults
}
primary_standard_context = {
demisto_data_type: item["value"], # e.g Data : <URL>, SHA256:<SHA256>
"Malicious": {
"Description": f"SafeBreach Insights - ({insight_id}){insight.get('actionBasedTitle')}",
"Vendor": "SafeBreach"
}
}
if item['type'] in ['FQDNs/IPs', 'FQDN/IP']:
if re.match(IP_REGEX, item["value"]):
secondary_path = 'IP(val.Address == obj.Address)'
secondary_standard_context_dict = {
'IP': item["value"],
"Malicious": {
"Description": f"SafeBreach Insights - ({insight_id}){insight.get('actionBasedTitle')}",
"Vendor": "SafeBreach"
}
}
else:
secondary_path = 'Domain(val.Name == obj.Name)'
secondary_standard_context_dict = {
'Name': item["value"],
"Malicious": {
"Description": f"SafeBreach Insights - ({insight_id}){insight.get('actionBasedTitle')}",
"Vendor": "SafeBreach"
}
}
if demisto_standard_path:
standard_context_list.append(primary_standard_context)
secondary_standard_context_list.append(secondary_standard_context_dict)
dbot_score_list.append(dbot_score)
if len(standard_context_list) > 0 and demisto_standard_path:
standard_context_dict[demisto_standard_path] = standard_context_list
if secondary_path:
standard_context_dict[secondary_path] = secondary_standard_context_list
output_context = {
"DBotScore(val.Indicator == obj.Indicator)": dbot_score_list,
"SafeBreach.Insight(val.Id == obj.Id)": safebreach_context_list,
}
merged_context = {**output_context, **standard_context_dict}
readable_output = tableToMarkdown(name="Remediation Data", t=readable_output_list, removeNull=True)
if no_output_mode:
return_outputs(readable_output=readable_output, outputs=merged_context)
return processed_data
| def get_remediation_data_command(client: Client, args: dict, no_output_mode: bool) -> List[Dict[str, Any]]:
"""Get SafeBreach remediation data.
Arguments:
client {Client} -- Client derives from BaseClient
args {dict} -- function arguments
no_output_mode {bool} -- if true, this function will insert data to the context,
otherwise, it will just returns the data.
Keyword Arguments:
Returns:
Dict -- Each key is a unique SafeBreach data type.
Each value is a list of the data.
"""
insight_id: Optional[int] = args.get('insightId')
response = client.get_remediation_data(insight_id)
insight: Any = get_insights_command(client, {'insightIds': [insight_id]}, False)
if insight:
insight = insight[0]
if response.status_code < 200 or response.status_code >= 300:
raise DemistoException(f'Failed to fetch remediation data for insight id {insight_id}')
sb_remediation_data = response.json().get('remediationData')
processed_data = extract_data(sb_remediation_data)
readable_output_list = generate_readable_output(processed_data)
vendor_remediation_data = list(filter(lambda o: o['value'],
[{'type': "Splunk", "value": get_splunk_remedation_query(response)}]))
# Demisto Context:
dbot_score_list = []
standard_context_dict = {}
secondary_standard_context_dict: Any = {}
secondary_standard_context_list = []
secondary_path = ''
# SafeBreach Context:
safebreach_context_list = []
safebreach_context = {
"Id": insight_id,
'RawRemediationData': processed_data,
'VendorRemediationData': vendor_remediation_data
}
safebreach_context_list.append(safebreach_context)
for item in processed_data:
if item['type'].startswith('Attack') or len(processed_data) == 0:
continue
standard_context_list: Any = []
demisto_standard_path = get_demisto_context_path(item['type']) # e.g URL(val.Data == obj.Data)
demisto_data_type = SAFEBREACH_TO_DEMISTO_MAPPER.get(item['type']) # SHA256,Port,Protocol,Data,Command,URI
if item['type'] in ['DropPaths', 'URIs', 'URI', 'Command']:
item["value"] = item["value"].encode('utf-8').decode('unicode_escape').encode('latin1').decode('utf-8')
if demisto_data_type:
dbot_score = {
"Indicator": item["value"],
'type': get_dbot_type(item['type'], item["value"]), # TODO: maybe change it to SB_Indicator?
"Vendor": "SafeBreach",
"Score": 3 # TODO: Change to is behaviroal set to defaults
}
primary_standard_context = {
demisto_data_type: item["value"], # e.g Data : <URL>, SHA256:<SHA256>
"Malicious": {
"Description": f"SafeBreach Insights - ({insight_id}){insight.get('actionBasedTitle')}",
"Vendor": "SafeBreach"
}
}
if item['type'] in ['FQDNs/IPs', 'FQDN/IP']:
if re.match(IP_REGEX, item["value"]):
secondary_path = 'IP(val.Address == obj.Address)'
secondary_standard_context_dict = {
'IP': item["value"],
"Malicious": {
"Description": f"SafeBreach Insights - ({insight_id}){insight.get('actionBasedTitle')}",
"Vendor": "SafeBreach"
}
}
else:
secondary_path = 'Domain(val.Name == obj.Name)'
secondary_standard_context_dict = {
'Name': item["value"],
"Malicious": {
"Description": f"SafeBreach Insights - ({insight_id}){insight.get('actionBasedTitle')}",
"Vendor": "SafeBreach"
}
}
if demisto_standard_path:
standard_context_list.append(primary_standard_context)
secondary_standard_context_list.append(secondary_standard_context_dict)
dbot_score_list.append(dbot_score)
if len(standard_context_list) > 0 and demisto_standard_path:
standard_context_dict[demisto_standard_path] = standard_context_list
if secondary_path:
standard_context_dict[secondary_path] = secondary_standard_context_list
output_context = {
"DBotScore(val.Indicator == obj.Indicator)": dbot_score_list,
"SafeBreach.Insight(val.Id == obj.Id)": safebreach_context_list
}
merged_context = {**output_context, **standard_context_dict}
readable_output = tableToMarkdown(name="Remediation Data", t=readable_output_list, removeNull=True)
if no_output_mode:
return_outputs(readable_output=readable_output, outputs=merged_context)
return processed_data
|
3,168 | def lag_plot(series: Series, lag: int = 1, ax: Axes | None = None, **kwds) -> Axes:
"""
Lag plot for time series.
Parameters
----------
series : Series
The time series to visualize.
lag : int, optional
Lag length of the scatter plot, default 1.
ax : Matplotlib axis object, optional
The matplotlib axis object to use.
**kwds
Matplotlib scatter method keyword arguments.
Returns
-------
matplotlib.axis.Axes
Examples
--------
Lag plots are most commonly used to look for patterns in time series data.
Given the following time series
.. plot::
:context: close-figs
>>> np.random.seed(5)
>>> x = np.cumsum(np.random.normal(loc=1, scale=5, size=50))
>>> s = pd.Series(x)
>>> s.plot()
<AxesSubplot:xlabel='Midrange'>
A lag plot with ``lag=1`` returns
.. plot::
:context: close-figs
>>> pd.plotting.lag_plot(s, lag=1)
<AxesSubplot:xlabel='y(t)', ylabel='y(t + 1)'>
"""
plot_backend = _get_plot_backend("matplotlib")
return plot_backend.lag_plot(series=series, lag=lag, ax=ax, **kwds)
| def lag_plot(series: Series, lag: int = 1, ax: Axes | None = None, **kwds) -> Axes:
"""
Lag plot for time series.
Parameters
----------
series : Series
The time series to visualize.
lag : int, default 1
Lag length of the scatter plot, default 1.
ax : Matplotlib axis object, optional
The matplotlib axis object to use.
**kwds
Matplotlib scatter method keyword arguments.
Returns
-------
matplotlib.axis.Axes
Examples
--------
Lag plots are most commonly used to look for patterns in time series data.
Given the following time series
.. plot::
:context: close-figs
>>> np.random.seed(5)
>>> x = np.cumsum(np.random.normal(loc=1, scale=5, size=50))
>>> s = pd.Series(x)
>>> s.plot()
<AxesSubplot:xlabel='Midrange'>
A lag plot with ``lag=1`` returns
.. plot::
:context: close-figs
>>> pd.plotting.lag_plot(s, lag=1)
<AxesSubplot:xlabel='y(t)', ylabel='y(t + 1)'>
"""
plot_backend = _get_plot_backend("matplotlib")
return plot_backend.lag_plot(series=series, lag=lag, ax=ax, **kwds)
|
1,769 | def test_convergence_warning():
convergence_warning = "Maximum number of iterations 1 reached."
convergence_warning += " Increase it to improve convergence."
A = np.ones((2, 2))
with pytest.warns(ConvergenceWarning, match=convergence_warning):
NMF(max_iter=1).fit(A)
| def test_convergence_warning():
convergence_warning = "Maximum number of iterations 1 reached."
convergence_warning += " Increase it to improve convergence."
A = np.ones((2, 2))
with pytest.warns(ConvergenceWarning, match=convergence_warning):
NMF(solver=solver, max_iter=1).fit(A)
|
5,577 | def test_add_lat_lon_station_data_not_found():
"""Test case where key cannot be inferred."""
df = pd.DataFrame({'location': ['KOUN']})
with pytest.raises(KeyError):
df = add_station_lat_lon(df)
| def test_add_lat_lon_station_data_not_found():
"""Test case where key cannot be inferred."""
df = pd.DataFrame({'location': ['KOUN']})
with pytest.raises(KeyError):
add_station_lat_lon(df)
|