id (int64, 11-59.9k) | original (stringlengths 33-150k) | modified (stringlengths 37-150k) |
---|---|---|
39,295 |
def test_extract_surface():
# create a single hexahedral cell
lin_pts = np.array([[-1, -1, -1], # node 0
[ 1, -1, -1], # node 1
[ 1, 1, -1], # node 2
[-1, 1, -1], # node 3
[-1, -1, 1], # node 4
[ 1, -1, 1], # node 5
[ 1, 1, 1], # node 6
[-1, 1, 1]], np.double) # node 7
quad_pts = np.array([
(lin_pts[1] + lin_pts[0])/2.0,
(lin_pts[1] + lin_pts[2])/2.0,
(lin_pts[2] + lin_pts[3])/2.0,
(lin_pts[3] + lin_pts[0])/2.0,
(lin_pts[4] + lin_pts[5])/2.0,
(lin_pts[5] + lin_pts[6])/2.0,
(lin_pts[6] + lin_pts[7])/2.0,
(lin_pts[7] + lin_pts[4])/2.0,
(lin_pts[0] + lin_pts[4])/2.0,
(lin_pts[1] + lin_pts[5])/2.0,
(lin_pts[2] + lin_pts[6])/2.0,
(lin_pts[3] + lin_pts[7])/2.0], np.double)
# introduce a minor variation to the location of the mid-side points
quad_pts += np.random.random(quad_pts.shape)*0.25
pts = np.vstack((lin_pts, quad_pts))
cells = np.asarray([[20, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]], dtype=np.int64)
celltypes = np.array([VTK_QUADRATIC_HEXAHEDRON])
if pyvista._vtk.VTK9:
grid = pyvista.UnstructuredGrid(cells, celltypes, pts)
else:
grid = pyvista.UnstructuredGrid(np.array([0]), cells, celltypes, pts)
    # expect each face to be divided 6 times since it has a midside node
surf = grid.extract_surface()
assert surf.n_faces == 36
# expect each face to be divided several more times than the linear extraction
surf_subdivided = grid.extract_surface(subdivision=5)
assert surf_subdivided.n_faces > surf.n_faces
|
def test_extract_surface():
# create a single quadratic hexahedral cell
lin_pts = np.array([[-1, -1, -1], # node 0
[ 1, -1, -1], # node 1
[ 1, 1, -1], # node 2
[-1, 1, -1], # node 3
[-1, -1, 1], # node 4
[ 1, -1, 1], # node 5
[ 1, 1, 1], # node 6
[-1, 1, 1]], np.double) # node 7
quad_pts = np.array([
(lin_pts[1] + lin_pts[0])/2.0,
(lin_pts[1] + lin_pts[2])/2.0,
(lin_pts[2] + lin_pts[3])/2.0,
(lin_pts[3] + lin_pts[0])/2.0,
(lin_pts[4] + lin_pts[5])/2.0,
(lin_pts[5] + lin_pts[6])/2.0,
(lin_pts[6] + lin_pts[7])/2.0,
(lin_pts[7] + lin_pts[4])/2.0,
(lin_pts[0] + lin_pts[4])/2.0,
(lin_pts[1] + lin_pts[5])/2.0,
(lin_pts[2] + lin_pts[6])/2.0,
(lin_pts[3] + lin_pts[7])/2.0], np.double)
# introduce a minor variation to the location of the mid-side points
quad_pts += np.random.random(quad_pts.shape)*0.25
pts = np.vstack((lin_pts, quad_pts))
cells = np.asarray([[20, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]], dtype=np.int64)
celltypes = np.array([VTK_QUADRATIC_HEXAHEDRON])
if pyvista._vtk.VTK9:
grid = pyvista.UnstructuredGrid(cells, celltypes, pts)
else:
grid = pyvista.UnstructuredGrid(np.array([0]), cells, celltypes, pts)
    # expect each face to be divided 6 times since it has a midside node
surf = grid.extract_surface()
assert surf.n_faces == 36
# expect each face to be divided several more times than the linear extraction
surf_subdivided = grid.extract_surface(subdivision=5)
assert surf_subdivided.n_faces > surf.n_faces
|
5,503 |
def create_missing_attributes(existing_attributes, required_attributes):
for name, sendinblue_type in required_attributes.items():
if any(attribute["name"] == name for attribute in existing_attributes):
continue
response = sendinblue.request(
"POST",
f"contacts/attributes/normal/{name}",
json={"type": sendinblue_type},
)
if not response.ok:
message = response.json()["message"]
return [
Error(
f"Error when creating sendinblue attribute '{name}' of type '{sendinblue_type}': {message}",
id=SENDINBLUE_API_ERROR,
)
]
return []
|
def create_missing_attributes(existing_attributes, required_attributes):
for name, sendinblue_type in required_attributes.items():
if any(attribute["name"] == name for attribute in existing_attributes):
continue
response = sendinblue.request(
"POST",
f"contacts/attributes/normal/{name}",
json={"type": sendinblue_type},
)
if not response.ok:
message = response.json()["message"]
return [
Error(
f"Error when creating sendinblue attribute {name!r} of type {sendinblue_type!r}: {message}",
id=SENDINBLUE_API_ERROR,
)
]
return []
|
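The only functional difference in this pair is replacing hand-written quotes with the `!r` conversion flag in the f-string. A minimal, self-contained illustration of that flag (the values below are invented for the example):

name = "FIRST_NAME"
sendinblue_type = "text"

# Manual quoting, as in the original column
print(f"attribute '{name}' of type '{sendinblue_type}'")

# !r applies repr(), so strings are quoted automatically, as in the modified column
print(f"attribute {name!r} of type {sendinblue_type!r}")

# Both lines print: attribute 'FIRST_NAME' of type 'text'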
31,212 |
def test_module(client):
params = {
"page_size": 1,
"page": 1
}
result = client.get_issues(params)
if "issues" in result:
demisto.results('ok')
else:
return_error(result)
|
def test_module(client):
params = {
"page_size": 1,
"page": 1
}
result = client.get_issues(params)
if "issues" in result:
return 'ok'
else:
return_error(result)
|
40,965 |
def migrate_footer_to_static_placeholder(apps, schema_editor):
"""
Create a footer with the new static placeholder from the existing footer pages that were
placed under the "annex" page and displayed in the footer via a `show_menu_below_id`
template tag.
"""
Page = apps.get_model("cms", "Page")
Title = apps.get_model("cms", "Title")
# We should import StaticPlaceholder from apps but its `draft` and `public` fields
# are custom foreign key field that checks that they are targeting an instance of
# # cms.models Placeholder so the code would not work. We can safely assume that the
# Placeholder and StaticPlaceholder models is still there when this migration is run
static_placeholder, was_created = StaticPlaceholder.objects.get_or_create(
code="footer"
)
if not was_created:
# If the static placeholder was already existing, it means this migration is being
# replayed and we better do nothing
return
for is_draft in [False, True]:
# Look for an existing footer page
try:
footer_page = Page.objects.get(
reverse_id="annex", publisher_is_draft=is_draft
)
except Page.DoesNotExist:
return
placeholder = (
static_placeholder.draft if is_draft else static_placeholder.public
)
for language in Title.objects.filter(page=footer_page).values_list(
"language", flat=True
):
# Create the <ul> section to carry the list of links
section = add_plugin(
placeholder,
plugin_type="SectionPlugin",
language=language,
template="richie/section/section_list.html",
)
            # Create a <li> link for each page in the existing footer menu
for page in Page.objects.filter(
node__parent=footer_page.node,
in_navigation=True,
title_set__language=language,
publisher_is_draft=is_draft,
):
title = page.title_set.get(language=language)
add_plugin(
placeholder,
plugin_type="LinkPlugin",
language=language,
internal_link_id=page.id,
name=title.title,
target=section,
)
|
def migrate_footer_to_static_placeholder(apps, schema_editor):
"""
Create a footer with the new static placeholder from the existing footer pages that were
placed under the "annex" page and displayed in the footer via a `show_menu_below_id`
template tag.
"""
Page = apps.get_model("cms", "Page")
Title = apps.get_model("cms", "Title")
# We should import StaticPlaceholder from apps but its `draft` and `public` fields
# are custom foreign key field that checks that they are targeting an instance of
# `cms.models.Placeholder` so the code would not work. We can safely assume that the
# Placeholder and StaticPlaceholder models is still there when this migration is run
static_placeholder, was_created = StaticPlaceholder.objects.get_or_create(
code="footer"
)
if not was_created:
# If the static placeholder was already existing, it means this migration is being
# replayed and we better do nothing
return
for is_draft in [False, True]:
# Look for an existing footer page
try:
footer_page = Page.objects.get(
reverse_id="annex", publisher_is_draft=is_draft
)
except Page.DoesNotExist:
return
placeholder = (
static_placeholder.draft if is_draft else static_placeholder.public
)
for language in Title.objects.filter(page=footer_page).values_list(
"language", flat=True
):
# Create the <ul> section to carry the list of links
section = add_plugin(
placeholder,
plugin_type="SectionPlugin",
language=language,
template="richie/section/section_list.html",
)
            # Create a <li> link for each page in the existing footer menu
for page in Page.objects.filter(
node__parent=footer_page.node,
in_navigation=True,
title_set__language=language,
publisher_is_draft=is_draft,
):
title = page.title_set.get(language=language)
add_plugin(
placeholder,
plugin_type="LinkPlugin",
language=language,
internal_link_id=page.id,
name=title.title,
target=section,
)
|
42,333 |
def run(**kwargs):
'''
Run an Ansible Runner task in the foreground and return a Runner object when complete.
:param private_data_dir: The directory containing all runner metadata needed to invoke the runner
module. Output artifacts will also be stored here for later consumption.
:param ident: The run identifier for this invocation of Runner. Will be used to create and name
the artifact directory holding the results of the invocation.
:param json_mode: Store event data in place of stdout on the console and in the stdout file
:param playbook: The playbook (either a list or dictionary or plays, or as a path relative to
``private_data_dir/project``) that will be invoked by runner when executing Ansible.
:param module: The module that will be invoked in ad-hoc mode by runner when executing Ansible.
:param module_args: The module arguments that will be supplied to ad-hoc mode.
:param host_pattern: The host pattern to match when running in ad-hoc mode.
:param inventory: Overrides the inventory directory/file (supplied at ``private_data_dir/inventory``) with
a specific host or list of hosts. This can take the form of:
- Path to the inventory file in the ``private_data_dir``
- Native python dict supporting the YAML/json inventory structure
- A text INI formatted string
- A list of inventory sources, or an empty list to disable passing inventory
:param roles_path: Directory or list of directories to assign to ANSIBLE_ROLES_PATH
:param envvars: Environment variables to be used when running Ansible. Environment variables will also be
read from ``env/envvars`` in ``private_data_dir``
:param extravars: Extra variables to be passed to Ansible at runtime using ``-e``. Extra vars will also be
read from ``env/extravars`` in ``private_data_dir``.
:param passwords: A dictionary containing password prompt patterns and response values used when processing output from
Ansible. Passwords will also be read from ``env/passwords`` in ``private_data_dir``.
:param settings: A dictionary containing settings values for the ``ansible-runner`` runtime environment. These will also
be read from ``env/settings`` in ``private_data_dir``.
:param ssh_key: The ssh private key passed to ``ssh-agent`` as part of the ansible-playbook run.
:param cmdline: Command line options passed to Ansible read from ``env/cmdline`` in ``private_data_dir``
:param limit: Matches ansible's ``--limit`` parameter to further constrain the inventory to be used
:param forks: Control Ansible parallel concurrency
:param verbosity: Control how verbose the output of ansible-playbook is
:param quiet: Disable all output
:param artifact_dir: The path to the directory where artifacts should live, this defaults to 'artifacts' under the private data dir
:param project_dir: The path to the playbook content, this defaults to 'project' within the private data dir
:param rotate_artifacts: Keep at most n artifact directories, disable with a value of 0 which is the default
:param timeout: The timeout value in seconds that will be passed to either ``pexpect`` of ``subprocess`` invocation
(based on ``runner_mode`` selected) while executing command. It the timeout is triggered it will force cancel the
execution.
:param streamer: Optionally invoke ansible-runner as one of the steps in the streaming pipeline
:param _input: An optional file or file-like object for use as input in a streaming pipeline
:param _output: An optional file or file-like object for use as output in a streaming pipeline
:param event_handler: An optional callback that will be invoked any time an event is received by Runner itself, return True to keep the event
:param cancel_callback: An optional callback that can inform runner to cancel (returning True) or not (returning False)
:param finished_callback: An optional callback that will be invoked at shutdown after process cleanup.
:param status_handler: An optional callback that will be invoked any time the status changes (e.g...started, running, failed, successful, timeout)
:param artifacts_handler: An optional callback that will be invoked at the end of the run to deal with the artifacts from the run.
:param process_isolation: Enable process isolation, using either a container engine (e.g. podman) or a sandbox (e.g. bwrap).
:param process_isolation_executable: Process isolation executable or container engine used to isolate execution. (default: podman)
:param process_isolation_path: Path that an isolated playbook run will use for staging. (default: /tmp)
:param process_isolation_hide_paths: A path or list of paths on the system that should be hidden from the playbook run.
:param process_isolation_show_paths: A path or list of paths on the system that should be exposed to the playbook run.
:param process_isolation_ro_paths: A path or list of paths on the system that should be exposed to the playbook run as read-only.
:param container_image: Container image to use when running an ansible task (default: quay.io/ansible/ansible-runner:devel)
:param container_volume_mounts: List of bind mounts in the form 'host_dir:/container_dir. (default: None)
:param container_options: List of container options to pass to execution engine.
:param resource_profiling: Enable collection of resource utilization data during playbook execution.
:param resource_profiling_base_cgroup: Name of existing cgroup which will be sub-grouped in order to measure resource utilization (default: ansible-runner)
:param resource_profiling_cpu_poll_interval: Interval (in seconds) between CPU polling for determining CPU usage (default: 0.25)
:param resource_profiling_memory_poll_interval: Interval (in seconds) between memory polling for determining memory usage (default: 0.25)
:param resource_profiling_pid_poll_interval: Interval (in seconds) between polling PID count for determining number of processes used (default: 0.25)
:param resource_profiling_results_dir: Directory where profiling data files should be saved (defaults to profiling_data folder inside private data dir)
:param directory_isolation_base_path: An optional path will be used as the base path to create a temp directory, the project contents will be
copied to this location which will then be used as the working directory during playbook execution.
:param fact_cache: A string that will be used as the name for the subdirectory of the fact cache in artifacts directory.
This is only used for 'jsonfile' type fact caches.
:param fact_cache_type: A string of the type of fact cache to use. Defaults to 'jsonfile'.
:param omit_event_data: Omits extra ansible event data from event payload (stdout and event still included)
:param only_failed_event_data: Omits extra ansible event data unless it's a failed event (stdout and event still included)
:param check_job_event_data: Check if job events data is completely generated. If event data is not completely generated and if
value is set to 'True' it will raise 'AnsibleRunnerException' exception,
if set to 'False' it log a debug message and continue execution. Default value is 'False'
:type private_data_dir: str
:type ident: str
:type json_mode: bool
:type playbook: str or filename or list
:type inventory: str or dict or list
:type envvars: dict
:type extravars: dict
:type passwords: dict
:type settings: dict
:type ssh_key: str
:type artifact_dir: str
:type project_dir: str
:type rotate_artifacts: int
:type timeout: int
:type cmdline: str
:type limit: str
:type forks: int
:type quiet: bool
:type verbosity: int
:type streamer: str
:type _input: file
:type _output: file
:type event_handler: function
:type cancel_callback: function
:type finished_callback: function
:type status_handler: function
:type artifacts_handler: function
:type process_isolation: bool
:type process_isolation_executable: str
:type process_isolation_path: str
:type process_isolation_hide_paths: str or list
:type process_isolation_show_paths: str or list
:type process_isolation_ro_paths: str or list
:type container_image: str
:type container_volume_mounts: list
:type container_options: list
:type resource_profiling: bool
:type resource_profiling_base_cgroup: str
:type resource_profiling_cpu_poll_interval: float
:type resource_profiling_memory_poll_interval: float
:type resource_profiling_pid_poll_interval: float
:type resource_profiling_results_dir: str
:type directory_isolation_base_path: str
:type fact_cache: str
:type fact_cache_type: str
:type omit_event_data: bool
:type only_failed_event_data: bool
:type check_job_event_data: bool
:returns: A :py:class:`ansible_runner.runner.Runner` object, or a simple object containing `rc` if run remotely
'''
r = init_runner(**kwargs)
r.run()
return r
|
def run(**kwargs):
'''
Run an Ansible Runner task in the foreground and return a Runner object when complete.
:param private_data_dir: The directory containing all runner metadata needed to invoke the runner
module. Output artifacts will also be stored here for later consumption.
:param ident: The run identifier for this invocation of Runner. Will be used to create and name
the artifact directory holding the results of the invocation.
:param json_mode: Store event data in place of stdout on the console and in the stdout file
:param playbook: The playbook (either a list or dictionary of plays, or as a path relative to
``private_data_dir/project``) that will be invoked by runner when executing Ansible.
:param module: The module that will be invoked in ad-hoc mode by runner when executing Ansible.
:param module_args: The module arguments that will be supplied to ad-hoc mode.
:param host_pattern: The host pattern to match when running in ad-hoc mode.
:param inventory: Overrides the inventory directory/file (supplied at ``private_data_dir/inventory``) with
a specific host or list of hosts. This can take the form of:
- Path to the inventory file in the ``private_data_dir``
- Native python dict supporting the YAML/json inventory structure
- A text INI formatted string
- A list of inventory sources, or an empty list to disable passing inventory
:param roles_path: Directory or list of directories to assign to ANSIBLE_ROLES_PATH
:param envvars: Environment variables to be used when running Ansible. Environment variables will also be
read from ``env/envvars`` in ``private_data_dir``
:param extravars: Extra variables to be passed to Ansible at runtime using ``-e``. Extra vars will also be
read from ``env/extravars`` in ``private_data_dir``.
:param passwords: A dictionary containing password prompt patterns and response values used when processing output from
Ansible. Passwords will also be read from ``env/passwords`` in ``private_data_dir``.
:param settings: A dictionary containing settings values for the ``ansible-runner`` runtime environment. These will also
be read from ``env/settings`` in ``private_data_dir``.
:param ssh_key: The ssh private key passed to ``ssh-agent`` as part of the ansible-playbook run.
:param cmdline: Command line options passed to Ansible read from ``env/cmdline`` in ``private_data_dir``
:param limit: Matches ansible's ``--limit`` parameter to further constrain the inventory to be used
:param forks: Control Ansible parallel concurrency
:param verbosity: Control how verbose the output of ansible-playbook is
:param quiet: Disable all output
:param artifact_dir: The path to the directory where artifacts should live, this defaults to 'artifacts' under the private data dir
:param project_dir: The path to the playbook content, this defaults to 'project' within the private data dir
:param rotate_artifacts: Keep at most n artifact directories, disable with a value of 0 which is the default
:param timeout: The timeout value in seconds that will be passed to either ``pexpect`` of ``subprocess`` invocation
(based on ``runner_mode`` selected) while executing command. It the timeout is triggered it will force cancel the
execution.
:param streamer: Optionally invoke ansible-runner as one of the steps in the streaming pipeline
:param _input: An optional file or file-like object for use as input in a streaming pipeline
:param _output: An optional file or file-like object for use as output in a streaming pipeline
:param event_handler: An optional callback that will be invoked any time an event is received by Runner itself, return True to keep the event
:param cancel_callback: An optional callback that can inform runner to cancel (returning True) or not (returning False)
:param finished_callback: An optional callback that will be invoked at shutdown after process cleanup.
:param status_handler: An optional callback that will be invoked any time the status changes (e.g...started, running, failed, successful, timeout)
:param artifacts_handler: An optional callback that will be invoked at the end of the run to deal with the artifacts from the run.
:param process_isolation: Enable process isolation, using either a container engine (e.g. podman) or a sandbox (e.g. bwrap).
:param process_isolation_executable: Process isolation executable or container engine used to isolate execution. (default: podman)
:param process_isolation_path: Path that an isolated playbook run will use for staging. (default: /tmp)
:param process_isolation_hide_paths: A path or list of paths on the system that should be hidden from the playbook run.
:param process_isolation_show_paths: A path or list of paths on the system that should be exposed to the playbook run.
:param process_isolation_ro_paths: A path or list of paths on the system that should be exposed to the playbook run as read-only.
:param container_image: Container image to use when running an ansible task (default: quay.io/ansible/ansible-runner:devel)
:param container_volume_mounts: List of bind mounts in the form 'host_dir:/container_dir. (default: None)
:param container_options: List of container options to pass to execution engine.
:param resource_profiling: Enable collection of resource utilization data during playbook execution.
:param resource_profiling_base_cgroup: Name of existing cgroup which will be sub-grouped in order to measure resource utilization (default: ansible-runner)
:param resource_profiling_cpu_poll_interval: Interval (in seconds) between CPU polling for determining CPU usage (default: 0.25)
:param resource_profiling_memory_poll_interval: Interval (in seconds) between memory polling for determining memory usage (default: 0.25)
:param resource_profiling_pid_poll_interval: Interval (in seconds) between polling PID count for determining number of processes used (default: 0.25)
:param resource_profiling_results_dir: Directory where profiling data files should be saved (defaults to profiling_data folder inside private data dir)
:param directory_isolation_base_path: An optional path will be used as the base path to create a temp directory, the project contents will be
copied to this location which will then be used as the working directory during playbook execution.
:param fact_cache: A string that will be used as the name for the subdirectory of the fact cache in artifacts directory.
This is only used for 'jsonfile' type fact caches.
:param fact_cache_type: A string of the type of fact cache to use. Defaults to 'jsonfile'.
:param omit_event_data: Omits extra ansible event data from event payload (stdout and event still included)
:param only_failed_event_data: Omits extra ansible event data unless it's a failed event (stdout and event still included)
:param check_job_event_data: Check if job events data is completely generated. If event data is not completely generated and if
value is set to 'True' it will raise 'AnsibleRunnerException' exception,
if set to 'False' it log a debug message and continue execution. Default value is 'False'
:type private_data_dir: str
:type ident: str
:type json_mode: bool
:type playbook: str or filename or list
:type inventory: str or dict or list
:type envvars: dict
:type extravars: dict
:type passwords: dict
:type settings: dict
:type ssh_key: str
:type artifact_dir: str
:type project_dir: str
:type rotate_artifacts: int
:type timeout: int
:type cmdline: str
:type limit: str
:type forks: int
:type quiet: bool
:type verbosity: int
:type streamer: str
:type _input: file
:type _output: file
:type event_handler: function
:type cancel_callback: function
:type finished_callback: function
:type status_handler: function
:type artifacts_handler: function
:type process_isolation: bool
:type process_isolation_executable: str
:type process_isolation_path: str
:type process_isolation_hide_paths: str or list
:type process_isolation_show_paths: str or list
:type process_isolation_ro_paths: str or list
:type container_image: str
:type container_volume_mounts: list
:type container_options: list
:type resource_profiling: bool
:type resource_profiling_base_cgroup: str
:type resource_profiling_cpu_poll_interval: float
:type resource_profiling_memory_poll_interval: float
:type resource_profiling_pid_poll_interval: float
:type resource_profiling_results_dir: str
:type directory_isolation_base_path: str
:type fact_cache: str
:type fact_cache_type: str
:type omit_event_data: bool
:type only_failed_event_data: bool
:type check_job_event_data: bool
:returns: A :py:class:`ansible_runner.runner.Runner` object, or a simple object containing `rc` if run remotely
'''
r = init_runner(**kwargs)
r.run()
return r
|
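For context, a minimal usage sketch of this entry point, assuming ansible-runner is installed and `/tmp/demo` is a prepared private data directory with a playbook at `project/test.yml` (both paths are placeholders for the example):

import ansible_runner

# Run the playbook in the foreground; blocks until it finishes and returns a Runner object
r = ansible_runner.run(private_data_dir="/tmp/demo", playbook="test.yml")
print(r.status)  # e.g. "successful" or "failed"
print(r.rc)      # ansible-playbook return code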
24,868 |
def check_config_2(machine, old_conf, new_conf):
"""Example code must not trigger the message, because the inner block ends with else.
Given an if-elif construct
When the body of the if ends with an else
Then no message shall be triggered.
"""
if old_conf:
if not new_conf:
machine.disable()
elif old_conf.value != new_conf.value:
machine.disable()
machine.enable(new_conf.value)
else:
pass
elif new_conf:
machine.enable(new_conf.value)
|
def not_triggered_if_indented_block_ends_with_else(machine, old_conf, new_conf):
"""Example code must not trigger the message, because the inner block ends with else.
Given an if-elif construct
When the body of the if ends with an else
Then no message shall be triggered.
"""
if old_conf:
if not new_conf:
machine.disable()
elif old_conf.value != new_conf.value:
machine.disable()
machine.enable(new_conf.value)
else:
pass
elif new_conf:
machine.enable(new_conf.value)
|
5,069 |
def setup(ax, text):
"""Place a label at the center of an axes, and remove the axis ticks."""
ax.text(.5, .5, text, transform=ax.transAxes,
horizontalalignment="center", verticalalignment="center")
ax.tick_params(bottom=False, labelbottom=False,
left=False, labelleft=False)
|
def axes_label(ax, text):
"""Place a label at the center of an axes, and remove the axis ticks."""
ax.text(.5, .5, text, transform=ax.transAxes,
horizontalalignment="center", verticalalignment="center")
ax.tick_params(bottom=False, labelbottom=False,
left=False, labelleft=False)
|
2,472 |
def plot_clustering(X_red, labels, title=None):
x_min, x_max = np.min(X_red, axis=0), np.max(X_red, axis=0)
X_red = (X_red - x_min) / (x_max - x_min)
plt.figure(figsize=(6, 4))
for t in np.sort(np.unique(y)):
plt.scatter(
*X_red[y == t].T,
marker=f"${t}$",
s=50,
c=plt.cm.nipy_spectral(labels[y == t] / 10),
alpha=0.5,
)
plt.xticks([])
plt.yticks([])
if title is not None:
plt.title(title, size=17)
plt.axis("off")
plt.tight_layout(rect=[0, 0.03, 1, 0.95])
|
def plot_clustering(X_red, labels, title=None):
x_min, x_max = np.min(X_red, axis=0), np.max(X_red, axis=0)
X_red = (X_red - x_min) / (x_max - x_min)
plt.figure(figsize=(6, 4))
for t in np.sort(np.unique(y)):
plt.scatter(
*X_red[y == t].T,
marker=f"${t}$",
s=50,
c=plt.cm.nipy_spectral(labels[y == digit] / 10),
alpha=0.5,
)
plt.xticks([])
plt.yticks([])
if title is not None:
plt.title(title, size=17)
plt.axis("off")
plt.tight_layout(rect=[0, 0.03, 1, 0.95])
|
17,514 |
def open_mfdataset(
paths: str | NestedSequence[str | os.PathLike],
chunks: T_Chunks = None,
concat_dim: str
| DataArray
| Index
| Sequence[str]
| Sequence[DataArray]
| Sequence[Index]
| None = None,
compat: CompatOptions = "no_conflicts",
preprocess: Callable[[Dataset], Dataset] | None = None,
engine: T_Engine = None,
data_vars: Literal["all", "minimal", "different"] | list[str] = "all",
coords="different",
combine: Literal["by_coords", "nested"] = "by_coords",
parallel: bool = False,
join: JoinOptions = "outer",
attrs_file: str | os.PathLike | None = None,
combine_attrs: CombineAttrsOptions = "override",
**kwargs,
) -> Dataset:
"""Open multiple files as a single dataset.
If combine='by_coords' then the function ``combine_by_coords`` is used to combine
the datasets into one before returning the result, and if combine='nested' then
``combine_nested`` is used. The filepaths must be structured according to which
combining function is used, the details of which are given in the documentation for
``combine_by_coords`` and ``combine_nested``. By default ``combine='by_coords'``
will be used. Requires dask to be installed. See documentation for
details on dask [1]_. Global attributes from the ``attrs_file`` are used
for the combined dataset.
Parameters
----------
paths : str or nested sequence of paths
Either a string glob in the form ``"path/to/my/files/*.nc"`` or an explicit list of
files to open. Paths can be given as strings or as pathlib Paths. If
concatenation along more than one dimension is desired, then ``paths`` must be a
nested list-of-lists (see ``combine_nested`` for details). (A string glob will
be expanded to a 1-dimensional list.)
chunks : int, dict, 'auto' or None, optional
Dictionary with keys given by dimension names and values given by chunk sizes.
In general, these should divide the dimensions of each dataset. If int, chunk
each dimension by ``chunks``. By default, chunks will be chosen to load entire
input files into memory at once. This has a major impact on performance: please
see the full documentation for more details [2]_.
concat_dim : str, DataArray, Index or a Sequence of these or None, optional
Dimensions to concatenate files along. You only need to provide this argument
if ``combine='nested'``, and if any of the dimensions along which you want to
concatenate is not a dimension in the original datasets, e.g., if you want to
stack a collection of 2D arrays along a third dimension. Set
``concat_dim=[..., None, ...]`` explicitly to disable concatenation along a
particular dimension. Default is None, which for a 1D list of filepaths is
equivalent to opening the files separately and then merging them with
``xarray.merge``.
combine : {"by_coords", "nested"}, optional
Whether ``xarray.combine_by_coords`` or ``xarray.combine_nested`` is used to
combine all the data. Default is to use ``xarray.combine_by_coords``.
compat : {"identical", "equals", "broadcast_equals", \
"no_conflicts", "override"}, default: "no_conflicts"
String indicating how to compare variables of the same name for
potential conflicts when merging:
* "broadcast_equals": all values must be equal when variables are
broadcast against each other to ensure common dimensions.
* "equals": all values and dimensions must be the same.
* "identical": all values, dimensions and attributes must be the
same.
* "no_conflicts": only values which are not null in both datasets
must be equal. The returned dataset then contains the combination
of all non-null values.
* "override": skip comparing and pick variable from first dataset
preprocess : callable, optional
If provided, call this function on each dataset prior to concatenation.
You can find the file-name from which each dataset was loaded in
``ds.encoding["source"]``.
engine : {"netcdf4", "scipy", "pydap", "h5netcdf", "pynio", "cfgrib", \
"pseudonetcdf", "zarr", None}, installed backend \
or subclass of xarray.backends.BackendEntrypoint, optional
Engine to use when reading files. If not provided, the default engine
is chosen based on available dependencies, with a preference for
"netcdf4".
data_vars : {"minimal", "different", "all"} or list of str, default: "all"
These data variables will be concatenated together:
* "minimal": Only data variables in which the dimension already
appears are included.
* "different": Data variables which are not equal (ignoring
attributes) across all datasets are also concatenated (as well as
all for which dimension already appears). Beware: this option may
load the data payload of data variables into memory if they are not
already loaded.
* "all": All data variables will be concatenated.
* list of str: The listed data variables will be concatenated, in
addition to the "minimal" data variables.
coords : {"minimal", "different", "all"} or list of str, optional
These coordinate variables will be concatenated together:
* "minimal": Only coordinates in which the dimension already appears
are included.
* "different": Coordinates which are not equal (ignoring attributes)
across all datasets are also concatenated (as well as all for which
dimension already appears). Beware: this option may load the data
payload of coordinate variables into memory if they are not already
loaded.
* "all": All coordinate variables will be concatenated, except
those corresponding to other dimensions.
* list of str: The listed coordinate variables will be concatenated,
in addition the "minimal" coordinates.
parallel : bool, default: False
If True, the open and preprocess steps of this function will be
performed in parallel using ``dask.delayed``. Default is False.
join : {"outer", "inner", "left", "right", "exact", "override"}, default: "outer"
String indicating how to combine differing indexes
(excluding concat_dim) in objects
- "outer": use the union of object indexes
- "inner": use the intersection of object indexes
- "left": use indexes from the first object with each dimension
- "right": use indexes from the last object with each dimension
- "exact": instead of aligning, raise `ValueError` when indexes to be
aligned are not equal
- "override": if indexes are of same size, rewrite indexes to be
those of the first object with that dimension. Indexes for the same
dimension must have the same size in all objects.
attrs_file : str or path-like, optional
Path of the file used to read global attributes from.
By default global attributes are read from the first file provided,
with wildcard matches sorted by filename.
combine_attrs : {"drop", "identical", "no_conflicts", "drop_conflicts", \
"override"} or callable, default: "override"
A callable or a string indicating how to combine attrs of the objects being
merged:
- "drop": empty attrs on returned Dataset.
- "identical": all attrs must be the same on every object.
- "no_conflicts": attrs from all objects are combined, any that have
the same name must also have the same value.
- "drop_conflicts": attrs from all objects are combined, any that have
the same name but different values are dropped.
- "override": skip comparing and copy attrs from the first dataset to
the result.
If a callable, it must expect a sequence of ``attrs`` dicts and a context object
as its only parameters.
**kwargs : optional
Additional arguments passed on to :py:func:`xarray.open_dataset`.
Returns
-------
xarray.Dataset
Notes
-----
``open_mfdataset`` opens files with read-only access. When you modify values
of a Dataset, even one linked to files on disk, only the in-memory copy you
are manipulating in xarray is modified: the original file on disk is never
touched.
See Also
--------
combine_by_coords
combine_nested
open_dataset
Examples
--------
A user might want to pass additional arguments into ``preprocess`` when
applying some operation to many individual files that are being opened. One route
to do this is through the use of ``functools.partial``.
>>> from functools import partial
>>> def _preprocess(x, lon_bnds, lat_bnds):
... return x.sel(lon=slice(*lon_bnds), lat=slice(*lat_bnds))
...
>>> lon_bnds, lat_bnds = (-110, -105), (40, 45)
>>> partial_func = partial(_preprocess, lon_bnds=lon_bnds, lat_bnds=lat_bnds)
>>> ds = xr.open_mfdataset("file_*.nc", concat_dim="time", preprocess=_preprocess)
References
----------
.. [1] https://docs.xarray.dev/en/stable/dask.html
.. [2] https://docs.xarray.dev/en/stable/dask.html#chunking-and-performance
"""
if isinstance(paths, str):
if is_remote_uri(paths) and engine == "zarr":
try:
from fsspec.core import get_fs_token_paths
except ImportError as e:
raise ImportError(
"The use of remote URLs for opening zarr requires the package fsspec"
) from e
fs, _, _ = get_fs_token_paths(
paths,
mode="rb",
storage_options=kwargs.get("backend_kwargs", {}).get(
"storage_options", {}
),
expand=False,
)
tmp_paths = fs.glob(fs._strip_protocol(paths)) # finds directories
paths = [fs.get_mapper(path) for path in tmp_paths]
elif is_remote_uri(paths):
raise ValueError(
"cannot do wild-card matching for paths that are remote URLs "
f"unless engine='zarr' is specified. Got paths: {paths}. "
"Instead, supply paths as an explicit list of strings."
)
else:
paths = sorted(glob(_normalize_path(paths)))
elif isinstance(paths, os.PathLike):
paths = [os.fspath(paths)]
else:
paths = [os.fspath(p) if isinstance(p, os.PathLike) else p for p in paths]
if not paths:
raise OSError("no files to open")
if combine == "nested":
if isinstance(concat_dim, (str, DataArray)) or concat_dim is None:
concat_dim = [concat_dim] # type: ignore[assignment]
# This creates a flat list which is easier to iterate over, whilst
# encoding the originally-supplied structure as "ids".
# The "ids" are not used at all if combine='by_coords`.
combined_ids_paths = _infer_concat_order_from_positions(paths)
ids, paths = (
list(combined_ids_paths.keys()),
list(combined_ids_paths.values()),
)
elif combine == "by_coords" and concat_dim is not None:
raise ValueError(
"When combine='by_coords', passing a value for `concat_dim` has no "
"effect. To manually combine along a specific dimension you should "
"instead specify combine='nested' along with a value for `concat_dim`.",
)
open_kwargs = dict(engine=engine, chunks=chunks or {}, **kwargs)
if parallel:
import dask
# wrap the open_dataset, getattr, and preprocess with delayed
open_ = dask.delayed(open_dataset)
getattr_ = dask.delayed(getattr)
if preprocess is not None:
preprocess = dask.delayed(preprocess)
else:
open_ = open_dataset
getattr_ = getattr
datasets = [open_(p, **open_kwargs) for p in paths]
closers = [getattr_(ds, "_close") for ds in datasets]
if preprocess is not None:
datasets = [preprocess(ds) for ds in datasets]
if parallel:
# calling compute here will return the datasets/file_objs lists,
# the underlying datasets will still be stored as dask arrays
datasets, closers = dask.compute(datasets, closers)
# Combine all datasets, closing them in case of a ValueError
try:
if combine == "nested":
# Combined nested list by successive concat and merge operations
# along each dimension, using structure given by "ids"
combined = _nested_combine(
datasets,
concat_dims=concat_dim,
compat=compat,
data_vars=data_vars,
coords=coords,
ids=ids,
join=join,
combine_attrs=combine_attrs,
)
elif combine == "by_coords":
# Redo ordering from coordinates, ignoring how they were ordered
# previously
combined = combine_by_coords(
datasets,
compat=compat,
data_vars=data_vars,
coords=coords,
join=join,
combine_attrs=combine_attrs,
)
else:
raise ValueError(
"{} is an invalid option for the keyword argument"
" ``combine``".format(combine)
)
except ValueError:
for ds in datasets:
ds.close()
raise
def multi_file_closer():
for closer in closers:
closer()
combined.set_close(multi_file_closer)
# read global attributes from the attrs_file or from the first dataset
if attrs_file is not None:
if isinstance(attrs_file, os.PathLike):
attrs_file = cast(str, os.fspath(attrs_file))
combined.attrs = datasets[paths.index(attrs_file)].attrs
return combined
|
def open_mfdataset(
paths: str | NestedSequence[str | os.PathLike],
chunks: T_Chunks = None,
concat_dim: str
| DataArray
| Index
| Sequence[str]
| Sequence[DataArray]
| Sequence[Index]
| None = None,
compat: CompatOptions = "no_conflicts",
preprocess: Callable[[Dataset], Dataset] | None = None,
engine: T_Engine = None,
data_vars: Literal["all", "minimal", "different"] | list[str] = "all",
coords="different",
combine: Literal["by_coords", "nested"] = "by_coords",
parallel: bool = False,
join: JoinOptions = "outer",
attrs_file: str | os.PathLike | None = None,
combine_attrs: CombineAttrsOptions = "override",
**kwargs,
) -> Dataset:
"""Open multiple files as a single dataset.
If combine='by_coords' then the function ``combine_by_coords`` is used to combine
the datasets into one before returning the result, and if combine='nested' then
``combine_nested`` is used. The filepaths must be structured according to which
combining function is used, the details of which are given in the documentation for
``combine_by_coords`` and ``combine_nested``. By default ``combine='by_coords'``
will be used. Requires dask to be installed. See documentation for
details on dask [1]_. Global attributes from the ``attrs_file`` are used
for the combined dataset.
Parameters
----------
paths : str or nested sequence of paths
Either a string glob in the form ``"path/to/my/files/*.nc"`` or an explicit list of
files to open. Paths can be given as strings or as pathlib Paths. If
concatenation along more than one dimension is desired, then ``paths`` must be a
nested list-of-lists (see ``combine_nested`` for details). (A string glob will
be expanded to a 1-dimensional list.)
chunks : int, dict, 'auto' or None, optional
Dictionary with keys given by dimension names and values given by chunk sizes.
In general, these should divide the dimensions of each dataset. If int, chunk
each dimension by ``chunks``. By default, chunks will be chosen to load entire
input files into memory at once. This has a major impact on performance: please
see the full documentation for more details [2]_.
concat_dim : str, DataArray, Index or a Sequence of these or None, optional
Dimensions to concatenate files along. You only need to provide this argument
if ``combine='nested'``, and if any of the dimensions along which you want to
concatenate is not a dimension in the original datasets, e.g., if you want to
stack a collection of 2D arrays along a third dimension. Set
``concat_dim=[..., None, ...]`` explicitly to disable concatenation along a
particular dimension. Default is None, which for a 1D list of filepaths is
equivalent to opening the files separately and then merging them with
``xarray.merge``.
combine : {"by_coords", "nested"}, optional
Whether ``xarray.combine_by_coords`` or ``xarray.combine_nested`` is used to
combine all the data. Default is to use ``xarray.combine_by_coords``.
compat : {"identical", "equals", "broadcast_equals", \
"no_conflicts", "override"}, default: "no_conflicts"
String indicating how to compare variables of the same name for
potential conflicts when merging:
* "broadcast_equals": all values must be equal when variables are
broadcast against each other to ensure common dimensions.
* "equals": all values and dimensions must be the same.
* "identical": all values, dimensions and attributes must be the
same.
* "no_conflicts": only values which are not null in both datasets
must be equal. The returned dataset then contains the combination
of all non-null values.
* "override": skip comparing and pick variable from first dataset
preprocess : callable, optional
If provided, call this function on each dataset prior to concatenation.
You can find the file-name from which each dataset was loaded in
``ds.encoding["source"]``.
engine : {"netcdf4", "scipy", "pydap", "h5netcdf", "pynio", "cfgrib", \
"pseudonetcdf", "zarr", None}, installed backend \
or subclass of xarray.backends.BackendEntrypoint, optional
Engine to use when reading files. If not provided, the default engine
is chosen based on available dependencies, with a preference for
"netcdf4".
data_vars : {"minimal", "different", "all"} or list of str, default: "all"
These data variables will be concatenated together:
* "minimal": Only data variables in which the dimension already
appears are included.
* "different": Data variables which are not equal (ignoring
attributes) across all datasets are also concatenated (as well as
all for which dimension already appears). Beware: this option may
load the data payload of data variables into memory if they are not
already loaded.
* "all": All data variables will be concatenated.
* list of str: The listed data variables will be concatenated, in
addition to the "minimal" data variables.
coords : {"minimal", "different", "all"} or list of str, optional
These coordinate variables will be concatenated together:
* "minimal": Only coordinates in which the dimension already appears
are included.
* "different": Coordinates which are not equal (ignoring attributes)
across all datasets are also concatenated (as well as all for which
dimension already appears). Beware: this option may load the data
payload of coordinate variables into memory if they are not already
loaded.
* "all": All coordinate variables will be concatenated, except
those corresponding to other dimensions.
* list of str: The listed coordinate variables will be concatenated,
in addition the "minimal" coordinates.
parallel : bool, default: False
If True, the open and preprocess steps of this function will be
performed in parallel using ``dask.delayed``. Default is False.
join : {"outer", "inner", "left", "right", "exact", "override"}, default: "outer"
String indicating how to combine differing indexes
(excluding concat_dim) in objects
- "outer": use the union of object indexes
- "inner": use the intersection of object indexes
- "left": use indexes from the first object with each dimension
- "right": use indexes from the last object with each dimension
- "exact": instead of aligning, raise `ValueError` when indexes to be
aligned are not equal
- "override": if indexes are of same size, rewrite indexes to be
those of the first object with that dimension. Indexes for the same
dimension must have the same size in all objects.
attrs_file : str or path-like, optional
Path of the file used to read global attributes from.
By default global attributes are read from the first file provided,
with wildcard matches sorted by filename.
combine_attrs : {"drop", "identical", "no_conflicts", "drop_conflicts", \
"override"} or callable, default: "override"
A callable or a string indicating how to combine attrs of the objects being
merged:
- "drop": empty attrs on returned Dataset.
- "identical": all attrs must be the same on every object.
- "no_conflicts": attrs from all objects are combined, any that have
the same name must also have the same value.
- "drop_conflicts": attrs from all objects are combined, any that have
the same name but different values are dropped.
- "override": skip comparing and copy attrs from the first dataset to
the result.
If a callable, it must expect a sequence of ``attrs`` dicts and a context object
as its only parameters.
**kwargs : optional
Additional arguments passed on to :py:func:`xarray.open_dataset`.
Returns
-------
xarray.Dataset
Notes
-----
``open_mfdataset`` opens files with read-only access. When you modify values
of a Dataset, even one linked to files on disk, only the in-memory copy you
are manipulating in xarray is modified: the original file on disk is never
touched.
See Also
--------
combine_by_coords
combine_nested
open_dataset
Examples
--------
A user might want to pass additional arguments into ``preprocess`` when
applying some operation to many individual files that are being opened. One route
to do this is through the use of ``functools.partial``.
>>> from functools import partial
>>> def _preprocess(x, lon_bnds, lat_bnds):
... return x.sel(lon=slice(*lon_bnds), lat=slice(*lat_bnds))
...
>>> lon_bnds, lat_bnds = (-110, -105), (40, 45)
>>> partial_func = partial(_preprocess, lon_bnds=lon_bnds, lat_bnds=lat_bnds)
>>> ds = xr.open_mfdataset("file_*.nc", concat_dim="time", preprocess=_preprocess) # doctest: SKIP
References
----------
.. [1] https://docs.xarray.dev/en/stable/dask.html
.. [2] https://docs.xarray.dev/en/stable/dask.html#chunking-and-performance
"""
if isinstance(paths, str):
if is_remote_uri(paths) and engine == "zarr":
try:
from fsspec.core import get_fs_token_paths
except ImportError as e:
raise ImportError(
"The use of remote URLs for opening zarr requires the package fsspec"
) from e
fs, _, _ = get_fs_token_paths(
paths,
mode="rb",
storage_options=kwargs.get("backend_kwargs", {}).get(
"storage_options", {}
),
expand=False,
)
tmp_paths = fs.glob(fs._strip_protocol(paths)) # finds directories
paths = [fs.get_mapper(path) for path in tmp_paths]
elif is_remote_uri(paths):
raise ValueError(
"cannot do wild-card matching for paths that are remote URLs "
f"unless engine='zarr' is specified. Got paths: {paths}. "
"Instead, supply paths as an explicit list of strings."
)
else:
paths = sorted(glob(_normalize_path(paths)))
elif isinstance(paths, os.PathLike):
paths = [os.fspath(paths)]
else:
paths = [os.fspath(p) if isinstance(p, os.PathLike) else p for p in paths]
if not paths:
raise OSError("no files to open")
if combine == "nested":
if isinstance(concat_dim, (str, DataArray)) or concat_dim is None:
concat_dim = [concat_dim] # type: ignore[assignment]
# This creates a flat list which is easier to iterate over, whilst
# encoding the originally-supplied structure as "ids".
# The "ids" are not used at all if combine='by_coords`.
combined_ids_paths = _infer_concat_order_from_positions(paths)
ids, paths = (
list(combined_ids_paths.keys()),
list(combined_ids_paths.values()),
)
elif combine == "by_coords" and concat_dim is not None:
raise ValueError(
"When combine='by_coords', passing a value for `concat_dim` has no "
"effect. To manually combine along a specific dimension you should "
"instead specify combine='nested' along with a value for `concat_dim`.",
)
open_kwargs = dict(engine=engine, chunks=chunks or {}, **kwargs)
if parallel:
import dask
# wrap the open_dataset, getattr, and preprocess with delayed
open_ = dask.delayed(open_dataset)
getattr_ = dask.delayed(getattr)
if preprocess is not None:
preprocess = dask.delayed(preprocess)
else:
open_ = open_dataset
getattr_ = getattr
datasets = [open_(p, **open_kwargs) for p in paths]
closers = [getattr_(ds, "_close") for ds in datasets]
if preprocess is not None:
datasets = [preprocess(ds) for ds in datasets]
if parallel:
# calling compute here will return the datasets/file_objs lists,
# the underlying datasets will still be stored as dask arrays
datasets, closers = dask.compute(datasets, closers)
# Combine all datasets, closing them in case of a ValueError
try:
if combine == "nested":
# Combined nested list by successive concat and merge operations
# along each dimension, using structure given by "ids"
combined = _nested_combine(
datasets,
concat_dims=concat_dim,
compat=compat,
data_vars=data_vars,
coords=coords,
ids=ids,
join=join,
combine_attrs=combine_attrs,
)
elif combine == "by_coords":
# Redo ordering from coordinates, ignoring how they were ordered
# previously
combined = combine_by_coords(
datasets,
compat=compat,
data_vars=data_vars,
coords=coords,
join=join,
combine_attrs=combine_attrs,
)
else:
raise ValueError(
"{} is an invalid option for the keyword argument"
" ``combine``".format(combine)
)
except ValueError:
for ds in datasets:
ds.close()
raise
def multi_file_closer():
for closer in closers:
closer()
combined.set_close(multi_file_closer)
# read global attributes from the attrs_file or from the first dataset
if attrs_file is not None:
if isinstance(attrs_file, os.PathLike):
attrs_file = cast(str, os.fspath(attrs_file))
combined.attrs = datasets[paths.index(attrs_file)].attrs
return combined
|
49,132 |
def compute_edge_weights(edge_ids, edge_probabilities, beta, threshold):
"""
Convert edge probabilities to energies for the multicut problem.
edge_ids:
The list of edges in the graph. shape=(N, 2)
edge_probabilities:
1-D, float (1.0 means edge is CUT, disconnecting the two SPs)
beta:
scalar (float)
threshold:
scalar (float), moves the 0 of the edge weights (default threshold = 0.5)
Special behavior:
If any node has ID 0, all of it's edges will be given an
artificially low energy, to prevent it from merging with its
neighbors, regardless of what the edge_probabilities say.
"""
def rescale(probabilities, threshold):
"""
Given a threshold in the range (0,1), rescales the probabilities below and above
the threshold to the ranges (0,0.5], and (0.5,1) respectively. This is needed
to implement an effective 'moving' of the 0 weight, since the multicut algorithm
implicitly calculates that weights change sign at p=0.5.
:param probabilities: 1d array (float). Probability data within range (0,1)
:param threshold: scalar (float). The new threshold for the algorithm.
:return: Rescaled data to be used in algorithm.
"""
out = np.zeros_like(probabilities)
data_lower = probabilities[probabilities <= threshold]
data_upper = probabilities[probabilities > threshold]
data_lower = (data_lower / threshold) * 0.5
data_upper = (((data_upper - threshold) / (1 - threshold)) * 0.5) + 0.5
out[probabilities <= threshold] = data_lower
out[probabilities > threshold] = data_upper
return out
p1 = edge_probabilities # P(Edge=CUT)
p1 = np.clip(p1, 0.001, 0.999)
p1 = rescale(p1, threshold)
p0 = 1.0 - p1 # P(Edge=NOT CUT)
edge_weights = np.log(p0 / p1) + np.log((1 - beta) / beta)
# See note special behavior, above
edges_touching_zero = edge_ids[:, 0] == 0
if edges_touching_zero.any():
logger.warning("Volume contains label 0, which will be excluded from the segmentation.")
MINIMUM_ENERGY = -1000.0
edge_weights[edges_touching_zero] = MINIMUM_ENERGY
return edge_weights
|
def compute_edge_weights(edge_ids, edge_probabilities, beta, threshold):
"""
Convert edge probabilities to energies for the multicut problem.
edge_ids:
The list of edges in the graph. shape=(N, 2)
edge_probabilities:
1-D, float (1.0 means edge is CUT, disconnecting the two SPs)
beta:
scalar (float)
threshold:
scalar (float), moves the 0 of the edge weights (default threshold = 0.5)
Special behavior:
If any node has ID 0, all of it's edges will be given an
artificially low energy, to prevent it from merging with its
neighbors, regardless of what the edge_probabilities say.
"""
def rescale(probabilities, threshold):
"""
Given a threshold in the range (0,1), rescales the probabilities below and above
the threshold to the ranges (0,0.5], and (0.5,1) respectively. This is needed
to implement an effective 'moving' of the 0 weight, since the multicut algorithm
implicitly calculates that weights change sign at p=0.5.
:param probabilities: 1d array (float). Probability data within range (0,1)
:param threshold: scalar (float). The new threshold for the algorithm.
:return: Rescaled data to be used in algorithm.
"""
out = np.zeros_like(probabilities)
data_lower = probabilities[probabilities <= threshold]
data_upper = probabilities[probabilities > threshold]
data_lower = (data_lower / threshold) * 0.5
data_upper = (((data_upper - threshold) / (1 - threshold)) * 0.5) + 0.5
out[probabilities <= threshold] = data_lower
out[probabilities > threshold] = data_upper
return out
p1 = edge_probabilities # P(Edge=CUT)
p1 = np.clip(p1, 0.001, 0.999)
p1 = rescale(p1, threshold)
# log((p0 / p1) + log((1-beta) / beta)), where p0 = 1 - p1 is P(Edge=NOT CUT).
edge_weights = np.log(np.recipocral(p1) - 1) + np.log(1 / beta - 1)
# See note special behavior, above
edges_touching_zero = edge_ids[:, 0] == 0
if edges_touching_zero.any():
logger.warning("Volume contains label 0, which will be excluded from the segmentation.")
MINIMUM_ENERGY = -1000.0
edge_weights[edges_touching_zero] = MINIMUM_ENERGY
return edge_weights
|
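The rewritten weight expression in the modified column relies on the identity log(p0 / p1) = log((1 - p1) / p1) = log(1 / p1 - 1), and likewise log((1 - beta) / beta) = log(1 / beta - 1). A quick numerical check of that equivalence, assuming only NumPy (whose reciprocal ufunc is spelled `np.reciprocal`):

import numpy as np

p1 = np.clip(np.array([0.2, 0.5, 0.9]), 0.001, 0.999)  # P(Edge=CUT)
beta = 0.3

w_ratio = np.log((1.0 - p1) / p1) + np.log((1.0 - beta) / beta)       # ratio formulation
w_recip = np.log(np.reciprocal(p1) - 1.0) + np.log(1.0 / beta - 1.0)  # reciprocal formulation
assert np.allclose(w_ratio, w_recip)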
54,950 |
def get_qnode_creator(device_jacobian, model, diff_method):
"""Returns the class for the specified QNode.
Raises:
ValueError: if an unrecognized ``diff_method`` is defined
Returns:
callable: the QNode class object that will be instantiated
"""
if diff_method is None:
# QNode is not differentiable
return BaseQNode
if device_jacobian and (diff_method == "best"):
# hand off differentiation to the device
return DeviceJacobianQNode
if model in PARAMETER_SHIFT_QNODES and diff_method in ("best", "parameter-shift"):
# parameter-shift analytic differentiation
return PARAMETER_SHIFT_QNODES[model]
if diff_method in ALLOWED_DIFF_METHODS:
# finite differences
return JacobianQNode
raise ValueError(
"Differentiation method {} not recognized. Allowed "
"options are {}".format(diff_method, ALLOWED_DIFF_METHODS)
)
|
def get_qnode_creator(device_jacobian, model, diff_method):
"""Returns the class for the specified QNode.
Raises:
ValueError: if an unrecognized ``diff_method`` is defined
Returns:
~.BaseQNode: the QNode class object that is compatible with the provided device and
differentiation method
"""
if diff_method is None:
# QNode is not differentiable
return BaseQNode
if device_jacobian and (diff_method == "best"):
# hand off differentiation to the device
return DeviceJacobianQNode
if model in PARAMETER_SHIFT_QNODES and diff_method in ("best", "parameter-shift"):
# parameter-shift analytic differentiation
return PARAMETER_SHIFT_QNODES[model]
if diff_method in ALLOWED_DIFF_METHODS:
# finite differences
return JacobianQNode
raise ValueError(
"Differentiation method {} not recognized. Allowed "
"options are {}".format(diff_method, ALLOWED_DIFF_METHODS)
)
|
33,501 |
def update_and_await_stack(**kwargs):
cloudformation = aws_stack.connect_to_service("cloudformation")
response = cloudformation.update_stack(**kwargs)
assert response["ResponseMetadata"]["HTTPStatusCode"] == 200
result = await_stack_completion(kwargs["StackName"])
return result
|
def update_and_await_stack(**kwargs):
cloudformation = aws_stack.connect_to_service("cloudformation")
response = cloudformation.update_stack(**kwargs)
assert response["ResponseMetadata"]["HTTPStatusCode"] == 200
return await_stack_completion(kwargs["StackName"])
|
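A hedged usage sketch for the helper above; the stack name and template are hypothetical, and aws_stack / await_stack_completion are assumed to be importable from the surrounding test utilities.

import json

template = {"Resources": {"Bucket": {"Type": "AWS::S3::Bucket"}}}   # placeholder template
result = update_and_await_stack(
    StackName="my-test-stack",           # assumed existing stack
    TemplateBody=json.dumps(template),   # standard update_stack keyword argument
)
print(result.get("StackStatus"))         # assumed to report e.g. "UPDATE_COMPLETE"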
20,133 |
def setup(
service,
config,
setup_db=True,
register_mq_exchanges=True,
register_signal_handlers=True,
register_internal_trigger_types=False,
run_migrations=True,
register_runners=True,
service_registry=False,
capabilities=None,
config_args=None,
):
"""
Common setup function.
Currently it performs the following operations:
1. Parses config and CLI arguments
2. Establishes DB connection
3. Set log level for all the loggers to DEBUG if --debug flag is present or
if system.debug config option is set to True.
4. Registers RabbitMQ exchanges
5. Registers common signal handlers
6. Register internal trigger types
7. Register all the runners which are installed inside StackStorm virtualenv.
8. Register service in the service registry with the provided capabilities
:param service: Name of the service.
:param config: Config object to use to parse args.
"""
capabilities = capabilities or {}
# Set up logger which logs everything which happens during and before config
# parsing to sys.stdout
logging.setup(DEFAULT_LOGGING_CONF_PATH, excludes=None)
# Parse args to setup config.
if config_args is not None:
config.parse_args(config_args)
else:
config.parse_args()
version = "%s.%s.%s" % (
sys.version_info[0],
sys.version_info[1],
sys.version_info[2],
)
# We print locale related info to make it easier to troubleshoot issues where locale is not
# set correctly (e.g. using C / ascii, but services are trying to work with unicode data
# would result in things blowing up)
fs_encoding = sys.getfilesystemencoding()
default_encoding = sys.getdefaultencoding()
lang_env = os.environ.get("LANG", "notset")
pythonipencoding_env = os.environ.get("PYTHONIOENCODING", "notset")
try:
language_code, encoding = locale.getdefaultlocale()
if language_code and encoding:
used_locale = ".".join([language_code, encoding])
else:
used_locale = "unable to retrieve locale"
except Exception as e:
language_code, encoding = "unknown", "unknown"
used_locale = "unable to retrieve locale: %s " % (str(e))
LOG.info("Using Python: %s (%s)" % (version, sys.executable))
LOG.info(
"Using fs encoding: %s, default encoding: %s, locale: %s, LANG env variable: %s, "
"PYTHONIOENCODING env variable: %s"
% (fs_encoding, default_encoding, lang_env, used_locale, pythonipencoding_env)
)
config_file_paths = cfg.CONF.config_file
config_file_paths = [os.path.abspath(path) for path in config_file_paths]
LOG.info("Using config files: %s", ",".join(config_file_paths))
# Setup logging.
logging_config_path = config.get_logging_config_path()
logging_config_path = os.path.abspath(logging_config_path)
LOG.info("Using logging config: %s", logging_config_path)
# Warn on non utf-8 locale which could cause issues when running under Python 3 and working
# with unicode data
if (
fs_encoding.lower() not in VALID_UTF8_ENCODINGS
or encoding.lower() not in VALID_UTF8_ENCODINGS
):
LOG.warning(
NON_UTF8_LOCALE_WARNING_MSG
% (fs_encoding, default_encoding, used_locale.strip())
)
is_debug_enabled = cfg.CONF.debug or cfg.CONF.system.debug
try:
logging.setup(
logging_config_path,
redirect_stderr=cfg.CONF.log.redirect_stderr,
excludes=cfg.CONF.log.excludes,
)
except KeyError as e:
tb_msg = traceback.format_exc()
if "log.setLevel" in tb_msg:
msg = (
"Invalid log level selected. Log level names need to be all uppercase."
)
msg += "\n\n" + getattr(e, "message", six.text_type(e))
raise KeyError(msg)
else:
raise e
exclude_log_levels = [stdlib_logging.AUDIT]
handlers = stdlib_logging.getLoggerClass().manager.root.handlers
for handler in handlers:
# If log level is not set to DEBUG we filter out "AUDIT" log messages. This way we avoid
# duplicate "AUDIT" messages in production deployments where default service log level is
# set to "INFO" and we already log messages with level AUDIT to a special dedicated log
# file.
ignore_audit_log_messages = (
handler.level >= stdlib_logging.INFO
and handler.level < stdlib_logging.AUDIT
)
if not is_debug_enabled and ignore_audit_log_messages:
try:
handler_repr = str(handler)
except TypeError:
# In case handler doesn't have name assigned, repr would throw
handler_repr = "unknown"
LOG.debug(
'Excluding log messages with level "AUDIT" for handler "%s"'
% (handler_repr)
)
handler.addFilter(LogLevelFilter(log_levels=exclude_log_levels))
if not is_debug_enabled:
# NOTE: statsd logger logs everything by default under INFO so we ignore those log
# messages unless verbose / debug mode is used
logging.ignore_statsd_log_messages()
logging.ignore_lib2to3_log_messages()
if is_debug_enabled:
enable_debugging()
else:
# Add global ignore filters, such as "heartbeat_tick" messages which are logged every 2
# ms which cause too much noise
add_global_filters_for_all_loggers()
if cfg.CONF.profile:
enable_profiling()
# All other setup which requires config to be parsed and logging to be correctly setup.
if setup_db:
db_setup()
if register_mq_exchanges:
register_exchanges_with_retry()
if register_signal_handlers:
register_common_signal_handlers()
if register_internal_trigger_types:
triggers.register_internal_trigger_types()
# TODO: This is a "not so nice" workaround until we have a proper migration system in place
if run_migrations:
run_all_rbac_migrations()
if register_runners:
runnersregistrar.register_runners()
register_kombu_serializers()
metrics_initialize()
# Register service in the service registry
if cfg.CONF.coordination.service_registry and service_registry:
        # NOTE: It's important that we pass start_heart=True to start the heartbeat process
register_service_in_service_registry(
service=service, capabilities=capabilities, start_heart=True
)
if sys.version_info[0] == 2:
LOG.warning(PYTHON2_DEPRECATION)
|
def setup(
service,
config,
setup_db=True,
register_mq_exchanges=True,
register_signal_handlers=True,
register_internal_trigger_types=False,
run_migrations=True,
register_runners=True,
service_registry=False,
capabilities=None,
config_args=None,
):
"""
Common setup function.
Currently it performs the following operations:
1. Parses config and CLI arguments
2. Establishes DB connection
3. Set log level for all the loggers to DEBUG if --debug flag is present or
if system.debug config option is set to True.
4. Registers RabbitMQ exchanges
5. Registers common signal handlers
6. Register internal trigger types
7. Register all the runners which are installed inside StackStorm virtualenv.
8. Register service in the service registry with the provided capabilities
:param service: Name of the service.
:param config: Config object to use to parse args.
"""
capabilities = capabilities or {}
# Set up logger which logs everything which happens during and before config
# parsing to sys.stdout
logging.setup(DEFAULT_LOGGING_CONF_PATH, excludes=None)
# Parse args to setup config.
if config_args is not None:
config.parse_args(config_args)
else:
config.parse_args()
version = "%s.%s.%s" % (
sys.version_info[0],
sys.version_info[1],
sys.version_info[2],
)
# We print locale related info to make it easier to troubleshoot issues where locale is not
# set correctly (e.g. using C / ascii, but services are trying to work with unicode data
# would result in things blowing up)
fs_encoding = sys.getfilesystemencoding()
default_encoding = sys.getdefaultencoding()
lang_env = os.environ.get("LANG", "notset")
pythonioencoding_env = os.environ.get("PYTHONIOENCODING", "notset")
try:
language_code, encoding = locale.getdefaultlocale()
if language_code and encoding:
used_locale = ".".join([language_code, encoding])
else:
used_locale = "unable to retrieve locale"
except Exception as e:
language_code, encoding = "unknown", "unknown"
used_locale = "unable to retrieve locale: %s " % (str(e))
LOG.info("Using Python: %s (%s)" % (version, sys.executable))
LOG.info(
"Using fs encoding: %s, default encoding: %s, locale: %s, LANG env variable: %s, "
"PYTHONIOENCODING env variable: %s"
        % (fs_encoding, default_encoding, lang_env, used_locale, pythonioencoding_env)
)
config_file_paths = cfg.CONF.config_file
config_file_paths = [os.path.abspath(path) for path in config_file_paths]
LOG.info("Using config files: %s", ",".join(config_file_paths))
# Setup logging.
logging_config_path = config.get_logging_config_path()
logging_config_path = os.path.abspath(logging_config_path)
LOG.info("Using logging config: %s", logging_config_path)
# Warn on non utf-8 locale which could cause issues when running under Python 3 and working
# with unicode data
if (
fs_encoding.lower() not in VALID_UTF8_ENCODINGS
or encoding.lower() not in VALID_UTF8_ENCODINGS
):
LOG.warning(
NON_UTF8_LOCALE_WARNING_MSG
% (fs_encoding, default_encoding, used_locale.strip())
)
is_debug_enabled = cfg.CONF.debug or cfg.CONF.system.debug
try:
logging.setup(
logging_config_path,
redirect_stderr=cfg.CONF.log.redirect_stderr,
excludes=cfg.CONF.log.excludes,
)
except KeyError as e:
tb_msg = traceback.format_exc()
if "log.setLevel" in tb_msg:
msg = (
"Invalid log level selected. Log level names need to be all uppercase."
)
msg += "\n\n" + getattr(e, "message", six.text_type(e))
raise KeyError(msg)
else:
raise e
exclude_log_levels = [stdlib_logging.AUDIT]
handlers = stdlib_logging.getLoggerClass().manager.root.handlers
for handler in handlers:
# If log level is not set to DEBUG we filter out "AUDIT" log messages. This way we avoid
# duplicate "AUDIT" messages in production deployments where default service log level is
# set to "INFO" and we already log messages with level AUDIT to a special dedicated log
# file.
ignore_audit_log_messages = (
handler.level >= stdlib_logging.INFO
and handler.level < stdlib_logging.AUDIT
)
if not is_debug_enabled and ignore_audit_log_messages:
try:
handler_repr = str(handler)
except TypeError:
# In case handler doesn't have name assigned, repr would throw
handler_repr = "unknown"
LOG.debug(
'Excluding log messages with level "AUDIT" for handler "%s"'
% (handler_repr)
)
handler.addFilter(LogLevelFilter(log_levels=exclude_log_levels))
if not is_debug_enabled:
# NOTE: statsd logger logs everything by default under INFO so we ignore those log
# messages unless verbose / debug mode is used
logging.ignore_statsd_log_messages()
logging.ignore_lib2to3_log_messages()
if is_debug_enabled:
enable_debugging()
else:
# Add global ignore filters, such as "heartbeat_tick" messages which are logged every 2
# ms which cause too much noise
add_global_filters_for_all_loggers()
if cfg.CONF.profile:
enable_profiling()
# All other setup which requires config to be parsed and logging to be correctly setup.
if setup_db:
db_setup()
if register_mq_exchanges:
register_exchanges_with_retry()
if register_signal_handlers:
register_common_signal_handlers()
if register_internal_trigger_types:
triggers.register_internal_trigger_types()
# TODO: This is a "not so nice" workaround until we have a proper migration system in place
if run_migrations:
run_all_rbac_migrations()
if register_runners:
runnersregistrar.register_runners()
register_kombu_serializers()
metrics_initialize()
# Register service in the service registry
if cfg.CONF.coordination.service_registry and service_registry:
        # NOTE: It's important that we pass start_heart=True to start the heartbeat process
register_service_in_service_registry(
service=service, capabilities=capabilities, start_heart=True
)
if sys.version_info[0] == 2:
LOG.warning(PYTHON2_DEPRECATION)
|
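A minimal, hypothetical invocation of setup() above; the service name, capabilities and the st2common config import are placeholders for whatever the calling service actually uses.

from st2common import config as st2_config   # assumed config module

setup(
    service="my-worker",                     # placeholder service name
    config=st2_config,
    register_internal_trigger_types=False,
    run_migrations=True,
    service_registry=True,
    capabilities={"hostname": "worker-1"},   # example capability metadata
)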
57,655 |
def upload_url_command(client, args):
url = args.get('url')
res = client.upload_url(url)
res = f"{URL_TYPE}{res}"
outputs = {
'ID': res,
'URL': url,
'Status': 'In Progress',
}
results = CommandResults(
readable_output=f"Url uploaded successfully. Analysis ID: {res}",
outputs_prefix='Polygon.Analysis',
outputs_key_field='ID',
outputs=outputs
)
return_results(results)
return results
|
def upload_url_command(client, args):
url = args.get('url')
res = client.upload_url(url)
res = f"{URL_TYPE}{res}"
outputs = {
'ID': res,
'URL': url,
'Status': 'In Progress',
}
results = CommandResults(
readable_output=f"Url uploaded successfully. Analysis ID: {res}",
outputs_prefix='Polygon.Analysis',
outputs_key_field='ID',
outputs=outputs,
raw_response=res
)
return_results(results)
return results
|
36,463 |
def main():
parser = ArgumentParser(description="""\
Send the contents of a directory as a MIME message.
Unless the -o option is given, the email is sent by forwarding to your local
SMTP server, which then does the normal delivery process. Your local machine
must be running an SMTP server.
""")
parser.add_argument('-d', '--directory',
help="""Mail the contents of the specified directory,
otherwise use the current directory. Only the regular
files in the directory are sent, and we don't recurse to
subdirectories.""")
parser.add_argument('-o', '--output',
metavar='FILE',
help="""Print the composed message to FILE instead of
sending the message to the SMTP server.""")
parser.add_argument('-s', '--sender', required=True,
help='The value of the From: header (required)')
parser.add_argument('-r', '--recipient', required=True,
action='append', metavar='RECIPIENT',
default=[], dest='recipients',
help='A To: header value (at least one required)')
args = parser.parse_args()
directory = args.directory
if not directory:
directory = '.'
context = os.path.abspath(directory)
# Create the message
msg = EmailMessage()
msg['Subject'] = 'Contents of directory {}'.format(context)
msg['To'] = ', '.join(args.recipients)
msg['From'] = args.sender
msg.preamble = 'You will not see this in a MIME-aware mail reader.\n'
for filename in os.listdir(directory):
path = os.path.join(directory, filename)
if not os.path.isfile(path):
continue
# Guess the content type based on the file's extension. Encoding
# will be ignored, although we should check for simple things like
# gzip'd or compressed files.
ctype, encoding = mimetypes.guess_type(path)
if ctype is None or encoding is not None:
# No guess could be made, or the file is encoded (compressed), so
# use a generic bag-of-bits type.
ctype = 'application/octet-stream'
maintype, subtype = ctype.split('/', 1)
with open(path, 'rb') as fp:
msg.add_attachment(fp.read(),
maintype=maintype,
subtype=subtype,
filename=filename)
# Now send or store the message
if args.output:
with open(args.output, 'wb') as fp:
fp.write(msg.as_bytes(policy=SMTP))
else:
with smtplib.SMTP('localhost') as s:
s.send_message(msg)
|
def main():
parser = ArgumentParser(description="""\
Send the contents of a directory as a MIME message.
Unless the -o option is given, the email is sent by forwarding to your local
SMTP server, which then does the normal delivery process. Your local machine
must be running an SMTP server.
""")
parser.add_argument('-d', '--directory',
help="""Mail the contents of the specified directory,
otherwise use the current directory. Only the regular
files in the directory are sent, and we don't recurse to
subdirectories.""")
parser.add_argument('-o', '--output',
metavar='FILE',
help="""Print the composed message to FILE instead of
sending the message to the SMTP server.""")
parser.add_argument('-s', '--sender', required=True,
help='The value of the From: header (required)')
parser.add_argument('-r', '--recipient', required=True,
action='append', metavar='RECIPIENT',
default=[], dest='recipients',
help='A To: header value (at least one required)')
args = parser.parse_args()
directory = args.directory
if not directory:
directory = '.'
context = os.path.abspath(directory)
# Create the message
msg = EmailMessage()
msg['Subject'] = f'Contents of directory {context}'
msg['To'] = ', '.join(args.recipients)
msg['From'] = args.sender
msg.preamble = 'You will not see this in a MIME-aware mail reader.\n'
for filename in os.listdir(directory):
path = os.path.join(directory, filename)
if not os.path.isfile(path):
continue
# Guess the content type based on the file's extension. Encoding
# will be ignored, although we should check for simple things like
# gzip'd or compressed files.
ctype, encoding = mimetypes.guess_type(path)
if ctype is None or encoding is not None:
# No guess could be made, or the file is encoded (compressed), so
# use a generic bag-of-bits type.
ctype = 'application/octet-stream'
maintype, subtype = ctype.split('/', 1)
with open(path, 'rb') as fp:
msg.add_attachment(fp.read(),
maintype=maintype,
subtype=subtype,
filename=filename)
# Now send or store the message
if args.output:
with open(args.output, 'wb') as fp:
fp.write(msg.as_bytes(policy=SMTP))
else:
with smtplib.SMTP('localhost') as s:
s.send_message(msg)
|
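The content-type guessing step above can be exercised on its own; a small sketch with hypothetical file names.

import mimetypes

ctype, encoding = mimetypes.guess_type("report.pdf")
print(ctype, encoding)        # application/pdf None -> attach as application/pdf
ctype, encoding = mimetypes.guess_type("archive.tar.gz")
print(ctype, encoding)        # application/x-tar gzip -> fall back to application/octet-stream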
10,653 |
def test_license_set() -> None:
''' Ensure the top-level repo LICENSES.txt always matches the copy in
the Python package folder (needed there when gnerated packages).
'''
chdir(TOP_PATH)
proc = run(["diff", "LICENSE.txt", join("bokeh", "LICENSE.txt")], capture_output=True)
assert proc.returncode == 0, f"LICENSE.txt mismatch:\n{proc.stdout.decode('utf-8')}"
|
def test_license_set() -> None:
''' Ensure the top-level repo LICENSES.txt always matches the copy in
the Python package folder (needed there when generating packages).
'''
chdir(TOP_PATH)
proc = run(["diff", "LICENSE.txt", join("bokeh", "LICENSE.txt")], capture_output=True)
assert proc.returncode == 0, f"LICENSE.txt mismatch:\n{proc.stdout.decode('utf-8')}"
|
25,006 |
def _is_part_of_assignment_target(node: nodes.NodeNG) -> bool:
"""Check whether use of a variable is happening as part of the left-hand
side of an assignment.
This requires recursive checking, because destructuring assignment can have
arbitrarily nested tuples and lists to unpack.
"""
if isinstance(node.parent, nodes.Assign):
return node in node.parent.targets
if isinstance(node.parent, nodes.AugAssign):
return node == node.parent.target
if isinstance(node.parent, (nodes.Tuple, nodes.List)):
return _is_part_of_assignment_target(node.parent)
return None
|
def _is_part_of_assignment_target(node: nodes.NodeNG) -> bool:
"""Check whether use of a variable is happening as part of the left-hand
side of an assignment.
This requires recursive checking, because destructuring assignment can have
arbitrarily nested tuples and lists to unpack.
"""
if isinstance(node.parent, nodes.Assign):
return node in node.parent.targets
if isinstance(node.parent, nodes.AugAssign):
return node == node.parent.target
if isinstance(node.parent, (nodes.Tuple, nodes.List)):
return _is_part_of_assignment_target(node.parent)
return False
|
39,303 |
def vtk_points(points, deep=True):
"""Convert numpy or list of points to a vtkPoints object."""
if not isinstance(points, np.ndarray):
points = np.array(points)
# verify is numeric
if not np.issubdtype(points.dtype, np.number):
raise TypeError('Points must be a numeric type')
# check dimensionality
if points.ndim == 1:
points = points.reshape((-1, 3))
elif points.ndim > 2:
raise ValueError('Dimension of ``points`` should be 1 or 2, not '
f'{points.ndim}')
# verify shape
if points.shape[1] != 3:
raise ValueError('Points array must contain three values per point. \n'
f'Shape is {points.shape} and should be (X, 3)')
# points must be contiguous
if not points.flags['C_CONTIGUOUS']:
points = np.ascontiguousarray(points)
vtkpts = _vtk.vtkPoints()
vtkpts.SetData(_vtk.numpy_to_vtk(points, deep=deep))
return vtkpts
|
def vtk_points(points, deep=True):
"""Convert numpy or list of points to a vtkPoints object."""
if not isinstance(points, np.ndarray):
points = np.array(points)
# verify is numeric
if not np.issubdtype(points.dtype, np.number):
raise TypeError('Points must be a numeric type')
# check dimensionality
if points.ndim == 1:
points = points.reshape((-1, 3))
elif points.ndim > 2:
raise ValueError('Dimension of ``points`` should be 1 or 2, not '
f'{points.ndim}')
# verify shape
if points.shape[1] != 3:
raise ValueError('Points array must contain three values per point. \n'
f'Shape is {points.shape} and should be (X, 3)')
# points must be contiguous
points = np.ascontiguousarray(points)
vtkpts = _vtk.vtkPoints()
vtkpts.SetData(_vtk.numpy_to_vtk(points, deep=deep))
return vtkpts
|
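A small usage sketch for vtk_points(); the random coordinates are illustrative only.

import numpy as np

points = np.random.random((10, 3))        # ten 3D points
vtkpts = vtk_points(points, deep=False)
print(vtkpts.GetNumberOfPoints())         # 10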
31,622 |
def main():
params = demisto.params()
command = demisto.command()
demisto.info(f'Command being called is {command}')
try:
client = Client(server_url=params.get('server_url'),
use_ssl=not params.get('insecure', False),
proxy=params.get('proxy'),
feed_tags=argToList(params.get('feedTags')),
tlp_color=params.get('tlp_color'),
content_max_size=int(params.get('max_size', '45')))
client.create_indicators_from_response()
if demisto.command() == 'test-module':
# if the client was created successfully and there is data in feed the test is successful.
return_results("ok")
elif demisto.command() == 'rss-get-indicators':
return_results(get_indicators(client, demisto.args()))
elif demisto.command() == 'fetch-indicators':
for iter_ in batch(client.parsed_indicators, batch_size=2000):
demisto.createIndicators(iter_)
else:
raise NotImplementedError(f'Command {command} is not implemented.')
# Log exceptions and return errors
except ValueError:
raise DemistoException("Article content max size must be a number, for example 50.")
except Exception as err:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f"Failed to execute {command} command.\nError:\n{str(err)}")
|
def main():
params = demisto.params()
command = demisto.command()
demisto.info(f'Command being called is {command}')
try:
client = Client(server_url=params.get('server_url'),
use_ssl=not params.get('insecure', False),
proxy=params.get('proxy'),
feed_tags=argToList(params.get('feedTags')),
tlp_color=params.get('tlp_color'),
content_max_size=int(params.get('max_size', '45')))
client.create_indicators_from_response()
if demisto.command() == 'test-module':
# if the client was created successfully and there is data in feed the test is successful.
return_results("ok")
elif demisto.command() == 'rss-get-indicators':
return_results(get_indicators(client, demisto.args()))
elif demisto.command() == 'fetch-indicators':
for iter_ in batch(client.parsed_indicators, batch_size=2000):
demisto.createIndicators(iter_)
else:
raise NotImplementedError(f'Command {command} is not implemented.')
# Log exceptions and return errors
except ValueError:
raise DemistoException("Article content max size must be a number. e.g, 50.")
except Exception as err:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f"Failed to execute {command} command.\nError:\n{str(err)}")
|
53,198 |
def get_extra_license_file(check_name):
path = os.path.join(get_root(), check_name, 'LICENSE-3rdparty-extra.csv')
return path, file_exists(path)
|
def get_extra_license_files():
for path in os.listdir(get_root()):
if not file_exists(get_manifest_file(path)):
continue
extra_license_file = os.path.join(get_root(), path, 'LICENSE-3rdparty-extra.csv')
if file_exists(extra_license_file):
yield extra_license_file
|
37,811 |
def render_projects(projects: list[Project], *, include_info: bool = True, dest_path: Path):
io = StringIO()
print = functools.partial(builtins.print, file=io)
print(Project.header())
for project in projects:
print(project.table_row())
print()
for project in projects:
print(project.links())
print()
for icon in ICONS:
print(f"[{icon} icon]: {path_for_icon(icon, relative_to=dest_path).as_posix()}")
if include_info:
print()
for project in projects:
print(project.info())
return io.getvalue()
|
def render_projects(projects: list[Project], *, dest_path: Path, include_info: bool = True):
io = StringIO()
print = functools.partial(builtins.print, file=io)
print(Project.header())
for project in projects:
print(project.table_row())
print()
for project in projects:
print(project.links())
print()
for icon in ICONS:
print(f"[{icon} icon]: {path_for_icon(icon, relative_to=dest_path).as_posix()}")
if include_info:
print()
for project in projects:
print(project.info())
return io.getvalue()
|
27,386 |
def split_grid(
grid: np.ndarray,
num_cores: int
) -> Tuple[List[np.ndarray], List[np.ndarray], int, int]:
"""Split the grid into blocks of vertices.
Take the overall `grid` for the system and split it into lists of
square vertices that can be distributed to each core.
Parameters
----------
grid : numpy.array
2D array
num_cores : int
number of partitions to generate
Returns
-------
list_square_vertex_arrays_per_core : array of arrays
split the list of square vertices
``[[v1,v2,v3,v4],[v1,v2,v3,v4],...,...]`` into roughly equally-sized
sublists to be distributed over the available cores on the system
list_parent_index_values : array of arrays
        arrays of ``[[row, column], [row, column], ...]`` for each core
current_row : int
last row + 1
current_column : int
last column + 1
Note
----
Limited to 2D for now.
"""
# produce an array containing the cartesian coordinates of all vertices in the grid:
x_array, y_array = grid
grid_vertex_cartesian_array = np.dstack((x_array, y_array))
#the grid_vertex_cartesian_array has N_rows, with each row corresponding to a column of coordinates in the grid (
# so a given row has shape N_rows, 2); overall shape (N_columns_in_grid, N_rows_in_a_column, 2)
#although I'll eventually want a pure numpy/scipy/vector-based solution, for now I'll allow loops to simplify the
# division of the cartesian coordinates into a list of the squares in the grid
list_all_squares_in_grid = [] # should eventually be a nested list of all the square vertices in the grid/system
list_parent_index_values = [] # want an ordered list of assignment indices for reconstructing the grid positions
# in the parent process
current_column = 0
while current_column < grid_vertex_cartesian_array.shape[0] - 1:
# go through all the columns except the last one and account for the square vertices (the last column
# has no 'right neighbour')
current_row = 0
while current_row < grid_vertex_cartesian_array.shape[1] - 1:
# all rows except the top row, which doesn't have a row above it for forming squares
bottom_left_vertex_current_square = grid_vertex_cartesian_array[current_column, current_row]
bottom_right_vertex_current_square = grid_vertex_cartesian_array[current_column + 1, current_row]
top_right_vertex_current_square = grid_vertex_cartesian_array[current_column + 1, current_row + 1]
top_left_vertex_current_square = grid_vertex_cartesian_array[current_column, current_row + 1]
#append the vertices of this square to the overall list of square vertices:
list_all_squares_in_grid.append(
[bottom_left_vertex_current_square, bottom_right_vertex_current_square, top_right_vertex_current_square,
top_left_vertex_current_square])
list_parent_index_values.append([current_row, current_column])
current_row += 1
current_column += 1
#split the list of square vertices [[v1,v2,v3,v4],[v1,v2,v3,v4],...,...] into roughly equally-sized sublists to
# be distributed over the available cores on the system:
list_square_vertex_arrays_per_core = np.array_split(list_all_squares_in_grid, num_cores)
list_parent_index_values = np.array_split(list_parent_index_values, num_cores)
return (list_square_vertex_arrays_per_core, list_parent_index_values, current_row, current_column)
|
def split_grid(
grid: np.ndarray,
num_cores: int
) -> Tuple[List[List[int]], List[List[int]], int, int]:
"""Split the grid into blocks of vertices.
Take the overall `grid` for the system and split it into lists of
square vertices that can be distributed to each core.
Parameters
----------
grid : numpy.array
2D array
num_cores : int
number of partitions to generate
Returns
-------
list_square_vertex_arrays_per_core : array of arrays
split the list of square vertices
``[[v1,v2,v3,v4],[v1,v2,v3,v4],...,...]`` into roughly equally-sized
sublists to be distributed over the available cores on the system
list_parent_index_values : array of arrays
        arrays of ``[[row, column], [row, column], ...]`` for each core
current_row : int
last row + 1
current_column : int
last column + 1
Note
----
Limited to 2D for now.
"""
# produce an array containing the cartesian coordinates of all vertices in the grid:
x_array, y_array = grid
grid_vertex_cartesian_array = np.dstack((x_array, y_array))
#the grid_vertex_cartesian_array has N_rows, with each row corresponding to a column of coordinates in the grid (
# so a given row has shape N_rows, 2); overall shape (N_columns_in_grid, N_rows_in_a_column, 2)
#although I'll eventually want a pure numpy/scipy/vector-based solution, for now I'll allow loops to simplify the
# division of the cartesian coordinates into a list of the squares in the grid
list_all_squares_in_grid = [] # should eventually be a nested list of all the square vertices in the grid/system
list_parent_index_values = [] # want an ordered list of assignment indices for reconstructing the grid positions
# in the parent process
current_column = 0
while current_column < grid_vertex_cartesian_array.shape[0] - 1:
# go through all the columns except the last one and account for the square vertices (the last column
# has no 'right neighbour')
current_row = 0
while current_row < grid_vertex_cartesian_array.shape[1] - 1:
# all rows except the top row, which doesn't have a row above it for forming squares
bottom_left_vertex_current_square = grid_vertex_cartesian_array[current_column, current_row]
bottom_right_vertex_current_square = grid_vertex_cartesian_array[current_column + 1, current_row]
top_right_vertex_current_square = grid_vertex_cartesian_array[current_column + 1, current_row + 1]
top_left_vertex_current_square = grid_vertex_cartesian_array[current_column, current_row + 1]
#append the vertices of this square to the overall list of square vertices:
list_all_squares_in_grid.append(
[bottom_left_vertex_current_square, bottom_right_vertex_current_square, top_right_vertex_current_square,
top_left_vertex_current_square])
list_parent_index_values.append([current_row, current_column])
current_row += 1
current_column += 1
#split the list of square vertices [[v1,v2,v3,v4],[v1,v2,v3,v4],...,...] into roughly equally-sized sublists to
# be distributed over the available cores on the system:
list_square_vertex_arrays_per_core = np.array_split(list_all_squares_in_grid, num_cores)
list_parent_index_values = np.array_split(list_parent_index_values, num_cores)
return (list_square_vertex_arrays_per_core, list_parent_index_values, current_row, current_column)
|
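A minimal sketch of split_grid() on a tiny 5x5 vertex grid, assuming only NumPy; the 16 unit squares are split evenly across 4 cores.

import numpy as np

grid = np.mgrid[0:1:5j, 0:1:5j]           # shape (2, 5, 5): x and y vertex coordinates
squares_per_core, indices_per_core, last_row, last_col = split_grid(grid, num_cores=4)
print(len(squares_per_core), len(squares_per_core[0]))   # 4 chunks, 4 squares each
print(last_row, last_col)                                # 4 4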
2,547 |
def test_make_blobs_memory_usage():
try:
import memory_profiler
has_memory_profiler = True
except:
has_memory_profiler = False
if not has_memory_profiler:
pytest.skip("memory_profiler is not available.")
blobs_opts = {
"n_samples": 10 ** 4,
"n_features": 10 ** 4,
"centers": 10,
"random_state": 10,
"return_centers": True,
"shuffle": False,
}
# maximum memory usage in MB
actual_memory_usage, (X, y, c) = memory_profiler.memory_usage(
(partial(make_blobs, **blobs_opts), ()),
max_iterations=1,
max_usage=True,
retval=True,
)
memory_usage_X = (
blobs_opts["n_samples"] * blobs_opts["n_features"] * X.dtype.itemsize
)
memory_usage_y = blobs_opts["n_samples"] * y.dtype.itemsize
memory_usage_c = blobs_opts["centers"] * blobs_opts["n_features"] * c.dtype.itemsize
calc_memory_useage_mb = (memory_usage_X + memory_usage_y + memory_usage_c) / 1048576
    # make sure actual memory usage is relatively close to the theoretical amount
assert actual_memory_usage < calc_memory_useage_mb * 1.3
|
def test_make_blobs_memory_usage():
try:
import memory_profiler
has_memory_profiler = True
except:
has_memory_profiler = False
if not has_memory_profiler:
pytest.skip("memory_profiler is not available.")
blobs_opts = {
"n_samples": 10 ** 4,
"n_features": 10 ** 4,
"centers": 10,
"random_state": 10,
"return_centers": True,
"shuffle": False,
}
# maximum memory usage in MB
actual_memory_usage, (X, y, c) = memory_profiler.memory_usage(
(partial(make_blobs, **blobs_opts), ()),
max_iterations=1,
max_usage=True,
retval=True,
)
memory_usage_X = (
blobs_opts["n_samples"] * blobs_opts["n_features"] * X.dtype.itemsize
)
memory_usage_y = blobs_opts["n_samples"] * y.dtype.itemsize
memory_usage_c = blobs_opts["centers"] * blobs_opts["n_features"] * c.dtype.itemsize
calc_memory_useage_mib = (memory_usage_X + memory_usage_y + memory_usage_c) / 2**20
    # make sure actual memory usage is relatively close to the theoretical amount
    assert actual_memory_usage < calc_memory_useage_mib * 1.3
|
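The theoretical footprint the test compares against can be worked out by hand; a sketch assuming 8-byte (float64 data, int64 label) elements.

n_samples, n_features, centers = 10**4, 10**4, 10
itemsize = 8                                   # assumed: float64 data, int64 labels
bytes_X = n_samples * n_features * itemsize    # 800,000,000 bytes
bytes_y = n_samples * itemsize                 #      80,000 bytes
bytes_c = centers * n_features * itemsize      #     800,000 bytes
print((bytes_X + bytes_y + bytes_c) / 2**20)   # ~763.8 MiB expected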
34,921 |
def upsampling(data,
scale_h=1,
scale_w=1,
layout="NCHW",
method="nearest_neighbor",
align_corners=False):
"""Upsampling.
This operator takes data as input and does 2D scaling to the given scale factor.
In the default case, where the data_layout is `NCHW`
with data of shape (n, c, h, w)
out will have a shape (n, c, h*scale_h, w*scale_w)
method indicates the algorithm to be used while calculating the out value
and method can be one of ("bilinear", "nearest_neighbor", "bicubic")
Parameters
----------
data : tvm.relay.Expr
The input data to the operator.
scale_h : tvm.relay.Expr
The scale factor for height upsampling.
scale_w : tvm.relay.Expr
The scale factor for width upsampling.
layout : str, optional
Layout of the input.
method : str, optional
Scale method to used [nearest_neighbor, bilinear, bicubic].
align_corners : bool, optional
Whether to keep corners in proper place.
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
return _make.upsampling(data, scale_h, scale_w, layout, method, align_corners)
|
def upsampling(data,
scale_h=1,
scale_w=1,
layout="NCHW",
method="nearest_neighbor",
align_corners=False):
"""Upsampling.
This operator takes data as input and does 2D scaling to the given scale factor.
In the default case, where the data_layout is `NCHW`
with data of shape (n, c, h, w)
out will have a shape (n, c, h*scale_h, w*scale_w)
method indicates the algorithm to be used while calculating the out value
and method can be one of ("bilinear", "nearest_neighbor", "bicubic")
Parameters
----------
data : tvm.relay.Expr
The input data to the operator.
scale_h : tvm.relay.Expr
The scale factor for height upsampling.
scale_w : tvm.relay.Expr
The scale factor for width upsampling.
layout : str, optional
Layout of the input.
method : str, optional
Scale method to used [nearest_neighbor, bilinear, bicubic].
align_corners : bool, optional
Whether to keep corners in proper place.
    Returns
-------
result : tvm.relay.Expr
The computed result.
"""
return _make.upsampling(data, scale_h, scale_w, layout, method, align_corners)
|
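A hedged usage sketch for the Relay op above, assuming a TVM build whose relay.nn.upsampling exposes the scale_h/scale_w signature documented here.

from tvm import relay

x = relay.var("x", shape=(1, 3, 32, 32), dtype="float32")   # NCHW input
y = relay.nn.upsampling(x, scale_h=2, scale_w=2, layout="NCHW", method="nearest_neighbor")
print(relay.Function([x], y))   # output spatial shape becomes (64, 64)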
31,000 |
def main():
command = demisto.command()
params = demisto.params()
report_url = params.get('report_url')
verify_certificate = not params.get('insecure', False)
proxy = params.get('proxy', False)
mapper_in = params.get('mapper_in', DEFAULT_MAPPER_IN)
workday_username = params.get('credentials', {}).get('identifier')
workday_password = params.get('credentials', {}).get('password')
LOG(f'Command being called is {command}')
client = Client(
base_url=None,
verify=verify_certificate,
headers={
'Accept': 'application/json',
'Content-Type': 'application/json'
},
proxy=proxy,
ok_codes=(200, 204),
auth=requests.auth.HTTPBasicAuth(workday_username, workday_password),
report_url=report_url
)
try:
if command == 'test-module':
return_results(test_module(client, params, mapper_in))
if command == 'fetch-incidents':
'''
Checks if there are events are stored in the integration context.
If yes, it gets it from there. Else, it makes a call to Workday to get a new report
Returns the first x events (x being the fetch limit) and stores the remaining in integration context
'''
workday_context = demisto.getIntegrationContext()
events = workday_context.get('events')
last_run = demisto.getLastRun()
run_first_command = False
if last_run:
if last_run.get("sync_users"):
run_first_command = True
if not run_first_command:
workday_first_run_command(client, mapper_in)
if not events:
# Get the events from Workday by making an API call. Last run is updated only when API call is made
last_run, events = fetch_incidents(
client=client,
last_run=last_run,
fetch_time=params.get('fetch_events_time_minutes'),
mapper_in=mapper_in
)
fetch_limit = int(params.get('max_fetch'))
demisto.setLastRun(last_run)
demisto.incidents(events[:fetch_limit])
# Set the remaining events back to integration context
workday_context = {'events': events[fetch_limit:]}
demisto.setIntegrationContext(workday_context)
except Exception as e:
return_error(f'Failed to execute {demisto.command()} command, Error: {e}. Traceback: {traceback.format_exc()}')
|
def main():
command = demisto.command()
params = demisto.params()
report_url = params.get('report_url')
verify_certificate = not params.get('insecure', False)
proxy = params.get('proxy', False)
mapper_in = params.get('mapper_in', DEFAULT_MAPPER_IN)
workday_username = params.get('credentials', {}).get('identifier')
workday_password = params.get('credentials', {}).get('password')
LOG(f'Command being called is {command}')
client = Client(
base_url=None,
verify=verify_certificate,
headers={
'Accept': 'application/json',
'Content-Type': 'application/json'
},
proxy=proxy,
ok_codes=(200, 204),
auth=requests.auth.HTTPBasicAuth(workday_username, workday_password),
report_url=report_url
)
try:
if command == 'test-module':
return_results(test_module(client, params, mapper_in))
if command == 'fetch-incidents':
'''
Checks if there are events are stored in the integration context.
If yes, it gets it from there. Else, it makes a call to Workday to get a new report
Returns the first x events (x being the fetch limit) and stores the remaining in integration context
'''
workday_context = demisto.getIntegrationContext()
events = workday_context.get('events')
last_run = demisto.getLastRun()
run_first_command = False
if last_run:
if last_run.get("sync_users"):
run_first_command = True
if not run_first_command:
workday_first_run_command(client, mapper_in, report_url)
if not events:
# Get the events from Workday by making an API call. Last run is updated only when API call is made
last_run, events = fetch_incidents(
client=client,
last_run=last_run,
fetch_time=params.get('fetch_events_time_minutes'),
mapper_in=mapper_in
)
fetch_limit = int(params.get('max_fetch'))
demisto.setLastRun(last_run)
demisto.incidents(events[:fetch_limit])
# Set the remaining events back to integration context
workday_context = {'events': events[fetch_limit:]}
demisto.setIntegrationContext(workday_context)
except Exception as e:
return_error(f'Failed to execute {demisto.command()} command, Error: {e}. Traceback: {traceback.format_exc()}')
|
43,599 |
def QNode(func, device, *, interface="autograd", mutable=True, diff_method="best", h=None, properties=None):
"""QNode constructor for creating QNodes.
When applied to a quantum function and device, converts it into
a :class:`QNode` instance.
**Example:**
>>> def circuit(x):
>>> qml.RX(x, wires=0)
>>> return qml.expval(qml.PauliZ(0))
>>> dev = qml.device("default.qubit", wires=1)
>>> qnode = QNode(circuit, dev)
Args:
func (callable): a quantum function
device (~.Device): a PennyLane-compatible device
interface (str): The interface that will be used for classical backpropagation.
This affects the types of objects that can be passed to/returned from the QNode:
* ``interface='autograd'``: Allows autograd to backpropogate
through the QNode. The QNode accepts default Python types
(floats, ints, lists) as well as NumPy array arguments,
and returns NumPy arrays.
* ``interface='torch'``: Allows PyTorch to backpropogate
through the QNode.The QNode accepts and returns Torch tensors.
* ``interface='tf'``: Allows TensorFlow in eager mode to backpropogate
through the QNode.The QNode accepts and returns
TensorFlow ``tf.Variable`` and ``tf.tensor`` objects.
* ``None``: The QNode accepts default Python types
(floats, ints, lists) as well as NumPy array arguments,
and returns NumPy arrays. It does not connect to any
machine learning library automatically for backpropagation.
mutable (bool): whether the QNode circuit is mutable
diff_method (str, None): the method of differentiation to use in the created QNode.
* ``"best"``: Best available method. Uses the device directly to compute
the gradient if supported, otherwise will use the analytic parameter-shift
rule where possible with finite-difference as a fallback.
* ``"parameter-shift"``: Use the analytic parameter-shift
rule where possible with finite-difference as a fallback.
* ``"finite-diff"``: Uses numerical finite-differences.
* ``None``: a non-differentiable QNode is returned.
h (float): step size for parameter shift or the finite
difference method
properties (dict[str->Any]): additional keyword properties passed to the QNode
"""
if diff_method is None:
# QNode is not differentiable
return BaseQNode(func, device, mutable=mutable, properties=properties)
if diff_method not in ALLOWED_DIFF_METHODS:
raise ValueError(
"Differentiation method {} not recognized. Allowed "
"options are {}".format(diff_method, ALLOWED_DIFF_METHODS)
)
# Set the default model to qubit, for backwards compatability with existing plugins
# TODO: once all plugins have been updated to add `model` to their
# capabilities dictionary, update the logic here
model = device.capabilities().get("model", "qubit")
device_jacobian = device.capabilities().get("provides_jacobian", False)
if device_jacobian and (diff_method == "best"):
# hand off differentiation to the device
node = DeviceJacobianQNode(func, device, mutable=mutable, properties=properties)
elif model in PARAMETER_SHIFT_QNODES and diff_method in ("best", "parameter-shift"):
# parameter-shift analytic differentiation
node = PARAMETER_SHIFT_QNODES[model](func, device, mutable=mutable, properties=properties)
else:
# finite differences
node = JacobianQNode(func, device, mutable=mutable, h=h, properties=properties)
if interface == "torch":
return node.to_torch()
if interface == "tf":
return node.to_tf()
if interface in ("autograd", "numpy"):
# keep "numpy" for backwards compatibility
return node.to_autograd()
if interface is None:
# if no interface is specified, return the 'bare' QNode
return node
raise ValueError(
"Interface {} not recognized. Allowed "
"interfaces are {}".format(diff_method, ALLOWED_INTERFACES)
)
|
def QNode(func, device, *, interface="autograd", mutable=True, diff_method="best", h=None, properties=None):
"""QNode constructor for creating QNodes.
When applied to a quantum function and device, converts it into
a :class:`QNode` instance.
**Example:**
>>> def circuit(x):
>>> qml.RX(x, wires=0)
>>> return qml.expval(qml.PauliZ(0))
>>> dev = qml.device("default.qubit", wires=1)
>>> qnode = QNode(circuit, dev)
Args:
func (callable): a quantum function
device (~.Device): a PennyLane-compatible device
interface (str): The interface that will be used for classical backpropagation.
This affects the types of objects that can be passed to/returned from the QNode:
* ``interface='autograd'``: Allows autograd to backpropogate
through the QNode. The QNode accepts default Python types
(floats, ints, lists) as well as NumPy array arguments,
and returns NumPy arrays.
* ``interface='torch'``: Allows PyTorch to backpropogate
through the QNode.The QNode accepts and returns Torch tensors.
* ``interface='tf'``: Allows TensorFlow in eager mode to backpropogate
through the QNode.The QNode accepts and returns
TensorFlow ``tf.Variable`` and ``tf.tensor`` objects.
* ``None``: The QNode accepts default Python types
(floats, ints, lists) as well as NumPy array arguments,
and returns NumPy arrays. It does not connect to any
machine learning library automatically for backpropagation.
mutable (bool): whether the QNode circuit is mutable
diff_method (str, None): the method of differentiation to use in the created QNode.
* ``"best"``: Best available method. Uses the device directly to compute
the gradient if supported, otherwise will use the analytic parameter-shift
rule where possible with finite-difference as a fallback.
* ``"parameter-shift"``: Use the analytic parameter-shift
rule where possible with finite-difference as a fallback.
* ``"finite-diff"``: Uses numerical finite-differences.
* ``None``: a non-differentiable QNode is returned.
h (float): step size for the finite-difference method
properties (dict[str->Any]): additional keyword properties passed to the QNode
"""
if diff_method is None:
# QNode is not differentiable
return BaseQNode(func, device, mutable=mutable, properties=properties)
if diff_method not in ALLOWED_DIFF_METHODS:
raise ValueError(
"Differentiation method {} not recognized. Allowed "
"options are {}".format(diff_method, ALLOWED_DIFF_METHODS)
)
# Set the default model to qubit, for backwards compatability with existing plugins
# TODO: once all plugins have been updated to add `model` to their
# capabilities dictionary, update the logic here
model = device.capabilities().get("model", "qubit")
device_jacobian = device.capabilities().get("provides_jacobian", False)
if device_jacobian and (diff_method == "best"):
# hand off differentiation to the device
node = DeviceJacobianQNode(func, device, mutable=mutable, properties=properties)
elif model in PARAMETER_SHIFT_QNODES and diff_method in ("best", "parameter-shift"):
# parameter-shift analytic differentiation
node = PARAMETER_SHIFT_QNODES[model](func, device, mutable=mutable, properties=properties)
else:
# finite differences
node = JacobianQNode(func, device, mutable=mutable, h=h, properties=properties)
if interface == "torch":
return node.to_torch()
if interface == "tf":
return node.to_tf()
if interface in ("autograd", "numpy"):
# keep "numpy" for backwards compatibility
return node.to_autograd()
if interface is None:
# if no interface is specified, return the 'bare' QNode
return node
raise ValueError(
"Interface {} not recognized. Allowed "
"interfaces are {}".format(diff_method, ALLOWED_INTERFACES)
)
|
31,946 |
def prettify_rule(rule: dict):
pretty_rule = {
'Name': rule['@name'],
'Action': rule['action']
}
if DEVICE_GROUP:
pretty_rule['DeviceGroup'] = DEVICE_GROUP
if '@loc' in rule:
pretty_rule['Location'] = rule['@loc']
if 'category' in rule and isinstance(rule['category'], dict) and 'member' in rule['category']:
pretty_rule['CustomUrlCategory'] = rule['category']['member']
if 'application' in rule and isinstance(rule['application'], dict) and 'member' in rule['application']:
pretty_rule['Application'] = rule['application']['member']
if 'destination' in rule and isinstance(rule['destination'], dict) and 'member' in rule['destination']:
pretty_rule['Destination'] = rule['destination']['member']
if 'from' in rule and isinstance(rule['from'], dict) and 'member' in rule['from']:
pretty_rule['From'] = rule['from']['member']
if 'service' in rule and isinstance(rule['service'], dict) and 'member' in rule['service']:
pretty_rule['Service'] = rule['service']['member']
if 'to' in rule and isinstance(rule['to'], dict) and 'member' in rule['to']:
pretty_rule['To'] = rule['to']['member']
if 'source' in rule and isinstance(rule['source'], dict) and 'member' in rule['source']:
pretty_rule['Source'] = rule['source']['member']
if 'tag' in rule and isinstance(rule['tag'], dict) and 'member' in rule['tag']:
pretty_rule['Tags'] = rule['tag']['member']
if 'log-setting' in rule and isinstance(rule['log-setting'], dict) and '#text' in rule['log-setting']:
pretty_rule['LogForwardingProfile'] = rule['log-setting']['#text']
return pretty_rule
|
def prettify_rule(rule: dict):
pretty_rule = {
'Name': rule['@name'],
'Action': rule['action']
}
if DEVICE_GROUP:
pretty_rule['DeviceGroup'] = DEVICE_GROUP
if '@loc' in rule:
pretty_rule['Location'] = rule['@loc']
if isinstance(rule.get('category'), dict) and 'member' in rule['category']:
pretty_rule['CustomUrlCategory'] = rule['category']['member']
if 'application' in rule and isinstance(rule['application'], dict) and 'member' in rule['application']:
pretty_rule['Application'] = rule['application']['member']
if 'destination' in rule and isinstance(rule['destination'], dict) and 'member' in rule['destination']:
pretty_rule['Destination'] = rule['destination']['member']
if 'from' in rule and isinstance(rule['from'], dict) and 'member' in rule['from']:
pretty_rule['From'] = rule['from']['member']
if 'service' in rule and isinstance(rule['service'], dict) and 'member' in rule['service']:
pretty_rule['Service'] = rule['service']['member']
if 'to' in rule and isinstance(rule['to'], dict) and 'member' in rule['to']:
pretty_rule['To'] = rule['to']['member']
if 'source' in rule and isinstance(rule['source'], dict) and 'member' in rule['source']:
pretty_rule['Source'] = rule['source']['member']
if 'tag' in rule and isinstance(rule['tag'], dict) and 'member' in rule['tag']:
pretty_rule['Tags'] = rule['tag']['member']
if 'log-setting' in rule and isinstance(rule['log-setting'], dict) and '#text' in rule['log-setting']:
pretty_rule['LogForwardingProfile'] = rule['log-setting']['#text']
return pretty_rule
|
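A small illustration of prettify_rule() with a hypothetical rule dict, assuming the module-level DEVICE_GROUP is empty.

rule = {
    '@name': 'allow-dns',
    'action': 'allow',
    '@loc': 'shared',
    'application': {'member': ['dns']},
    'to': {'member': ['untrust']},
}
print(prettify_rule(rule))
# {'Name': 'allow-dns', 'Action': 'allow', 'Location': 'shared',
#  'Application': ['dns'], 'To': ['untrust']}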
7,423 |
def wiener(image, psf, balance, reg=None, is_real=True, clip=True):
r"""Wiener-Hunt deconvolution
Return the deconvolution with a Wiener-Hunt approach (i.e. with
Fourier diagonalisation).
Parameters
----------
image : (M, N) ndarray
Input degraded image
psf : ndarray
Point Spread Function. This is assumed to be the impulse
response (input image space) if the data-type is real, or the
transfer function (Fourier space) if the data-type is
complex. There is no constraints on the shape of the impulse
response. The transfer function must be of shape `(M, N)` if
`is_real is True`, `(M, N // 2 + 1)` otherwise (see
`np.fft.rfftn`).
balance : float
The regularisation parameter value that tunes the balance
between the data adequacy that improve frequency restoration
and the prior adequacy that reduce frequency restoration (to
avoid noise artifacts).
reg : ndarray, optional
The regularisation operator. The Laplacian by default. It can
be an impulse response or a transfer function, as for the
psf. Shape constraint is the same as for the `psf` parameter.
is_real : boolean, optional
True by default. Specify if ``psf`` and ``reg`` are provided
with hermitian hypothesis, that is only half of the frequency
plane is provided (due to the redundancy of Fourier transform
        of real signal). It applies only if ``psf`` and/or ``reg`` are
provided as transfer function. For the hermitian property see
``uft`` module or ``np.fft.rfftn``.
clip : boolean, optional
True by default. If True, pixel values of the result above 1 or
under -1 are thresholded for skimage pipeline compatibility.
Returns
-------
im_deconv : (M, N) ndarray
The deconvolved image.
Examples
--------
>>> from skimage import color, data, restoration
>>> img = color.rgb2gray(data.astronaut())
>>> from scipy.signal import convolve2d
>>> psf = np.ones((5, 5)) / 25
>>> img = convolve2d(img, psf, 'same')
>>> rng = np.random.default_rng()
>>> img += 0.1 * img.std() * rng.standard_normal(img.shape)
>>> deconvolved_img = restoration.wiener(img, psf, 0.1)
Notes
-----
This function applies the Wiener filter to a noisy and degraded
image by an impulse response (or PSF). If the data model is
.. math:: y = Hx + n
where :math:`n` is noise, :math:`H` the PSF and :math:`x` the
unknown original image, the Wiener filter is
.. math::
\hat x = F^\dagger (|\Lambda_H|^2 + \lambda |\Lambda_D|^2)
\Lambda_H^\dagger F y
where :math:`F` and :math:`F^\dagger` are the Fourier and inverse
Fourier transforms respectively, :math:`\Lambda_H` the transfer
function (or the Fourier transform of the PSF, see [Hunt] below)
and :math:`\Lambda_D` the filter to penalize the restored image
frequencies (Laplacian by default, that is penalization of high
frequency). The parameter :math:`\lambda` tunes the balance
between the data (that tends to increase high frequency, even
those coming from noise), and the regularization.
These methods are then specific to a prior model. Consequently,
the application or the true image nature must correspond to the
prior model. By default, the prior model (Laplacian) introduce
image smoothness or pixel correlation. It can also be interpreted
as high-frequency penalization to compensate the instability of
the solution with respect to the data (sometimes called noise
amplification or "explosive" solution).
Finally, the use of Fourier space implies a circulant property of
:math:`H`, see [2]_.
References
----------
.. [1] François Orieux, Jean-François Giovannelli, and Thomas
Rodet, "Bayesian estimation of regularization and point
spread function parameters for Wiener-Hunt deconvolution",
J. Opt. Soc. Am. A 27, 1593-1607 (2010)
https://www.osapublishing.org/josaa/abstract.cfm?URI=josaa-27-7-1593
https://hal.archives-ouvertes.fr/hal-00674508/document
.. [2] B. R. Hunt "A matrix theory proof of the discrete
convolution theorem", IEEE Trans. on Audio and
Electroacoustics, vol. au-19, no. 4, pp. 285-288, dec. 1971
"""
if reg is None:
reg, _ = uft.laplacian(image.ndim, image.shape, is_real=is_real)
if not np.iscomplexobj(reg):
reg = uft.ir2tf(reg, image.shape, is_real=is_real)
float_type = _supported_float_type(image.dtype)
image = image.astype(float_type, copy=False)
psf = psf.real.astype(float_type, copy=False)
reg = reg.real.astype(float_type, copy=False)
if psf.shape != reg.shape:
trans_func = uft.ir2tf(psf, image.shape, is_real=is_real)
else:
trans_func = psf
wiener_filter = np.conj(trans_func) / (np.abs(trans_func) ** 2 +
balance * np.abs(reg) ** 2)
if is_real:
deconv = uft.uirfft2(wiener_filter * uft.urfft2(image),
shape=image.shape)
else:
deconv = uft.uifft2(wiener_filter * uft.ufft2(image))
if clip:
deconv[deconv > 1] = 1
deconv[deconv < -1] = -1
return deconv
|
def wiener(image, psf, balance, reg=None, is_real=True, clip=True):
r"""Wiener-Hunt deconvolution
Return the deconvolution with a Wiener-Hunt approach (i.e. with
Fourier diagonalisation).
Parameters
----------
image : (M, N) ndarray
Input degraded image
psf : ndarray
Point Spread Function. This is assumed to be the impulse
response (input image space) if the data-type is real, or the
transfer function (Fourier space) if the data-type is
complex. There is no constraints on the shape of the impulse
response. The transfer function must be of shape `(M, N)` if
`is_real is True`, `(M, N // 2 + 1)` otherwise (see
`np.fft.rfftn`).
balance : float
The regularisation parameter value that tunes the balance
between the data adequacy that improve frequency restoration
and the prior adequacy that reduce frequency restoration (to
avoid noise artifacts).
reg : ndarray, optional
The regularisation operator. The Laplacian by default. It can
be an impulse response or a transfer function, as for the
psf. Shape constraint is the same as for the `psf` parameter.
is_real : boolean, optional
True by default. Specify if ``psf`` and ``reg`` are provided
with hermitian hypothesis, that is only half of the frequency
plane is provided (due to the redundancy of Fourier transform
        of real signal). It applies only if ``psf`` and/or ``reg`` are
provided as transfer function. For the hermitian property see
``uft`` module or ``np.fft.rfftn``.
clip : boolean, optional
True by default. If True, pixel values of the result above 1 or
under -1 are thresholded for skimage pipeline compatibility.
Returns
-------
im_deconv : (M, N) ndarray
The deconvolved image.
Examples
--------
>>> from skimage import color, data, restoration
>>> img = color.rgb2gray(data.astronaut())
>>> from scipy.signal import convolve2d
>>> psf = np.ones((5, 5)) / 25
>>> img = convolve2d(img, psf, 'same')
>>> rng = np.random.default_rng()
>>> img += 0.1 * img.std() * rng.standard_normal(img.shape)
>>> deconvolved_img = restoration.wiener(img, psf, 0.1)
Notes
-----
This function applies the Wiener filter to a noisy and degraded
image by an impulse response (or PSF). If the data model is
.. math:: y = Hx + n
where :math:`n` is noise, :math:`H` the PSF and :math:`x` the
unknown original image, the Wiener filter is
.. math::
\hat x = F^\dagger (|\Lambda_H|^2 + \lambda |\Lambda_D|^2)
\Lambda_H^\dagger F y
where :math:`F` and :math:`F^\dagger` are the Fourier and inverse
Fourier transforms respectively, :math:`\Lambda_H` the transfer
function (or the Fourier transform of the PSF, see [Hunt] below)
and :math:`\Lambda_D` the filter to penalize the restored image
frequencies (Laplacian by default, that is penalization of high
frequency). The parameter :math:`\lambda` tunes the balance
between the data (that tends to increase high frequency, even
those coming from noise), and the regularization.
These methods are then specific to a prior model. Consequently,
the application or the true image nature must correspond to the
    prior model. By default, the prior model (Laplacian) introduces
    image smoothness or pixel correlation. It can also be interpreted
    as high-frequency penalization to compensate for the instability
    of the solution with respect to the data (sometimes called noise
amplification or "explosive" solution).
Finally, the use of Fourier space implies a circulant property of
:math:`H`, see [2]_.
References
----------
.. [1] François Orieux, Jean-François Giovannelli, and Thomas
Rodet, "Bayesian estimation of regularization and point
spread function parameters for Wiener-Hunt deconvolution",
J. Opt. Soc. Am. A 27, 1593-1607 (2010)
https://www.osapublishing.org/josaa/abstract.cfm?URI=josaa-27-7-1593
https://hal.archives-ouvertes.fr/hal-00674508
.. [2] B. R. Hunt "A matrix theory proof of the discrete
convolution theorem", IEEE Trans. on Audio and
Electroacoustics, vol. au-19, no. 4, pp. 285-288, dec. 1971
"""
if reg is None:
reg, _ = uft.laplacian(image.ndim, image.shape, is_real=is_real)
if not np.iscomplexobj(reg):
reg = uft.ir2tf(reg, image.shape, is_real=is_real)
float_type = _supported_float_type(image.dtype)
image = image.astype(float_type, copy=False)
psf = psf.real.astype(float_type, copy=False)
reg = reg.real.astype(float_type, copy=False)
if psf.shape != reg.shape:
trans_func = uft.ir2tf(psf, image.shape, is_real=is_real)
else:
trans_func = psf
wiener_filter = np.conj(trans_func) / (np.abs(trans_func) ** 2 +
balance * np.abs(reg) ** 2)
if is_real:
deconv = uft.uirfft2(wiener_filter * uft.urfft2(image),
shape=image.shape)
else:
deconv = uft.uifft2(wiener_filter * uft.ufft2(image))
if clip:
deconv[deconv > 1] = 1
deconv[deconv < -1] = -1
return deconv
|
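A minimal, self-contained NumPy sketch of the core Wiener-Hunt step in the record above (a hypothetical 1-D signal and box blur are used purely for illustration; the scikit-image implementation itself works through its unitary FFT helpers in `uft`):
import numpy as np

rng = np.random.default_rng(0)
n = 256
x_true = np.zeros(n)
x_true[100:140] = 1.0                      # unknown "true" signal
psf = np.zeros(n)
psf[:5] = 1.0 / 5.0                        # box impulse response
y = np.real(np.fft.ifft(np.fft.fft(x_true) * np.fft.fft(psf)))
y += 0.01 * rng.standard_normal(n)         # degraded, noisy observation

H = np.fft.fft(psf)                        # transfer function, Lambda_H
lap = np.zeros(n)
lap[0], lap[1], lap[-1] = 2.0, -1.0, -1.0  # circular 1-D Laplacian impulse response
D = np.fft.fft(lap)                        # regularizer transfer function, Lambda_D
balance = 0.05                             # the lambda trade-off parameter
wiener_filter = np.conj(H) / (np.abs(H) ** 2 + balance * np.abs(D) ** 2)
x_hat = np.real(np.fft.ifft(wiener_filter * np.fft.fft(y)))
The division mirrors the (|Lambda_H|^2 + lambda |Lambda_D|^2)^{-1} Lambda_H^dagger factor written out in the Notes formula.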
13,783 |
def render_body(raw_body):
"""
Render raw_body to HTML.
This includes the following steps:
* Convert Markdown to HTML
* Strip non-whitelisted HTML
* Remove unbalanced HTML tags
Note that this does not prevent Markdown syntax inside a MathJax block from
being processed, which the forums JavaScript code does.
"""
rendered = markdown.markdown(raw_body)
rendered = bleach.clean(
rendered,
tags=bleach.ALLOWED_TAGS + [
"dl", "pre", "p", "br", "sup", "strike", "sub", "del", "h1", "h2", "h3", "h4", "blockquote", "dd", "dl", "dt", "kbd", "pre", "s", "hr", "img"
],
protocols=["http", "https", "ftp", "mailto"],
strip=True,
attributes={
"a": ["href", "title"],
"img": ["src", "alt", "title", "width", "height"],
}
)
# rendered = _sanitize_html(rendered)
# rendered = _remove_unpaired_tags(rendered)
return rendered
|
def render_body(raw_body):
"""
Render raw_body to HTML.
This includes the following steps:
* Convert Markdown to HTML
* Strip non-whitelisted HTML
* Remove unbalanced HTML tags
Note that this does not prevent Markdown syntax inside a MathJax block from
being processed, which the forums JavaScript code does.
"""
rendered = markdown.markdown(raw_body)
sanitized_html = bleach.clean(
rendered,
tags=bleach.ALLOWED_TAGS + [
"dl", "pre", "p", "br", "sup", "strike", "sub", "del", "h1", "h2", "h3", "h4", "blockquote", "dd", "dl", "dt", "kbd", "pre", "s", "hr", "img"
],
protocols=["http", "https", "ftp", "mailto"],
strip=True,
attributes={
"a": ["href", "title"],
"img": ["src", "alt", "title", "width", "height"],
}
)
# rendered = _sanitize_html(rendered)
# rendered = _remove_unpaired_tags(rendered)
return rendered
|
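For context on the sanitization step in the record above, a small sketch of how Markdown rendering followed by bleach.clean behaves (assuming the `markdown` and `bleach` packages are installed; the exact set of default allowed tags varies between bleach releases):
import bleach
import markdown

raw_body = "**bold** <script>alert('x')</script> <em>ok</em>"
rendered = markdown.markdown(raw_body)
cleaned = bleach.clean(
    rendered,
    tags=list(bleach.ALLOWED_TAGS) + ["p"],  # list() keeps this working on newer bleach releases
    strip=True,
)
# Disallowed markup such as the <script> tags is stripped away, while
# whitelisted tags like <strong>/<em> and the added <p> are kept.
print(cleaned)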
17,876 |
def _mantel_stats_pearson(x, y, permutations):
"""Compute original and permuted stats using pearsonr.
Parameters
----------
x, y : DistanceMatrix
Input distance matrices to compare.
permutations : int
Number of times to randomly permute `x` when assessing statistical
significance. Must be greater than or equal to zero. If zero,
statistical significance calculations will be skipped and
permuted_stats will be an empty array.
Returns
-------
orig_stat : 1D array_like
Correlation coefficient of the test.
permuted_stats : 1D array_like
Permutted correlation coefficients of the test.
"""
y_flat = y.condensed_form()
return _mantel_stats_pearson_flat(x, y_flat, permutations)
|
def _mantel_stats_pearson(x, y, permutations):
"""Compute original and permuted stats using pearsonr.
Parameters
----------
x, y : DistanceMatrix
Input distance matrices to compare.
permutations : int
Number of times to randomly permute `x` when assessing statistical
significance. Must be greater than or equal to zero. If zero,
statistical significance calculations will be skipped and
permuted_stats will be an empty array.
Returns
-------
orig_stat : 1D array_like
Correlation coefficient of the test.
permuted_stats : 1D array_like
Permuted correlation coefficients of the test.
"""
y_flat = y.condensed_form()
return _mantel_stats_pearson_flat(x, y_flat, permutations)
|
7,675 |
def is_broken_symlink(path):
"""Checks whether a file is a broken symlink
:param path: The path to the file
"""
return os.path.islink(path) and not os.path.exists(path)
|
def is_broken_symlink(path):
"""Check whether a file is a broken symlink.
:param path: The path to the file
"""
return os.path.islink(path) and not os.path.exists(path)
|
55,839 |
def run_fork_test(spec, pre_state):
yield 'pre', pre_state
post_state = spec.upgrade_to_lightclient_patch(pre_state)
# Stable fields
stable_fields = [
'genesis_time', 'genesis_validators_root', 'slot',
# History
'latest_block_header', 'block_roots', 'state_roots', 'historical_roots',
# Eth1
'eth1_data', 'eth1_data_votes', 'eth1_deposit_index',
# Registry
'validators', 'balances',
# Randomness
'randao_mixes',
# Slashings
'slashings',
# Finality
'justification_bits', 'previous_justified_checkpoint', 'current_justified_checkpoint', 'finalized_checkpoint',
]
for field in stable_fields:
assert getattr(pre_state, field) == getattr(post_state, field)
# Modified fields
modified_fields = ['fork']
for field in modified_fields:
assert getattr(pre_state, field) != getattr(post_state, field)
assert pre_state.fork.current_version == post_state.fork.previous_version
assert post_state.fork.current_version == spec.LIGHTCLIENT_PATCH_FORK_VERSION
assert post_state.fork.epoch == spec.get_current_epoch(post_state)
yield 'post', post_state
|
def run_fork_test(spec, pre_state):
yield 'pre', pre_state
post_state = spec.upgrade_to_lightclient_patch(pre_state)
# Stable fields
stable_fields = [
'genesis_time', 'genesis_validators_root', 'slot',
# History
'latest_block_header', 'block_roots', 'state_roots', 'historical_roots',
# Eth1
'eth1_data', 'eth1_data_votes', 'eth1_deposit_index',
# Registry
'validators', 'balances',
# Randomness
'randao_mixes',
# Slashings
'slashings',
# Finality
'justification_bits', 'previous_justified_checkpoint', 'current_justified_checkpoint', 'finalized_checkpoint',
]
for field in stable_fields:
assert getattr(pre_state, field) == getattr(post_state, field)
# Modified fields
modified_fields = ['fork']
for field in modified_fields:
assert getattr(pre_state, field) != getattr(post_state, field)
assert pre_state.fork.current_version == post_state.fork.previous_version
assert post_state.fork.current_version == post_spec.LIGHTCLIENT_PATCH_FORK_VERSION
assert post_state.fork.epoch == post_spec.get_current_epoch(post_state)
yield 'post', post_state
|
20,541 |
def main(argv=None):
parser = get_parser()
arguments = parser.parse_args(argv)
verbose = arguments.v
set_loglevel(verbose=verbose)
# Default params
param = Param()
# Get parser info
fname_data = arguments.i
fname_mask = arguments.m
fname_mask_noise = arguments.m_noise
method = arguments.method
file_name = arguments.o
# Check parameters
if method in ['diff', 'single']:
if not fname_mask:
raise SCTArgumentParser.error(parser, f"You need to provide a mask with -method {method}.")
# Load data
im_data = Image(fname_data)
data = im_data.data
dim = len(data.shape)
if fname_mask:
mask = Image(fname_mask).data
# Check dimensionality
if method in ['diff', 'mult']:
if dim != 4:
raise ValueError(f"Input data dimension: {dim}. Input dimension for this method should be 4.")
if method in ['single']:
if dim not in [3, 4]:
raise ValueError(f"Input data dimension: {dim}. Input dimension for this method should be 3 or 4.")
# Check dimensionality of mask
if fname_mask:
if len(mask.shape) != 3:
raise ValueError(f"Input mask dimension: {dim}. Input dimension for the mask should be 3.")
# Retrieve selected volumes
index_vol = parse_num_list(arguments.vol)
if not index_vol:
if method in ['diff', 'mult']:
index_vol = range(data.shape[3])
elif method in ['single']:
index_vol = [0]
# Compute SNR
# NB: "time" is assumed to be the 4th dimension of the variable "data"
if method == 'mult':
# Compute mean and STD across time
data_mean = np.mean(data[:, :, :, index_vol], axis=3)
data_std = np.std(data[:, :, :, index_vol], axis=3, ddof=1)
# Generate mask where std is different from 0
mask_std_nonzero = np.where(data_std > param.almost_zero)
snr_map = np.zeros_like(data_mean)
snr_map[mask_std_nonzero] = data_mean[mask_std_nonzero] / data_std[mask_std_nonzero]
# Output SNR map
fname_snr = add_suffix(fname_data, '_SNR-' + method)
im_snr = empty_like(im_data)
im_snr.data = snr_map
im_snr.save(fname_snr, dtype=np.float32)
# Output non-zero mask
fname_stdnonzero = add_suffix(fname_data, '_mask-STD-nonzero' + method)
im_stdnonzero = empty_like(im_data)
data_stdnonzero = np.zeros_like(data_mean)
data_stdnonzero[mask_std_nonzero] = 1
im_stdnonzero.data = data_stdnonzero
im_stdnonzero.save(fname_stdnonzero, dtype=np.float32)
# Compute SNR in ROI
if fname_mask:
snr_roi = np.average(snr_map[mask_std_nonzero], weights=mask[mask_std_nonzero])
elif method == 'diff':
# Check user selected exactly 2 volumes for this method.
if not len(index_vol) == 2:
raise ValueError(f"Number of selected volumes: {len(index_vol)}. The method 'diff' should be used with "
f"exactly 2 volumes. You can specify the number of volumes with the flag '-vol'.")
data_2vol = np.take(data, index_vol, axis=3)
# Compute mean in ROI
data_mean = np.mean(data_2vol, axis=3)
mean_in_roi = np.average(data_mean, weights=mask)
data_sub = np.subtract(data_2vol[:, :, :, 1], data_2vol[:, :, :, 0])
_, std_in_roi = weighted_avg_and_std(data_sub, mask)
# Compute SNR, correcting for Rayleigh noise (see eq. 7 in Dietrich et al.)
snr_roi = (2 / np.sqrt(2)) * mean_in_roi / std_in_roi
elif method == 'single':
# Check that the input volume is 3D, or if it is 4D, that the user selected exactly 1 volume for this method.
if dim == 3:
data3d = data
elif dim == 4:
if not len(index_vol) == 1:
raise ValueError(f"Selected volumes: {index_vol}. The method 'single' should be used with "
f"exactly 1 volume. You can specify the number of volumes with the flag '-vol'.")
data3d = np.squeeze(data[..., index_vol])
# Check that input noise mask is provided
if fname_mask_noise:
mask_noise = Image(fname_mask_noise).data
else:
raise RuntimeError("A noise mask is mandatory with '-method single'.")
# Compute mean in ROI
mean_in_roi = np.average(data3d, weights=mask)
# Compute standard deviation in background
std_in_roi = np.std(data3d[mask_noise])
# Compute SNR, correcting for Rayleigh noise (see eq. A12 in Dietrich et al.)
snr_roi = np.sqrt((4 - np.pi) / 2) * mean_in_roi / std_in_roi
# Display result
if fname_mask:
printv('\nSNR_' + method + ' = ' + str(snr_roi) + '\n', type='info')
# Added function for text file
if file_name is not None:
with open(file_name, "w") as f:
f.write(str(snr_roi))
printv('\nFile saved to ' + file_name)
|
def main(argv=None):
parser = get_parser()
arguments = parser.parse_args(argv)
verbose = arguments.v
set_loglevel(verbose=verbose)
# Default params
param = Param()
# Get parser info
fname_data = arguments.i
fname_mask = arguments.m
fname_mask_noise = arguments.m_noise
method = arguments.method
file_name = arguments.o
# Check parameters
if method in ['diff', 'single']:
if not fname_mask:
raise SCTArgumentParser.error(parser, f"Argument '-m' must be specified when using '-method {method}'.")
# Load data
im_data = Image(fname_data)
data = im_data.data
dim = len(data.shape)
if fname_mask:
mask = Image(fname_mask).data
# Check dimensionality
if method in ['diff', 'mult']:
if dim != 4:
raise ValueError(f"Input data dimension: {dim}. Input dimension for this method should be 4.")
if method in ['single']:
if dim not in [3, 4]:
raise ValueError(f"Input data dimension: {dim}. Input dimension for this method should be 3 or 4.")
# Check dimensionality of mask
if fname_mask:
if len(mask.shape) != 3:
raise ValueError(f"Input mask dimension: {dim}. Input dimension for the mask should be 3.")
# Retrieve selected volumes
index_vol = parse_num_list(arguments.vol)
if not index_vol:
if method in ['diff', 'mult']:
index_vol = range(data.shape[3])
elif method in ['single']:
index_vol = [0]
# Compute SNR
# NB: "time" is assumed to be the 4th dimension of the variable "data"
if method == 'mult':
# Compute mean and STD across time
data_mean = np.mean(data[:, :, :, index_vol], axis=3)
data_std = np.std(data[:, :, :, index_vol], axis=3, ddof=1)
# Generate mask where std is different from 0
mask_std_nonzero = np.where(data_std > param.almost_zero)
snr_map = np.zeros_like(data_mean)
snr_map[mask_std_nonzero] = data_mean[mask_std_nonzero] / data_std[mask_std_nonzero]
# Output SNR map
fname_snr = add_suffix(fname_data, '_SNR-' + method)
im_snr = empty_like(im_data)
im_snr.data = snr_map
im_snr.save(fname_snr, dtype=np.float32)
# Output non-zero mask
fname_stdnonzero = add_suffix(fname_data, '_mask-STD-nonzero' + method)
im_stdnonzero = empty_like(im_data)
data_stdnonzero = np.zeros_like(data_mean)
data_stdnonzero[mask_std_nonzero] = 1
im_stdnonzero.data = data_stdnonzero
im_stdnonzero.save(fname_stdnonzero, dtype=np.float32)
# Compute SNR in ROI
if fname_mask:
snr_roi = np.average(snr_map[mask_std_nonzero], weights=mask[mask_std_nonzero])
elif method == 'diff':
# Check user selected exactly 2 volumes for this method.
if not len(index_vol) == 2:
raise ValueError(f"Number of selected volumes: {len(index_vol)}. The method 'diff' should be used with "
f"exactly 2 volumes. You can specify the number of volumes with the flag '-vol'.")
data_2vol = np.take(data, index_vol, axis=3)
# Compute mean in ROI
data_mean = np.mean(data_2vol, axis=3)
mean_in_roi = np.average(data_mean, weights=mask)
data_sub = np.subtract(data_2vol[:, :, :, 1], data_2vol[:, :, :, 0])
_, std_in_roi = weighted_avg_and_std(data_sub, mask)
# Compute SNR, correcting for Rayleigh noise (see eq. 7 in Dietrich et al.)
snr_roi = (2 / np.sqrt(2)) * mean_in_roi / std_in_roi
elif method == 'single':
# Check that the input volume is 3D, or if it is 4D, that the user selected exactly 1 volume for this method.
if dim == 3:
data3d = data
elif dim == 4:
if not len(index_vol) == 1:
raise ValueError(f"Selected volumes: {index_vol}. The method 'single' should be used with "
f"exactly 1 volume. You can specify the number of volumes with the flag '-vol'.")
data3d = np.squeeze(data[..., index_vol])
# Check that input noise mask is provided
if fname_mask_noise:
mask_noise = Image(fname_mask_noise).data
else:
raise RuntimeError("A noise mask is mandatory with '-method single'.")
# Compute mean in ROI
mean_in_roi = np.average(data3d, weights=mask)
# Compute standard deviation in background
std_in_roi = np.std(data3d[mask_noise])
# Compute SNR, correcting for Rayleigh noise (see eq. A12 in Dietrich et al.)
snr_roi = np.sqrt((4 - np.pi) / 2) * mean_in_roi / std_in_roi
# Display result
if fname_mask:
printv('\nSNR_' + method + ' = ' + str(snr_roi) + '\n', type='info')
# Added function for text file
if file_name is not None:
with open(file_name, "w") as f:
f.write(str(snr_roi))
printv('\nFile saved to ' + file_name)
|
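A small synthetic-data illustration of the 'diff' estimator used in the record above (hypothetical values; the 2/sqrt(2) factor compensates for the noise variance doubling when two acquisitions are subtracted, cf. Dietrich et al.):
import numpy as np

rng = np.random.default_rng(0)
signal, sigma = 100.0, 5.0
vol1 = signal + sigma * rng.standard_normal((32, 32, 32))
vol2 = signal + sigma * rng.standard_normal((32, 32, 32))
mask = np.ones((32, 32, 32), dtype=bool)        # whole volume used as the ROI here

mean_in_roi = np.mean(((vol1 + vol2) / 2.0)[mask])
std_in_roi = np.std((vol2 - vol1)[mask])        # std of the difference image
snr_roi = (2 / np.sqrt(2)) * mean_in_roi / std_in_roi
print(snr_roi)                                  # close to signal / sigma = 20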
39,277 |
def Arrow(start=(0.,0.,0.), direction=(1.,0.,0.), tip_length=0.25,
tip_radius=0.1, tip_resolution=20, shaft_radius=0.05,
shaft_resolution=20, scale=None):
"""Create a vtk Arrow.
Parameters
----------
start : np.ndarray
Start location in [x, y, z]
direction : list or np.ndarray
Direction the arrow points to in [x, y, z]
tip_length : float, optional
Length of the tip.
tip_radius : float, optional
Radius of the tip.
tip_resolution : int, optional
Number of faces around the tip.
shaft_radius : float, optional
Radius of the shaft.
shaft_resolution : int, optional
Number of faces around the shaft.
scale : float or str, optional
Scale factor of the entire object, default is None (i.e. scale of 1).
'auto' scales to length of direction array.
Return
------
arrow : pyvista.PolyData
Arrow surface.
"""
# Create arrow object
arrow = vtk.vtkArrowSource()
arrow.SetTipLength(tip_length)
arrow.SetTipRadius(tip_radius)
arrow.SetTipResolution(tip_resolution)
arrow.SetShaftRadius(shaft_radius)
arrow.SetShaftResolution(shaft_resolution)
arrow.Update()
surf = pyvista.PolyData(arrow.GetOutput())
if scale == 'auto':
scale = float(np.linalg.norm(direction))
if isinstance(scale, float) or isinstance(scale, int):
surf.points *= scale
elif scale is not None:
raise("Scale must be either float, int or 'auto'.")
translate(surf, start, direction)
return surf
|
def Arrow(start=(0.,0.,0.), direction=(1.,0.,0.), tip_length=0.25,
tip_radius=0.1, tip_resolution=20, shaft_radius=0.05,
shaft_resolution=20, scale=None):
"""Create a vtk Arrow.
Parameters
----------
start : np.ndarray
Start location in [x, y, z]
direction : list or np.ndarray
Direction the arrow points to in [x, y, z]
tip_length : float, optional
Length of the tip.
tip_radius : float, optional
Radius of the tip.
tip_resolution : int, optional
Number of faces around the tip.
shaft_radius : float, optional
Radius of the shaft.
shaft_resolution : int, optional
Number of faces around the shaft.
scale : float or str, optional
Scale factor of the entire object, default is None (i.e. scale of 1).
'auto' scales to length of direction array.
Return
------
arrow : pyvista.PolyData
Arrow surface.
"""
# Create arrow object
arrow = vtk.vtkArrowSource()
arrow.SetTipLength(tip_length)
arrow.SetTipRadius(tip_radius)
arrow.SetTipResolution(tip_resolution)
arrow.SetShaftRadius(shaft_radius)
arrow.SetShaftResolution(shaft_resolution)
arrow.Update()
surf = pyvista.PolyData(arrow.GetOutput())
if scale == 'auto':
scale = float(np.linalg.norm(direction))
if isinstance(scale, float) or isinstance(scale, int):
surf.points *= scale
elif scale is not None:
raise TypeError("Scale must be either float, int or 'auto'.")
translate(surf, start, direction)
return surf
|
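A brief usage sketch for the scale handling above (hypothetical values, for versions of pyvista exposing the `scale` argument shown in the record): with scale='auto' the unit arrow is multiplied by the norm of the direction vector, so a direction of (0, 0, 2) gives an arrow about twice the default length.
import pyvista

arrow = pyvista.Arrow(start=(0.0, 0.0, 0.0), direction=(0.0, 0.0, 2.0), scale='auto')
arrow.plot(show_edges=True)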
59,142 |
def sam(
adata: AnnData,
max_iter: int = 10,
num_norm_avg: int = 50,
k: int = 20,
distance: str = 'correlation',
standardization: Optional[str] = 'Normalizer',
weight_pcs: bool = True,
npcs: Optional[int] = None,
n_genes: Optional[int] = None,
projection: Optional[str] = 'umap',
inplace: bool = True,
verbose: bool = True,
) -> Optional[AnnData]:
"""Self-Assembling Manifolds single-cell RNA sequencing analysis tool.
SAM iteratively rescales the input gene expression matrix to emphasize
genes that are spatially variable along the intrinsic manifold of the data.
It outputs the gene weights, nearest neighbor matrix, and a 2D projection.
The AnnData input should contain unstandardized, non-negative values.
Preferably, the data should be log-normalized and no genes should be filtered out.
Parameters
----------
k - int, optional, default 20
The number of nearest neighbors to identify for each cell.
distance : string, optional, default 'correlation'
The distance metric to use when identifying nearest neighbors.
Can be any of the distance metrics supported by sklearn's 'pdist'.
max_iter - int, optional, default 10
The maximum number of iterations SAM will run.
projection - str, optional, default 'umap'
If 'tsne', generates a t-SNE embedding. If 'umap', generates a UMAP
embedding. Otherwise, no embedding will be generated.
standardization - str, optional, default 'Normalizer'
If 'Normalizer', use sklearn.preprocessing.Normalizer, which
normalizes expression data prior to PCA such that each cell has
unit L2 norm. If 'StandardScaler', use
sklearn.preprocessing.StandardScaler, which normalizes expression
data prior to PCA such that each gene has zero mean and unit
variance. Otherwise, do not normalize the expression data. We
recommend using 'StandardScaler' for large datasets with many
expected cell types and 'Normalizer' otherwise.
num_norm_avg - int, optional, default 50
The top 'num_norm_avg' dispersions are averaged to determine the
normalization factor when calculating the weights. This prevents
genes with large spatial dispersions from skewing the distribution
of weights.
weight_pcs - bool, optional, default True
If True, scale the principal components by their eigenvalues. In
datasets with many expected cell types, setting this to False might
improve the resolution as these cell types might be encoded by low-
variance principal components.
npcs - int, optional, default None,
Determines the number of top principal components selected at each
iteration of the SAM algorithm. If None, this number is chosen
automatically based on the size of the dataset. If weight_pcs is
set to True, this parameter primarily affects the runtime of the SAM
algorithm (more PCs = longer runtime).
n_genes - int, optional, default None:
Determines the number of top SAM-weighted genes to use at each iteration
of the SAM algorithm. If None, this number is chosen automatically
based on the size of the dataset. This parameter primarily affects
the runtime of the SAM algorithm (more genes = longer runtime).
inplace - bool, optional, default True:
Set fields in `adata` if True. Otherwise, returns a copy.
verbose - bool, optional, default True:
If True, displays SAM log statements.
Returns
-------
sam - SAM
The SAM object
adata - AnnData
`.var['weights']`
SAM weights for each gene.
`.var['spatial_dispersions']`
Spatial dispersions for each gene (these are used to compute the
SAM weights)
`.var['mask_genes']`
If preprocessed with SAM, this boolean vector indicates which genes
were filtered out (=False).
`.uns['preprocess_args']`
Dictionary of parameters used for preprocessing.
`.uns['run_args']`
Dictionary of parameters used for running SAM.
`.uns['pca_obj']`
The sklearn.decomposition.PCA object.
`.uns['X_processed']`
The standardized and SAM-weighted data fed into PCA.
`.uns['neighbors']`
A dictionary with key 'connectivities' containing the kNN adjacency
matrix output by SAM. If built-in scanpy dimensionality reduction
methods are to be used using the SAM-output AnnData, users
should recompute the neighbors using `.obs['X_pca']` with
`scanpy.pp.neighbors`.
`.uns['ranked_genes']`
Gene IDs ranked in descending order by their SAM weights.
`.obsm['X_pca']`
The principal components output by SAM.
`.obsm['X_umap']`
The UMAP projection output by SAM.
`.layers['X_disp']`
The expression matrix used for nearest-neighbor averaging.
`.layers['X_knn_avg']`
The nearest-neighbor-averaged expression data used for computing the
spatial dispersions of genes.
Example
-------
>>> import scanpy.external as sce
>>> import scanpy as sc
*** Running SAM ***
Assuming we are given an AnnData object called `adata`, we can run the SAM
algorithm as follows:
>>> sam,adata = sce.tl.SAM(adata,inplace=True)
The input AnnData object should contain unstandardized, non-negative
expression values. Preferably, the data should be log-normalized and no
genes should be filtered out.
Please see the documentation for a description of all available parameters.
For more detailed tutorials, please visit the original Github repository:
https://github.com/atarashansky/self-assembling-manifold/tree/master/tutorial
*** Plotting ***
To visualize the output, we can use the built-in `scatter` function (this
assumes that `matplotlib` is installed.)
>>> sam.scatter(projection = 'X_umap')
`scatter` accepts all keyword arguments used in the
`matplotlib.pyplot.scatter` function. Please visit the plotting tutorials
for more information:
https://github.com/atarashansky/self-assembling-manifold/tree/master/tutorial/SAM_Plotting
*** SAMGUI ***
SAM comes with the SAMGUI module, a graphical-user interface written with
`Plotly` and `ipythonwidgets` for interactively exploring and annotating
the scRNAseq data and running SAM.
Dependencies can be installed with Anaconda by following the instructions in
the self-assembling-manifold Github README:
https://github.com/atarashansky/self-assembling-manifold
In a Jupyter notebook, execute the following to launch the interface:
>>> from SAMGUI import SAMGUI
>>> sam_gui = SAMGUI(sam) # sam is your SAM object
>>> sam_gui.SamPlot
    This can also be enabled in Jupyter Lab by following the instructions in the
self-assembling-manifold README.
"""
logg.info('Self-assembling manifold')
try:
from SAM import SAM
except ImportError:
raise ImportError(
'\nplease install sam-algorithm: \n\n'
'\tgit clone git://github.com/atarashansky/self-assembling-manifold.git\n'
'\tcd self-assembling-manifold\n'
'\tpip install .'
)
s = SAM(counts=adata, inplace=inplace)
logg.info('Running SAM')
s.run(
max_iter=max_iter,
num_norm_avg=num_norm_avg,
k=k,
distance=distance,
preprocessing=standardization,
weight_PCs=weight_pcs,
npcs=npcs,
n_genes=n_genes,
projection=projection,
verbose=verbose,
)
return (s, adata) if inplace else (s, s.adata)
|
def sam(
adata: AnnData,
max_iter: int = 10,
num_norm_avg: int = 50,
k: int = 20,
distance: str = 'correlation',
standardization: Optional[str] = 'Normalizer',
weight_pcs: bool = True,
npcs: Optional[int] = None,
n_genes: Optional[int] = None,
projection: Optional[str] = 'umap',
inplace: bool = True,
verbose: bool = True,
) -> Optional[AnnData]:
"""Self-Assembling Manifolds single-cell RNA sequencing analysis tool.
SAM iteratively rescales the input gene expression matrix to emphasize
genes that are spatially variable along the intrinsic manifold of the data.
It outputs the gene weights, nearest neighbor matrix, and a 2D projection.
The AnnData input should contain unstandardized, non-negative values.
Preferably, the data should be log-normalized and no genes should be filtered out.
Parameters
----------
k - int, optional, default 20
The number of nearest neighbors to identify for each cell.
distance : string, optional, default 'correlation'
The distance metric to use when identifying nearest neighbors.
Can be any of the distance metrics supported by
:func:`~scipy.spatial.distance.pdist`.
max_iter - int, optional, default 10
The maximum number of iterations SAM will run.
projection - str, optional, default 'umap'
If 'tsne', generates a t-SNE embedding. If 'umap', generates a UMAP
embedding. Otherwise, no embedding will be generated.
standardization - str, optional, default 'Normalizer'
If 'Normalizer', use sklearn.preprocessing.Normalizer, which
normalizes expression data prior to PCA such that each cell has
unit L2 norm. If 'StandardScaler', use
sklearn.preprocessing.StandardScaler, which normalizes expression
data prior to PCA such that each gene has zero mean and unit
variance. Otherwise, do not normalize the expression data. We
recommend using 'StandardScaler' for large datasets with many
expected cell types and 'Normalizer' otherwise.
num_norm_avg - int, optional, default 50
The top 'num_norm_avg' dispersions are averaged to determine the
normalization factor when calculating the weights. This prevents
genes with large spatial dispersions from skewing the distribution
of weights.
weight_pcs - bool, optional, default True
If True, scale the principal components by their eigenvalues. In
datasets with many expected cell types, setting this to False might
improve the resolution as these cell types might be encoded by low-
variance principal components.
npcs - int, optional, default None,
Determines the number of top principal components selected at each
iteration of the SAM algorithm. If None, this number is chosen
automatically based on the size of the dataset. If weight_pcs is
set to True, this parameter primarily affects the runtime of the SAM
algorithm (more PCs = longer runtime).
n_genes - int, optional, default None:
Determines the number of top SAM-weighted genes to use at each iteration
of the SAM algorithm. If None, this number is chosen automatically
based on the size of the dataset. This parameter primarily affects
the runtime of the SAM algorithm (more genes = longer runtime).
inplace - bool, optional, default True:
Set fields in `adata` if True. Otherwise, returns a copy.
verbose - bool, optional, default True:
If True, displays SAM log statements.
Returns
-------
sam - SAM
The SAM object
adata - AnnData
`.var['weights']`
SAM weights for each gene.
`.var['spatial_dispersions']`
Spatial dispersions for each gene (these are used to compute the
SAM weights)
`.var['mask_genes']`
If preprocessed with SAM, this boolean vector indicates which genes
were filtered out (=False).
`.uns['preprocess_args']`
Dictionary of parameters used for preprocessing.
`.uns['run_args']`
Dictionary of parameters used for running SAM.
`.uns['pca_obj']`
The sklearn.decomposition.PCA object.
`.uns['X_processed']`
The standardized and SAM-weighted data fed into PCA.
`.uns['neighbors']`
A dictionary with key 'connectivities' containing the kNN adjacency
matrix output by SAM. If built-in scanpy dimensionality reduction
methods are to be used using the SAM-output AnnData, users
should recompute the neighbors using `.obs['X_pca']` with
`scanpy.pp.neighbors`.
`.uns['ranked_genes']`
Gene IDs ranked in descending order by their SAM weights.
`.obsm['X_pca']`
The principal components output by SAM.
`.obsm['X_umap']`
The UMAP projection output by SAM.
`.layers['X_disp']`
The expression matrix used for nearest-neighbor averaging.
`.layers['X_knn_avg']`
The nearest-neighbor-averaged expression data used for computing the
spatial dispersions of genes.
Example
-------
>>> import scanpy.external as sce
>>> import scanpy as sc
*** Running SAM ***
Assuming we are given an AnnData object called `adata`, we can run the SAM
algorithm as follows:
>>> sam,adata = sce.tl.SAM(adata,inplace=True)
The input AnnData object should contain unstandardized, non-negative
expression values. Preferably, the data should be log-normalized and no
genes should be filtered out.
Please see the documentation for a description of all available parameters.
For more detailed tutorials, please visit the original Github repository:
https://github.com/atarashansky/self-assembling-manifold/tree/master/tutorial
*** Plotting ***
To visualize the output, we can use the built-in `scatter` function (this
assumes that `matplotlib` is installed.)
>>> sam.scatter(projection = 'X_umap')
`scatter` accepts all keyword arguments used in the
`matplotlib.pyplot.scatter` function. Please visit the plotting tutorials
for more information:
https://github.com/atarashansky/self-assembling-manifold/tree/master/tutorial/SAM_Plotting
*** SAMGUI ***
SAM comes with the SAMGUI module, a graphical-user interface written with
`Plotly` and `ipythonwidgets` for interactively exploring and annotating
the scRNAseq data and running SAM.
Dependencies can be installed with Anaconda by following the instructions in
the self-assembling-manifold Github README:
https://github.com/atarashansky/self-assembling-manifold
In a Jupyter notebook, execute the following to launch the interface:
>>> from SAMGUI import SAMGUI
>>> sam_gui = SAMGUI(sam) # sam is your SAM object
>>> sam_gui.SamPlot
    This can also be enabled in Jupyter Lab by following the instructions in the
self-assembling-manifold README.
"""
logg.info('Self-assembling manifold')
try:
from SAM import SAM
except ImportError:
raise ImportError(
'\nplease install sam-algorithm: \n\n'
'\tgit clone git://github.com/atarashansky/self-assembling-manifold.git\n'
'\tcd self-assembling-manifold\n'
'\tpip install .'
)
s = SAM(counts=adata, inplace=inplace)
logg.info('Running SAM')
s.run(
max_iter=max_iter,
num_norm_avg=num_norm_avg,
k=k,
distance=distance,
preprocessing=standardization,
weight_PCs=weight_pcs,
npcs=npcs,
n_genes=n_genes,
projection=projection,
verbose=verbose,
)
return (s, adata) if inplace else (s, s.adata)
|
35,568 |
def cross_validation(model, horizon, period=None, initial=None, parallel=None, cutoffs=None, disable_tqdm=False):
"""Cross-Validation for time series.
Computes forecasts from historical cutoff points, which user can input.
If not provided, begins from (end - horizon) and works backwards, making
cutoffs with a spacing of period until initial is reached.
When period is equal to the time interval of the data, this is the
technique described in https://robjhyndman.com/hyndsight/tscv/ .
Parameters
----------
model: Prophet class object. Fitted Prophet model.
horizon: string with pd.Timedelta compatible style, e.g., '5 days',
'3 hours', '10 seconds'.
period: string with pd.Timedelta compatible style. Simulated forecast will
be done at every this period. If not provided, 0.5 * horizon is used.
initial: string with pd.Timedelta compatible style. The first training
period will include at least this much data. If not provided,
3 * horizon is used.
cutoffs: list of pd.Timestamp specifying cutoffs to be used during
cross validation. If not provided, they are generated as described
above.
parallel : {None, 'processes', 'threads', 'dask', object}
How to parallelize the forecast computation. By default no parallelism
is used.
* None : No parallelism.
    * 'processes' : Parallelize with concurrent.futures.ProcessPoolExecutor.
* 'threads' : Parallelize with concurrent.futures.ThreadPoolExecutor.
Note that some operations currently hold Python's Global Interpreter
Lock, so parallelizing with threads may be slower than training
sequentially.
* 'dask': Parallelize with Dask.
This requires that a dask.distributed Client be created.
* object : Any instance with a `.map` method. This method will
be called with :func:`single_cutoff_forecast` and a sequence of
iterables where each element is the tuple of arguments to pass to
:func:`single_cutoff_forecast`
.. code-block::
class MyBackend:
def map(self, func, *iterables):
results = [
func(*args)
for args in zip(*iterables)
]
return results
Returns
-------
A pd.DataFrame with the forecast, actual value and cutoff.
"""
df = model.history.copy().reset_index(drop=True)
horizon = pd.Timedelta(horizon)
predict_columns = ['ds', 'yhat']
if model.uncertainty_samples:
predict_columns.extend(['yhat_lower', 'yhat_upper'])
# Identify largest seasonality period
period_max = 0.
for s in model.seasonalities.values():
period_max = max(period_max, s['period'])
seasonality_dt = pd.Timedelta(str(period_max) + ' days')
if cutoffs is None:
# Set period
period = 0.5 * horizon if period is None else pd.Timedelta(period)
# Set initial
if initial is None:
initial = max(3 * horizon, seasonality_dt)
else:
initial = pd.Timedelta(initial)
# Compute Cutoffs
cutoffs = generate_cutoffs(df, horizon, initial, period)
else:
initial = cutoffs[0] - df['ds'].min()
# Check if the initial window
# (that is, the amount of time between the start of the history and the first cutoff)
# is less than the maximum seasonality period
if initial < seasonality_dt:
msg = 'Seasonality has period of {} days '.format(period_max)
msg += 'which is larger than initial window. '
msg += 'Consider increasing initial.'
logger.warning(msg)
if parallel:
valid = {"threads", "processes", "dask"}
if parallel == "threads":
pool = concurrent.futures.ThreadPoolExecutor()
elif parallel == "processes":
pool = concurrent.futures.ProcessPoolExecutor()
elif parallel == "dask":
try:
from dask.distributed import get_client
except ImportError as e:
raise ImportError("parallel='dask' requies the optional "
"dependency dask.") from e
pool = get_client()
# delay df and model to avoid large objects in task graph.
df, model = pool.scatter([df, model])
elif hasattr(parallel, "map"):
pool = parallel
else:
msg = ("'parallel' should be one of {} for an instance with a "
"'map' method".format(', '.join(valid)))
raise ValueError(msg)
iterables = ((df, model, cutoff, horizon, predict_columns)
for cutoff in cutoffs)
iterables = zip(*iterables)
logger.info("Applying in parallel with %s", pool)
predicts = pool.map(single_cutoff_forecast, *iterables)
if parallel == "dask":
# convert Futures to DataFrames
predicts = pool.gather(predicts)
else:
predicts = [
single_cutoff_forecast(df, model, cutoff, horizon, predict_columns)
for cutoff in (tqdm(cutoffs) if disable_tqdm==False else cutoffs)
]
# Combine all predicted pd.DataFrame into one pd.DataFrame
return pd.concat(predicts, axis=0).reset_index(drop=True)
|
def cross_validation(model, horizon, period=None, initial=None, parallel=None, cutoffs=None, disable_tqdm=False):
"""Cross-Validation for time series.
Computes forecasts from historical cutoff points, which user can input.
If not provided, begins from (end - horizon) and works backwards, making
cutoffs with a spacing of period until initial is reached.
When period is equal to the time interval of the data, this is the
technique described in https://robjhyndman.com/hyndsight/tscv/ .
Parameters
----------
model: Prophet class object. Fitted Prophet model.
horizon: string with pd.Timedelta compatible style, e.g., '5 days',
'3 hours', '10 seconds'.
period: string with pd.Timedelta compatible style. Simulated forecast will
be done at every this period. If not provided, 0.5 * horizon is used.
initial: string with pd.Timedelta compatible style. The first training
period will include at least this much data. If not provided,
3 * horizon is used.
cutoffs: list of pd.Timestamp specifying cutoffs to be used during
cross validation. If not provided, they are generated as described
above.
parallel : {None, 'processes', 'threads', 'dask', object}
How to parallelize the forecast computation. By default no parallelism
is used.
* None : No parallelism.
    * 'processes' : Parallelize with concurrent.futures.ProcessPoolExecutor.
* 'threads' : Parallelize with concurrent.futures.ThreadPoolExecutor.
Note that some operations currently hold Python's Global Interpreter
Lock, so parallelizing with threads may be slower than training
sequentially.
* 'dask': Parallelize with Dask.
This requires that a dask.distributed Client be created.
* object : Any instance with a `.map` method. This method will
be called with :func:`single_cutoff_forecast` and a sequence of
iterables where each element is the tuple of arguments to pass to
:func:`single_cutoff_forecast`
.. code-block::
class MyBackend:
def map(self, func, *iterables):
results = [
func(*args)
for args in zip(*iterables)
]
return results
Returns
-------
A pd.DataFrame with the forecast, actual value and cutoff.
"""
df = model.history.copy().reset_index(drop=True)
horizon = pd.Timedelta(horizon)
predict_columns = ['ds', 'yhat']
if model.uncertainty_samples:
predict_columns.extend(['yhat_lower', 'yhat_upper'])
# Identify largest seasonality period
period_max = 0.
for s in model.seasonalities.values():
period_max = max(period_max, s['period'])
seasonality_dt = pd.Timedelta(str(period_max) + ' days')
if cutoffs is None:
# Set period
period = 0.5 * horizon if period is None else pd.Timedelta(period)
# Set initial
if initial is None:
initial = max(3 * horizon, seasonality_dt)
else:
initial = pd.Timedelta(initial)
# Compute Cutoffs
cutoffs = generate_cutoffs(df, horizon, initial, period)
else:
initial = cutoffs[0] - df['ds'].min()
# Check if the initial window
# (that is, the amount of time between the start of the history and the first cutoff)
# is less than the maximum seasonality period
if initial < seasonality_dt:
msg = 'Seasonality has period of {} days '.format(period_max)
msg += 'which is larger than initial window. '
msg += 'Consider increasing initial.'
logger.warning(msg)
if parallel:
valid = {"threads", "processes", "dask"}
if parallel == "threads":
pool = concurrent.futures.ThreadPoolExecutor()
elif parallel == "processes":
pool = concurrent.futures.ProcessPoolExecutor()
elif parallel == "dask":
try:
from dask.distributed import get_client
except ImportError as e:
raise ImportError("parallel='dask' requies the optional "
"dependency dask.") from e
pool = get_client()
# delay df and model to avoid large objects in task graph.
df, model = pool.scatter([df, model])
elif hasattr(parallel, "map"):
pool = parallel
else:
msg = ("'parallel' should be one of {} for an instance with a "
"'map' method".format(', '.join(valid)))
raise ValueError(msg)
iterables = ((df, model, cutoff, horizon, predict_columns)
for cutoff in cutoffs)
iterables = zip(*iterables)
logger.info("Applying in parallel with %s", pool)
predicts = pool.map(single_cutoff_forecast, *iterables)
if parallel == "dask":
# convert Futures to DataFrames
predicts = pool.gather(predicts)
else:
predicts = [
single_cutoff_forecast(df, model, cutoff, horizon, predict_columns)
for cutoff in (tqdm(cutoffs) if not disable_tqdm else cutoffs)
]
# Combine all predicted pd.DataFrame into one pd.DataFrame
return pd.concat(predicts, axis=0).reset_index(drop=True)
|
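A hedged usage sketch for the custom-backend option documented in the record above, reusing the MyBackend class from the docstring (the history frame here is hypothetical, and the import path may be fbprophet.diagnostics on older releases):
import pandas as pd
from prophet import Prophet
from prophet.diagnostics import cross_validation

class MyBackend:
    def map(self, func, *iterables):
        # sequential fallback with the same calling convention as Executor.map
        return [func(*args) for args in zip(*iterables)]

# hypothetical daily history; any fitted Prophet model would do
history = pd.DataFrame({
    "ds": pd.date_range("2020-01-01", periods=730, freq="D"),
    "y": range(730),
})
m = Prophet().fit(history)
df_cv = cross_validation(m, horizon="30 days", parallel=MyBackend())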
58,155 |
def http_request(method, api_endpoint, payload=None, params={}, user_auth=True, is_file=False, headers=None):
is_user_auth = True
url = BASE_URL + api_endpoint
# 2 types of auth, user and non user, mostly user is needed
if user_auth:
demisto.info("in if statement")
headers = headers or generate_user_auth_headers(api_endpoint)
else:
demisto.info("in else statement")
# This type of auth is only supported for basic commands: login/discover/refresh-token
is_user_auth = False
auth = base64.b64encode((EMAIL_ADDRESS + ':' + PASSWORD).encode("utf-8")).decode()
auth_type = 'Basic-Cloud'
auth_header = auth_type + ' ' + auth
headers = {
'x-mc-app-id': APP_ID,
'Content-Type': 'application/json',
'Authorization': auth_header
}
LOG('running %s request with url=%s\tparams=%s\tdata=%s\tis user auth=%s' % (
method, url, json.dumps(params), json.dumps(payload), is_user_auth))
try:
res = requests.request(
method,
url,
verify=USE_SSL,
params=params,
headers=headers,
data=json.dumps(payload)
)
res.raise_for_status()
if is_file:
return res
return res.json()
except HTTPError as e:
LOG(e)
if e.response.status_code == 418: # type: ignore # pylint: disable=no-member
if not APP_ID or not EMAIL_ADDRESS or not PASSWORD:
raise Exception(
'Credentials provided are expired, could not automatically refresh tokens.'
' App ID + Email Address '
'+ Password are required.')
else:
raise
except Exception as e:
LOG(e)
raise
|
def http_request(method, api_endpoint, payload=None, params={}, user_auth=True, is_file=False, headers=None):
is_user_auth = True
url = BASE_URL + api_endpoint
# 2 types of auth, user and non user, mostly user is needed
if user_auth:
headers = headers or generate_user_auth_headers(api_endpoint)
else:
# This type of auth is only supported for basic commands: login/discover/refresh-token
is_user_auth = False
auth = base64.b64encode((EMAIL_ADDRESS + ':' + PASSWORD).encode("utf-8")).decode()
auth_type = 'Basic-Cloud'
auth_header = auth_type + ' ' + auth
headers = {
'x-mc-app-id': APP_ID,
'Content-Type': 'application/json',
'Authorization': auth_header
}
LOG('running %s request with url=%s\tparams=%s\tdata=%s\tis user auth=%s' % (
method, url, json.dumps(params), json.dumps(payload), is_user_auth))
try:
res = requests.request(
method,
url,
verify=USE_SSL,
params=params,
headers=headers,
data=json.dumps(payload)
)
res.raise_for_status()
if is_file:
return res
return res.json()
except HTTPError as e:
LOG(e)
if e.response.status_code == 418: # type: ignore # pylint: disable=no-member
if not APP_ID or not EMAIL_ADDRESS or not PASSWORD:
raise Exception(
'Credentials provided are expired, could not automatically refresh tokens.'
' App ID + Email Address '
'+ Password are required.')
else:
raise
except Exception as e:
LOG(e)
raise
|
45,423 |
def test_time_ops():
# Make a pandas.core.indexes.timedeltas.TimedeltaIndex
deltas = pd.to_timedelta([1], unit="h")
modin_series = pd.Series(np.datetime64("2000-12-12")) + deltas
pandas_series = pandas.Series(np.datetime64("2000-12-12")) + deltas
df_equals(modin_series, pandas_series)
modin_series = pd.Series(np.datetime64("2000-12-12")) - deltas
pandas_series = pandas.Series(np.datetime64("2000-12-12")) - deltas
df_equals(modin_series, pandas_series)
|
def test_time_ops():
# Make a pandas.core.indexes.timedeltas.TimedeltaIndex
deltas = pd.to_timedelta([1], unit="h")
modin_series = pd.Series(np.datetime64("2000-12-12")) + deltas
pandas_series = pandas.Series(np.datetime64("2000-12-12")) + deltas
df_equals(modin_series, pandas_series)
eval_general(*test_series, lambda s: s - deltas)
|
31,277 |
def get_account_id_from_attribute(attribute: str, max_results: str = '50') -> Union[CommandResults, str]:
"""
https://developer.atlassian.com/cloud/jira/platform/rest/v3/api-group-user-search/#api-rest-api-3-user-search-get
Args:
attribute (str): Username or Email address of a user.
max_results (str): The maximum number of items to return. default by the server: 50
"""
users = search_user(attribute, max_results)
account_ids = {
user.get('accountId') for user in users if (attribute.lower() in [user.get('displayName', '').lower(),
user.get('emailAddress', '').lower()])}
if not account_ids:
return f'No Account ID was found for attribute: {attribute}.'
if len(account_ids) > 1:
return f'Multiple account IDs were found for attribute: {attribute}.\n' \
f'Please try provide the other attribute available - Email or DisplayName'
account_id = next(iter(account_ids))
outputs = {
'Attribute': attribute,
'AccountID': account_id
}
return CommandResults(
outputs_prefix='Jira.User',
outputs_key_field='AccountID',
readable_output=f'Account ID for attribute: {attribute} is: {account_id}',
outputs=outputs,
)
|
def get_account_id_from_attribute(attribute: str, max_results: str = '50') -> Union[CommandResults, str]:
"""
https://developer.atlassian.com/cloud/jira/platform/rest/v3/api-group-user-search/#api-rest-api-3-user-search-get
Args:
attribute (str): Username or Email address of a user.
max_results (str): The maximum number of items to return. default by the server: 50
"""
users = search_user(attribute, max_results)
account_ids = {
user.get('accountId') for user in users if (attribute.lower() in [user.get('displayName', '').lower(),
user.get('emailAddress', '').lower()])}
if not account_ids:
return f'No Account ID was found for attribute: {attribute}.'
if len(account_ids) > 1:
return f'Multiple account IDs were found for attribute: {attribute}.\n' \
f'Please try to provide the other attribute available - Email or DisplayName.'
account_id = next(iter(account_ids))
outputs = {
'Attribute': attribute,
'AccountID': account_id
}
return CommandResults(
outputs_prefix='Jira.User',
outputs_key_field='AccountID',
readable_output=f'Account ID for attribute: {attribute} is: {account_id}',
outputs=outputs,
)
|
39,324 |
def perlin_noise(amplitude, freq: Sequence[float], phase: Sequence[float]):
"""Return the implicit function that implements Perlin noise.
Uses ``vtk.vtkPerlinNoise`` and computes a Perlin noise field as
an implicit function. ``vtk.vtkPerlinNoise`` is a concrete
implementation of ``vtk.vtkImplicitFunction``. Perlin noise,
originally described by Ken Perlin, is a non-periodic and
continuous noise function useful for modeling real-world objects.
The amplitude and frequency of the noise pattern are
adjustable. This implementation of Perlin noise is derived closely
from Greg Ward's version in Graphics Gems II.
Parameters
----------
amplitude : float
Amplitude of the noise function.
``amplitude`` can be negative. The noise function varies
randomly between ``-|Amplitude|`` and
``|Amplitude|``. Therefore the range of values is
``2*|Amplitude|`` large. The initial amplitude is 1.
freq : Sequence[float, float, float]
The frequency, or physical scale, of the noise function
(higher is finer scale).
The frequency can be adjusted per axis, or the same for all axes.
phase : Sequence[float, float, float]
Set/get the phase of the noise function.
This parameter can be used to shift the noise function within
space (perhaps to avoid a beat with a noise pattern at another
scale). Phase tends to repeat about every unit, so a phase of
0.5 is a half-cycle shift.
Examples
--------
Create a perlin noise function with an amplitude of 0.1, frequency
for all axes of 1, and a phase of 0 for all axes.
>>> import pyvista
>>> noise = pyvista.perlin_noise(0.1, (1, 1, 1), (0, 0, 0))
Sample perlin noise over a structured grid and plot it.
>>> grid = pyvista.sample_function(noise, [0, 5.0, 0, 5.0, 0, 5.0])
>>> grid.plot()
"""
noise = _vtk.vtkPerlinNoise()
noise.SetAmplitude(amplitude)
noise.SetFrequency(freq)
noise.SetPhase(phase)
return noise
|
def perlin_noise(amplitude, freq: Sequence[float], phase: Sequence[float]):
"""Return the implicit function that implements Perlin noise.
Uses ``vtk.vtkPerlinNoise`` and computes a Perlin noise field as
an implicit function. ``vtk.vtkPerlinNoise`` is a concrete
implementation of ``vtk.vtkImplicitFunction``. Perlin noise,
originally described by Ken Perlin, is a non-periodic and
continuous noise function useful for modeling real-world objects.
The amplitude and frequency of the noise pattern are
adjustable. This implementation of Perlin noise is derived closely
from Greg Ward's version in Graphics Gems II.
Parameters
----------
amplitude : float
Amplitude of the noise function.
``amplitude`` can be negative. The noise function varies
randomly between ``-|Amplitude|`` and
``|Amplitude|``. Therefore the range of values is
``2*|Amplitude|`` large. The initial amplitude is 1.
freq : Sequence[float, float, float]
The frequency, or physical scale, of the noise function
(higher is finer scale).
The frequency can be adjusted per axis, or the same for all axes.
phase : Sequence[float, float, float]
Set/get the phase of the noise function.
This parameter can be used to shift the noise function within
space (perhaps to avoid a beat with a noise pattern at another
scale). Phase tends to repeat about every unit, so a phase of
0.5 is a half-cycle shift.
Examples
--------
Create a perlin noise function with an amplitude of 0.1, frequency
for all axes of 1, and a phase of 0 for all axes.
>>> import pyvista
>>> noise = pyvista.perlin_noise(0.1, (1, 1, 1), (0, 0, 0))
Sample perlin noise over a structured grid and plot it.
>>> grid = pyvista.sample_function(noise, [0, 5, 0, 5, 0, 5])
>>> grid.plot()
"""
noise = _vtk.vtkPerlinNoise()
noise.SetAmplitude(amplitude)
noise.SetFrequency(freq)
noise.SetPhase(phase)
return noise
|
40,162 |
def compute_library_size_batch(
adata,
batch_key: str,
local_l_mean_key: str = None,
local_l_var_key: str = None,
X_layers_key=None,
copy: bool = False,
):
"""Computes the library size
Parameters
----------
adata
anndata object containing counts
batch_key
key in obs for batch information
local_l_mean_key
key in obs to save the local log mean
local_l_var_key
key in obs to save the local log variance
X_layers_key
if not None, will use this in adata.layers[] for X
copy
if True, returns a copy of the adata
Returns
-------
type
anndata.AnnData if copy was True, else None
"""
assert batch_key in adata.obs_keys(), "batch_key not valid key in obs dataframe"
local_means = np.zeros((adata.shape[0], 1))
local_vars = np.zeros((adata.shape[0], 1))
batch_indices = adata.obs[batch_key]
for i_batch in np.unique(batch_indices):
idx_batch = np.squeeze(batch_indices == i_batch)
if X_layers_key is not None:
assert (
X_layers_key in adata.layers.keys()
), "X_layers_key not a valid key for adata.layers"
data = adata[idx_batch].layers[X_layers_key]
else:
data = adata[idx_batch].X
(local_means[idx_batch], local_vars[idx_batch],) = compute_library_size(data)
if local_l_mean_key is None:
local_l_mean_key = "_scvi_local_l_mean"
if local_l_var_key is None:
local_l_var_key = "_scvi_local_l_var"
if copy:
copy = adata.copy()
copy.obs[local_l_mean_key] = local_means
copy.obs[local_l_var_key] = local_vars
return copy
else:
adata.obs[local_l_mean_key] = local_means
adata.obs[local_l_var_key] = local_vars
|
def _compute_library_size_batch(
adata,
batch_key: str,
local_l_mean_key: str = None,
local_l_var_key: str = None,
X_layers_key=None,
copy: bool = False,
):
"""Computes the library size
Parameters
----------
adata
anndata object containing counts
batch_key
key in obs for batch information
local_l_mean_key
key in obs to save the local log mean
local_l_var_key
key in obs to save the local log variance
X_layers_key
if not None, will use this in adata.layers[] for X
copy
if True, returns a copy of the adata
Returns
-------
type
anndata.AnnData if copy was True, else None
"""
assert batch_key in adata.obs_keys(), "batch_key not valid key in obs dataframe"
local_means = np.zeros((adata.shape[0], 1))
local_vars = np.zeros((adata.shape[0], 1))
batch_indices = adata.obs[batch_key]
for i_batch in np.unique(batch_indices):
idx_batch = np.squeeze(batch_indices == i_batch)
if X_layers_key is not None:
assert (
X_layers_key in adata.layers.keys()
), "X_layers_key not a valid key for adata.layers"
data = adata[idx_batch].layers[X_layers_key]
else:
data = adata[idx_batch].X
(local_means[idx_batch], local_vars[idx_batch],) = compute_library_size(data)
if local_l_mean_key is None:
local_l_mean_key = "_scvi_local_l_mean"
if local_l_var_key is None:
local_l_var_key = "_scvi_local_l_var"
if copy:
copy = adata.copy()
copy.obs[local_l_mean_key] = local_means
copy.obs[local_l_var_key] = local_vars
return copy
else:
adata.obs[local_l_mean_key] = local_means
adata.obs[local_l_var_key] = local_vars
|
38,530 |
def max(
var0: pp.ad.Ad_array, var1: Union[pp.ad.Ad_array, np.ndarray]
) -> pp.ad.Ad_array:
"""Ad max function.
The second argument is allowed to be constant, with a numpy array originally
wrapped in a pp.ad.Array, whereas the first argument is expected to be an
Ad_array originating from a pp.ad.Operator.
Parameters
----------
var0 : pp.ad.Ad_array
Ad operator (variable or expression).
var1 : Union[pp.ad.Ad_array, pp.ad.Array]
Ad operator (variable or expression) OR ad Array.
Returns
-------
pp.ad.Ad_array
The maximum of var0 and var1 with appropriate val and jac attributes.
"""
vals = [var0.val.copy()]
jacs = [var0.jac.copy()]
if isinstance(var1, np.ndarray):
vals.append(var1.copy())
jacs.append(sps.csr_matrix((var0.jac.shape)))
else:
vals.append(var1.val.copy())
jacs.append(var1.jac.copy())
inds = vals[1] >= vals[0]
max_val = vals[0].copy()
max_val[inds] = vals[1][inds]
max_jac = jacs[0].copy()
max_jac[inds] = jacs[1][inds].copy()
return pp.ad.Ad_array(max_val, max_jac)
|
def max(
var0: pp.ad.Ad_array, var1: Union[pp.ad.Ad_array, np.ndarray]
) -> pp.ad.Ad_array:
"""Ad max function represented as an Ad_array.
The second argument is allowed to be constant, with a numpy array originally
wrapped in a pp.ad.Array, whereas the first argument is expected to be an
Ad_array originating from a pp.ad.Operator.
Parameters
----------
var0 : pp.ad.Ad_array
Ad operator (variable or expression).
var1 : Union[pp.ad.Ad_array, pp.ad.Array]
Ad operator (variable or expression) OR ad Array.
Returns
-------
pp.ad.Ad_array
The maximum of var0 and var1 with appropriate val and jac attributes.
"""
vals = [var0.val.copy()]
jacs = [var0.jac.copy()]
if isinstance(var1, np.ndarray):
vals.append(var1.copy())
jacs.append(sps.csr_matrix((var0.jac.shape)))
else:
vals.append(var1.val.copy())
jacs.append(var1.jac.copy())
inds = vals[1] >= vals[0]
max_val = vals[0].copy()
max_val[inds] = vals[1][inds]
max_jac = jacs[0].copy()
max_jac[inds] = jacs[1][inds].copy()
return pp.ad.Ad_array(max_val, max_jac)
|
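A conceptual dense-NumPy sketch of the selection logic in the record above (illustration only; the real function operates on pp.ad.Ad_array objects with sparse Jacobians):
import numpy as np

# Two AD-like quantities represented as (value, dense Jacobian) pairs.
val0 = np.array([1.0, 4.0, 2.0])
jac0 = np.eye(3)
val1 = np.array([3.0, 0.0, 2.0])
jac1 = 2.0 * np.eye(3)

inds = val1 >= val0                     # entries where the second argument wins (ties included)
max_val = val0.copy()
max_val[inds] = val1[inds]
max_jac = jac0.copy()
max_jac[inds] = jac1[inds]              # pick the corresponding Jacobian rows
print(max_val)                          # [3. 4. 2.]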
5,356 |
def latest(
name,
refresh=None,
fromrepo=None,
skip_verify=False,
pkgs=None,
watch_flags=True,
**kwargs
):
"""
Ensure that the named package is installed and the latest available
package. If the package can be updated, this state function will update
the package. Generally it is better for the
:mod:`installed <salt.states.pkg.installed>` function to be
used, as :mod:`latest <salt.states.pkg.latest>` will update the package
whenever a new package is available.
.. note::
Any argument which is either a) not explicitly defined for this state,
or b) not a global state argument like ``saltenv``, or
``reload_modules``, will be passed through to the call to
``pkg.install`` to install the package(s). For example, you can include
a ``disablerepo`` argument on platforms that use yum/dnf to disable
that repo:
.. code-block:: yaml
mypkg:
pkg.latest:
- disablerepo: base,updates
To see what is supported, check :ref:`this page <virtual-pkg>` to find
the documentation for your platform's ``pkg`` module, then look at the
documentation for the ``install`` function.
Any argument that is passed through to the ``install`` function, which
is not defined for that function, will be silently ignored.
name
The name of the package to maintain at the latest available version.
This parameter is ignored if "pkgs" is used.
fromrepo
Specify a repository from which to install
skip_verify
Skip the GPG verification check for the package to be installed
refresh
This parameter controls whether or not the package repo database is
updated prior to checking for the latest available version of the
requested packages.
If ``True``, the package database will be refreshed (``apt-get update``
or equivalent, depending on platform) before checking for the latest
available version of the requested packages.
If ``False``, the package database will *not* be refreshed before
checking.
If unset, then Salt treats package database refreshes differently
depending on whether or not a ``pkg`` state has been executed already
during the current Salt run. Once a refresh has been performed in a
``pkg`` state, for the remainder of that Salt run no other refreshes
will be performed for ``pkg`` states which do not explicitly set
``refresh`` to ``True``. This prevents needless additional refreshes
from slowing down the Salt run.
:param str cache_valid_time:
.. versionadded:: 2016.11.0
This parameter sets the value in seconds after which the cache is
marked as invalid, and a cache update is necessary. This overwrites
the ``refresh`` parameter's default behavior.
Example:
.. code-block:: yaml
httpd:
pkg.latest:
- refresh: True
- cache_valid_time: 300
In this case, a refresh will not take place for 5 minutes since the last
``apt-get update`` was executed on the system.
.. note::
This parameter is available only on Debian based distributions and
has no effect on the rest.
:param bool resolve_capabilities:
Turn on resolving capabilities. This allows one to name "provides" or alias names for packages.
.. versionadded:: 2018.3.0
Multiple Package Installation Options:
(Not yet supported for: FreeBSD, OpenBSD, MacOS, and Solaris pkgutil)
pkgs
A list of packages to maintain at the latest available version.
.. code-block:: yaml
mypkgs:
pkg.latest:
- pkgs:
- foo
- bar
- baz
install_recommends
Whether to install the packages marked as recommended. Default is
``True``. Currently only works with APT-based systems.
.. versionadded:: 2015.5.0
.. code-block:: yaml
httpd:
pkg.latest:
- install_recommends: False
only_upgrade
Only upgrade the packages, if they are already installed. Default is
``False``. Currently only works with APT-based systems.
.. versionadded:: 2015.5.0
.. code-block:: yaml
httpd:
pkg.latest:
- only_upgrade: True
.. note::
If this parameter is set to True and the package is not already
installed, the state will fail.
report_reboot_exit_codes
If the installer exits with a recognized exit code indicating that
a reboot is required, the module function
*win_system.set_reboot_required_witnessed*
will be called, preserving the knowledge of this event
for the remainder of the current boot session. For the time being,
``3010`` is the only recognized exit code, but this
is subject to future refinement. The value of this param
defaults to ``True``. This parameter has no effect on
non-Windows systems.
.. versionadded:: 2016.11.0
.. code-block:: yaml
ms vcpp installed:
pkg.latest:
- name: ms-vcpp
- report_reboot_exit_codes: False
"""
refresh = salt.utils.pkg.check_refresh(__opts__, refresh)
if kwargs.get("sources"):
return {
"name": name,
"changes": {},
"result": False,
"comment": 'The "sources" parameter is not supported.',
}
elif pkgs:
desired_pkgs = list(_repack_pkgs(pkgs).keys()) # pylint: disable=not-callable
if not desired_pkgs:
# Badly-formatted SLS
return {
"name": name,
"changes": {},
"result": False,
"comment": 'Invalidly formatted "pkgs" parameter. See ' "minion log.",
}
else:
if isinstance(pkgs, list) and len(pkgs) == 0:
return {
"name": name,
"changes": {},
"result": True,
"comment": "No packages to install provided",
}
else:
desired_pkgs = [name]
kwargs["saltenv"] = __env__
# check if capabilities should be checked and modify the requested packages
# accordingly.
desired_pkgs, refresh = _resolve_capabilities(
desired_pkgs, refresh=refresh, **kwargs
)
try:
avail = __salt__["pkg.latest_version"](
*desired_pkgs, fromrepo=fromrepo, refresh=refresh, **kwargs
)
except CommandExecutionError as exc:
return {
"name": name,
"changes": {},
"result": False,
"comment": "An error was encountered while checking the "
"newest available version of package(s): {}".format(exc),
}
try:
cur = __salt__["pkg.version"](*desired_pkgs, **kwargs)
except CommandExecutionError as exc:
return {"name": name, "changes": {}, "result": False, "comment": exc.strerror}
# Repack the cur/avail data if only a single package is being checked
if isinstance(cur, str):
cur = {desired_pkgs[0]: cur}
if isinstance(avail, str):
avail = {desired_pkgs[0]: avail}
targets = {}
problems = []
for pkg in desired_pkgs:
if not avail.get(pkg):
# Package either a) is up-to-date, or b) does not exist
if not cur.get(pkg):
# Package does not exist
msg = "No information found for '{}'.".format(pkg)
log.error(msg)
problems.append(msg)
elif (
watch_flags
and __grains__.get("os") == "Gentoo"
and __salt__["portage_config.is_changed_uses"](pkg)
):
# Package is up-to-date, but Gentoo USE flags are changing so
# we need to add it to the targets
targets[pkg] = cur[pkg]
else:
# Package either a) is not installed, or b) is installed and has an
# upgrade available
targets[pkg] = avail[pkg]
if problems:
return {
"name": name,
"changes": {},
"result": False,
"comment": " ".join(problems),
}
if targets:
# Find up-to-date packages
if not pkgs:
# There couldn't have been any up-to-date packages if this state
# only targeted a single package and is being allowed to proceed to
# the install step.
up_to_date = []
else:
up_to_date = [x for x in pkgs if x not in targets]
if __opts__["test"]:
comments = []
comments.append(
"The following packages would be installed/upgraded: "
+ ", ".join(sorted(targets))
)
if up_to_date:
up_to_date_count = len(up_to_date)
if up_to_date_count <= 10:
comments.append(
"The following packages are already up-to-date: "
+ ", ".join(
["{} ({})".format(x, cur[x]) for x in sorted(up_to_date)]
)
)
else:
comments.append(
"{} packages are already up-to-date".format(up_to_date_count)
)
return {
"name": name,
"changes": {},
"result": None,
"comment": "\n".join(comments),
}
if salt.utils.platform.is_windows():
# pkg.install execution module on windows ensures the software
# package is installed when no version is specified, it does not
# upgrade the software to the latest. This is per the design.
# Build updated list of pkgs *with version number*, exclude
# non-targeted ones
targeted_pkgs = [{x: targets[x]} for x in targets]
else:
# Build updated list of pkgs to exclude non-targeted ones
targeted_pkgs = list(targets)
# No need to refresh, if a refresh was necessary it would have been
# performed above when pkg.latest_version was run.
try:
changes = __salt__["pkg.install"](
name=None,
refresh=False,
fromrepo=fromrepo,
skip_verify=skip_verify,
pkgs=targeted_pkgs,
**kwargs
)
except CommandExecutionError as exc:
return {
"name": name,
"changes": {},
"result": False,
"comment": "An error was encountered while installing "
"package(s): {}".format(exc),
}
if changes:
# Find failed and successful updates
failed = [
x
for x in targets
if not changes.get(x)
or changes[x].get("new") != targets[x]
and targets[x] != "latest"
]
successful = [x for x in targets if x not in failed]
comments = []
if failed:
msg = "The following packages failed to update: " "{}".format(
", ".join(sorted(failed))
)
comments.append(msg)
if successful:
msg = (
"The following packages were successfully "
"installed/upgraded: "
"{}".format(", ".join(sorted(successful)))
)
comments.append(msg)
if up_to_date:
if len(up_to_date) <= 10:
msg = (
"The following packages were already up-to-date: "
"{}".format(", ".join(sorted(up_to_date)))
)
else:
msg = "{} packages were already up-to-date ".format(len(up_to_date))
comments.append(msg)
return {
"name": name,
"changes": changes,
"result": False if failed else True,
"comment": " ".join(comments),
}
else:
if len(targets) > 10:
comment = (
"{} targeted packages failed to update. "
"See debug log for details.".format(len(targets))
)
elif len(targets) > 1:
comment = (
"The following targeted packages failed to update. "
"See debug log for details: ({}).".format(
", ".join(sorted(targets))
)
)
else:
comment = "Package {} failed to " "update.".format(
next(iter(list(targets.keys())))
)
if up_to_date:
if len(up_to_date) <= 10:
comment += (
" The following packages were already "
"up-to-date: "
"{}".format(", ".join(sorted(up_to_date)))
)
else:
comment += "{} packages were already " "up-to-date".format(
len(up_to_date)
)
return {
"name": name,
"changes": changes,
"result": False,
"comment": comment,
}
else:
if len(desired_pkgs) > 10:
comment = "All {} packages are up-to-date.".format(len(desired_pkgs))
elif len(desired_pkgs) > 1:
comment = "All packages are up-to-date " "({}).".format(
", ".join(sorted(desired_pkgs))
)
else:
comment = "Package {} is already " "up-to-date".format(desired_pkgs[0])
return {"name": name, "changes": {}, "result": True, "comment": comment}
|
def latest(
name,
refresh=None,
fromrepo=None,
skip_verify=False,
pkgs=None,
watch_flags=True,
**kwargs
):
"""
Ensure that the named package is installed and the latest available
package. If the package can be updated, this state function will update
the package. Generally it is better for the
:mod:`installed <salt.states.pkg.installed>` function to be
used, as :mod:`latest <salt.states.pkg.latest>` will update the package
whenever a new package is available.
.. note::
Any argument which is either a) not explicitly defined for this state,
or b) not a global state argument like ``saltenv``, or
``reload_modules``, will be passed through to the call to
``pkg.install`` to install the package(s). For example, you can include
a ``disablerepo`` argument on platforms that use yum/dnf to disable
that repo:
.. code-block:: yaml
mypkg:
pkg.latest:
- disablerepo: base,updates
To see what is supported, check :ref:`this page <virtual-pkg>` to find
the documentation for your platform's ``pkg`` module, then look at the
documentation for the ``install`` function.
Any argument that is passed through to the ``install`` function, which
is not defined for that function, will be silently ignored.
name
The name of the package to maintain at the latest available version.
This parameter is ignored if "pkgs" is used.
fromrepo
Specify a repository from which to install
skip_verify
Skip the GPG verification check for the package to be installed
refresh
This parameter controls whether or not the package repo database is
updated prior to checking for the latest available version of the
requested packages.
If ``True``, the package database will be refreshed (``apt-get update``
or equivalent, depending on platform) before checking for the latest
available version of the requested packages.
If ``False``, the package database will *not* be refreshed before
checking.
If unset, then Salt treats package database refreshes differently
depending on whether or not a ``pkg`` state has been executed already
during the current Salt run. Once a refresh has been performed in a
``pkg`` state, for the remainder of that Salt run no other refreshes
will be performed for ``pkg`` states which do not explicitly set
``refresh`` to ``True``. This prevents needless additional refreshes
from slowing down the Salt run.
:param str cache_valid_time:
.. versionadded:: 2016.11.0
This parameter sets the value in seconds after which the cache is
marked as invalid, and a cache update is necessary. This overwrites
the ``refresh`` parameter's default behavior.
Example:
.. code-block:: yaml
httpd:
pkg.latest:
- refresh: True
- cache_valid_time: 300
In this case, a refresh will not take place for 5 minutes since the last
``apt-get update`` was executed on the system.
.. note::
This parameter is available only on Debian based distributions and
has no effect on the rest.
:param bool resolve_capabilities:
Turn on resolving capabilities. This allows one to name "provides" or alias names for packages.
.. versionadded:: 2018.3.0
Multiple Package Installation Options:
(Not yet supported for: FreeBSD, OpenBSD, MacOS, and Solaris pkgutil)
pkgs
A list of packages to maintain at the latest available version.
.. code-block:: yaml
mypkgs:
pkg.latest:
- pkgs:
- foo
- bar
- baz
install_recommends
Whether to install the packages marked as recommended. Default is
``True``. Currently only works with APT-based systems.
.. versionadded:: 2015.5.0
.. code-block:: yaml
httpd:
pkg.latest:
- install_recommends: False
only_upgrade
Only upgrade the packages, if they are already installed. Default is
``False``. Currently only works with APT-based systems.
.. versionadded:: 2015.5.0
.. code-block:: yaml
httpd:
pkg.latest:
- only_upgrade: True
.. note::
If this parameter is set to True and the package is not already
installed, the state will fail.
report_reboot_exit_codes
If the installer exits with a recognized exit code indicating that
a reboot is required, the module function
*win_system.set_reboot_required_witnessed*
will be called, preserving the knowledge of this event
for the remainder of the current boot session. For the time being,
``3010`` is the only recognized exit code, but this
is subject to future refinement. The value of this param
defaults to ``True``. This parameter has no effect on
non-Windows systems.
.. versionadded:: 2016.11.0
.. code-block:: yaml
ms vcpp installed:
pkg.latest:
- name: ms-vcpp
- report_reboot_exit_codes: False
"""
refresh = salt.utils.pkg.check_refresh(__opts__, refresh)
if kwargs.get("sources"):
return {
"name": name,
"changes": {},
"result": False,
"comment": 'The "sources" parameter is not supported.',
}
elif pkgs:
desired_pkgs = list(_repack_pkgs(pkgs).keys()) # pylint: disable=not-callable
if not desired_pkgs:
# Badly-formatted SLS
return {
"name": name,
"changes": {},
"result": False,
"comment": 'Invalidly formatted "pkgs" parameter. See ' "minion log.",
}
else:
if isinstance(pkgs, list) and len(pkgs) == 0:
return {
"name": name,
"changes": {},
"result": True,
"comment": "No packages to install provided",
}
else:
desired_pkgs = [name]
kwargs["saltenv"] = __env__
# check if capabilities should be checked and modify the requested packages
# accordingly.
desired_pkgs, refresh = _resolve_capabilities(
desired_pkgs, refresh=refresh, **kwargs
)
try:
avail = __salt__["pkg.latest_version"](
*desired_pkgs, fromrepo=fromrepo, refresh=refresh, **kwargs
)
except CommandExecutionError as exc:
return {
"name": name,
"changes": {},
"result": False,
"comment": "An error was encountered while checking the "
"newest available version of package(s): {}".format(exc),
}
try:
cur = __salt__["pkg.version"](*desired_pkgs, **kwargs)
except CommandExecutionError as exc:
return {"name": name, "changes": {}, "result": False, "comment": exc.strerror}
# Repack the cur/avail data if only a single package is being checked
if isinstance(cur, str):
cur = {desired_pkgs[0]: cur}
if isinstance(avail, str):
avail = {desired_pkgs[0]: avail}
targets = {}
problems = []
for pkg in desired_pkgs:
if not avail.get(pkg):
# Package either a) is up-to-date, or b) does not exist
if not cur.get(pkg):
# Package does not exist
msg = "No information found for '{}'.".format(pkg)
log.error(msg)
problems.append(msg)
elif (
watch_flags
and __grains__.get("os") == "Gentoo"
and __salt__["portage_config.is_changed_uses"](pkg)
):
# Package is up-to-date, but Gentoo USE flags are changing so
# we need to add it to the targets
targets[pkg] = cur[pkg]
else:
# Package either a) is not installed, or b) is installed and has an
# upgrade available
targets[pkg] = avail[pkg]
if problems:
return {
"name": name,
"changes": {},
"result": False,
"comment": " ".join(problems),
}
if targets:
# Find up-to-date packages
if not pkgs:
# There couldn't have been any up-to-date packages if this state
# only targeted a single package and is being allowed to proceed to
# the install step.
up_to_date = []
else:
up_to_date = [x for x in pkgs if x not in targets]
if __opts__["test"]:
comments = []
comments.append(
"The following packages would be installed/upgraded: "
+ ", ".join(sorted(targets))
)
if up_to_date:
up_to_date_count = len(up_to_date)
if up_to_date_count <= 10:
comments.append(
"The following packages are already up-to-date: "
+ ", ".join(
["{} ({})".format(x, cur[x]) for x in sorted(up_to_date)]
)
)
else:
comments.append(
"{} packages are already up-to-date".format(up_to_date_count)
)
return {
"name": name,
"changes": {},
"result": None,
"comment": "\n".join(comments),
}
if salt.utils.platform.is_windows():
# pkg.install execution module on windows ensures the software
# package is installed when no version is specified, it does not
# upgrade the software to the latest. This is per the design.
# Build updated list of pkgs *with version number*, exclude
# non-targeted ones
targeted_pkgs = [{x: targets[x]} for x in targets]
else:
# Build updated list of pkgs to exclude non-targeted ones
targeted_pkgs = list(targets)
# No need to refresh, if a refresh was necessary it would have been
# performed above when pkg.latest_version was run.
try:
changes = __salt__["pkg.install"](
name=None,
refresh=False,
fromrepo=fromrepo,
skip_verify=skip_verify,
pkgs=targeted_pkgs,
**kwargs
)
except CommandExecutionError as exc:
return {
"name": name,
"changes": {},
"result": False,
"comment": "An error was encountered while installing "
"package(s): {}".format(exc),
}
if changes:
# Find failed and successful updates
failed = [
x
for x in targets
if not changes.get(x)
or changes[x].get("new") != targets[x]
and targets[x] != "latest"
]
successful = [x for x in targets if x not in failed]
comments = []
if failed:
msg = "The following packages failed to update: " "{}".format(
", ".join(sorted(failed))
)
comments.append(msg)
if successful:
msg = (
"The following packages were successfully "
"installed/upgraded: "
"{}".format(", ".join(sorted(successful)))
)
comments.append(msg)
if up_to_date:
if len(up_to_date) <= 10:
msg = (
"The following packages were already up-to-date: "
"{}".format(", ".join(sorted(up_to_date)))
)
else:
msg = "{} packages were already up-to-date ".format(len(up_to_date))
comments.append(msg)
return {
"name": name,
"changes": changes,
"result": False if failed else True,
"comment": " ".join(comments),
}
else:
if len(targets) > 10:
comment = (
"{} targeted packages failed to update. "
"See debug log for details.".format(len(targets))
)
elif len(targets) > 1:
comment = (
"The following targeted packages failed to update. "
"See debug log for details: ({}).".format(
", ".join(sorted(targets))
)
)
else:
comment = "Package {} failed to " "update.".format(
next(iter(list(targets.keys())))
)
if up_to_date:
if len(up_to_date) <= 10:
comment += (
" The following packages were already "
"up-to-date: "
"{}".format(", ".join(sorted(up_to_date)))
)
else:
comment += "{} packages were already " "up-to-date".format(
len(up_to_date)
)
return {
"name": name,
"changes": changes,
"result": False,
"comment": comment,
}
else:
if len(desired_pkgs) > 10:
comment = "All {} packages are up-to-date.".format(len(desired_pkgs))
elif len(desired_pkgs) > 1:
comment = "All packages are up-to-date ({}).".format(
", ".join(sorted(desired_pkgs))
)
else:
comment = "Package {} is already " "up-to-date".format(desired_pkgs[0])
return {"name": name, "changes": {}, "result": True, "comment": comment}
|
56,744 |
def get_moment(rv: TensorVariable) -> TensorVariable:
"""Method for specification of a stable starting point/value
for sampling of a distribution.
The only parameter to this function is the RandomVariable
for which the value is to be derived.
"""
size = rv.owner.inputs[1]
return _get_moment(rv.owner.op, rv, size, *rv.owner.inputs[3:])
|
def get_moment(rv: TensorVariable) -> TensorVariable:
"""Method for choosing a representative point/value
that can be used to start optimization or MCMC sampling.
The only parameter to this function is the RandomVariable
for which the value is to be derived.
"""
size = rv.owner.inputs[1]
return _get_moment(rv.owner.op, rv, size, *rv.owner.inputs[3:])
|
22,825 |
def _add_camera_to_bundle(ba, camera, camera_prior, constant):
"""Add camera to a bundle adjustment problem."""
if camera.projection_type == 'perspective':
ba.add_perspective_camera(
camera.id, camera.focal, camera.k1, camera.k2,
camera_prior.focal, camera_prior.k1, camera_prior.k2,
constant)
elif camera.projection_type == 'brown':
c = csfm.BABrownPerspectiveCamera()
c.id = camera.id
c.focal_x = camera.focal_x
c.focal_y = camera.focal_y
c.c_x = camera.c_x
c.c_y = camera.c_y
c.k1 = camera.k1
c.k2 = camera.k2
c.p1 = camera.p1
c.p2 = camera.p2
c.k3 = camera.k3
c.focal_x_prior = camera_prior.focal_x
c.focal_y_prior = camera_prior.focal_y
c.c_x_prior = camera_prior.c_x
c.c_y_prior = camera_prior.c_y
c.k1_prior = camera_prior.k1
c.k2_prior = camera_prior.k2
c.p1_prior = camera_prior.p1
c.p2_prior = camera_prior.p2
c.k3_prior = camera_prior.k3
c.constant = constant
ba.add_brown_perspective_camera(c)
elif camera.projection_type == 'fisheye':
ba.add_fisheye_camera(
camera.id, camera.focal, camera.k1, camera.k2,
camera_prior.focal, camera_prior.k1, camera_prior.k2,
constant)
elif camera.projection_type == 'dual':
ba.add_dual_camera(
camera.id, camera.focal, camera.k1, camera.k2,
camera.focal_prior, camera.k1_prior, camera.k2_prior,
camera.transition, constant)
elif camera.projection_type in ['equirectangular', 'spherical']:
ba.add_equirectangular_camera(camera.id)
|
def _add_camera_to_bundle(ba, camera, camera_prior, constant):
"""Add camera to a bundle adjustment problem."""
if camera.projection_type == 'perspective':
ba.add_perspective_camera(
camera.id, camera.focal, camera.k1, camera.k2,
camera_prior.focal, camera_prior.k1, camera_prior.k2,
constant)
elif camera.projection_type == 'brown':
c = csfm.BABrownPerspectiveCamera()
c.id = camera.id
c.focal_x = camera.focal_x
c.focal_y = camera.focal_y
c.c_x = camera.c_x
c.c_y = camera.c_y
c.k1 = camera.k1
c.k2 = camera.k2
c.p1 = camera.p1
c.p2 = camera.p2
c.k3 = camera.k3
c.focal_x_prior = camera_prior.focal_x
c.focal_y_prior = camera_prior.focal_y
c.c_x_prior = camera_prior.c_x
c.c_y_prior = camera_prior.c_y
c.k1_prior = camera_prior.k1
c.k2_prior = camera_prior.k2
c.p1_prior = camera_prior.p1
c.p2_prior = camera_prior.p2
c.k3_prior = camera_prior.k3
c.constant = constant
ba.add_brown_perspective_camera(c)
elif camera.projection_type == 'fisheye':
ba.add_fisheye_camera(
camera.id, camera.focal, camera.k1, camera.k2,
camera_prior.focal, camera_prior.k1, camera_prior.k2,
constant)
elif camera.projection_type == 'dual':
ba.add_dual_camera(
camera.id, camera.focal, camera.k1, camera.k2,
camera_prior.focal, camera_prior.k1, camera_prior.k2,
camera.transition, constant)
elif camera.projection_type in ['equirectangular', 'spherical']:
ba.add_equirectangular_camera(camera.id)
|
46,556 |
def run_fork_test(spec, pre_state):
yield 'pre', pre_state
post_state = spec.upgrade_to_lightclient_patch(pre_state)
# Stable fields
stable_fields = [
'genesis_time', 'genesis_validators_root', 'slot',
# History
'latest_block_header', 'block_roots', 'state_roots', 'historical_roots',
# Eth1
'eth1_data', 'eth1_data_votes', 'eth1_deposit_index',
# Registry
'validators', 'balances',
# Randomness
'randao_mixes',
# Slashings
'slashings',
# Finality
'justification_bits', 'previous_justified_checkpoint', 'current_justified_checkpoint', 'finalized_checkpoint',
]
for field in stable_fields:
assert getattr(pre_state, field) == getattr(post_state, field)
# Modified fields
modified_fields = ['fork']
for field in modified_fields:
assert getattr(pre_state, field) != getattr(post_state, field)
assert pre_state.fork.current_version == post_state.fork.previous_version
assert post_state.fork.current_version == spec.LIGHTCLIENT_PATCH_FORK_VERSION
assert post_state.fork.epoch == spec.get_current_epoch(post_state)
yield 'post', post_state
|
def run_fork_test(spec, pre_state):
yield 'pre', pre_state
post_state = post_spec.upgrade_to_lightclient_patch(pre_state)
# Stable fields
stable_fields = [
'genesis_time', 'genesis_validators_root', 'slot',
# History
'latest_block_header', 'block_roots', 'state_roots', 'historical_roots',
# Eth1
'eth1_data', 'eth1_data_votes', 'eth1_deposit_index',
# Registry
'validators', 'balances',
# Randomness
'randao_mixes',
# Slashings
'slashings',
# Finality
'justification_bits', 'previous_justified_checkpoint', 'current_justified_checkpoint', 'finalized_checkpoint',
]
for field in stable_fields:
assert getattr(pre_state, field) == getattr(post_state, field)
# Modified fields
modified_fields = ['fork']
for field in modified_fields:
assert getattr(pre_state, field) != getattr(post_state, field)
assert pre_state.fork.current_version == post_state.fork.previous_version
assert post_state.fork.current_version == spec.LIGHTCLIENT_PATCH_FORK_VERSION
assert post_state.fork.epoch == spec.get_current_epoch(post_state)
yield 'post', post_state
|
34,559 |
def migrate_mapping_policy_to_rules(
config: Dict[Text, Any], domain: "Domain", rules: List[Dict[Text, Any]]
):
"""
Migrate MappingPolicy to the new RulePolicy,
by updating the config, domain and generating rules.
This function modifies the config, the domain and the rules in place.
"""
policies = config.get("policies", [])
has_mapping_policy = False
has_rule_policy = False
for policy in policies:
if policy.get("name") == "MappingPolicy":
has_mapping_policy = True
if policy.get("name") == "RulePolicy":
has_rule_policy = True
if not has_mapping_policy:
return
has_one_triggered_action = False
for intent, properties in domain.intent_properties.items():
# remove triggers from intents, if any
triggered_action = properties.pop("triggers", None)
if triggered_action:
has_one_triggered_action = True
rules.append(
{
"rule": f"Rule to map `{intent}` intent (automatic conversion)",
"steps": [{"intent": intent}, {"action": triggered_action},],
}
)
# finally update the policies
policies = [policy for policy in policies if policy.get("name") != "MappingPolicy"]
if has_one_triggered_action and not has_rule_policy:
policies.append({"name": "RulePolicy"})
config["policies"] = policies
|
def migrate_mapping_policy_to_rules(
config: Dict[Text, Any], domain: "Domain", rules: List[Dict[Text, Any]]
):
"""
Migrate MappingPolicy to the new RulePolicy,
by updating the config, domain and generating rules.
This function modifies the config, the domain and the rules in place.
"""
policies = config.get("policies", [])
has_mapping_policy = False
has_rule_policy = False
for policy in policies:
if policy.get("name") == MappingPolicy.__name__:
has_mapping_policy = True
if policy.get("name") == "RulePolicy":
has_rule_policy = True
if not has_mapping_policy:
return
has_one_triggered_action = False
for intent, properties in domain.intent_properties.items():
# remove triggers from intents, if any
triggered_action = properties.pop("triggers", None)
if triggered_action:
has_one_triggered_action = True
rules.append(
{
"rule": f"Rule to map `{intent}` intent (automatic conversion)",
"steps": [{"intent": intent}, {"action": triggered_action},],
}
)
# finally update the policies
policies = [policy for policy in policies if policy.get("name") != "MappingPolicy"]
if has_one_triggered_action and not has_rule_policy:
policies.append({"name": "RulePolicy"})
config["policies"] = policies
|
35,100 |
def legalize_pass(
rewriter_cls: DFPatternCallback, name: Optional[str] = "", opt_level: Optional[int] = 1
) -> "RewriterWrapper":
"""
Wraps a pattern rewriter as a module pass.
Parameters
----------
rewriter_cls : DFPatternCallback
Rewrites a matched pattern subject to callback function.
name : Optional[str]
Name for the module pass.
opt_level : Optional[int]
Optimization level for the module pass. Default 1.
Returns
-------
RewriterWrapper
The module pass.
"""
@ir.transform.module_pass(opt_level=opt_level, name=name)
class RewriterWrapper:
def transform_module(self, mod: tvm.ir.IRModule, _) -> tvm.ir.IRModule:
for global_var, func in mod.functions.items():
func = rewrite(rewriter_cls(), func)
mod.update_func(global_var, func)
return mod
return RewriterWrapper
|
def legalize_pass(
rewriter_cls: DFPatternCallback, name: str = "", opt_level: int = 1
) -> "RewriterWrapper":
"""
Wraps a pattern rewriter as a module pass.
Parameters
----------
rewriter_cls : DFPatternCallback
Rewrites a matched pattern subject to callback function.
name : Optional[str]
Name for the module pass.
opt_level : Optional[int]
Optimization level for the module pass. Default 1.
Returns
-------
RewriterWrapper
The module pass.
"""
@ir.transform.module_pass(opt_level=opt_level, name=name)
class RewriterWrapper:
def transform_module(self, mod: tvm.ir.IRModule, _) -> tvm.ir.IRModule:
for global_var, func in mod.functions.items():
func = rewrite(rewriter_cls(), func)
mod.update_func(global_var, func)
return mod
return RewriterWrapper
|
1,323 |
def pairwise_distances(X, Y=None, metric="euclidean", n_jobs=None, **kwds):
""" Compute the distance matrix from a vector array X and optional Y.
This method takes either a vector array or a distance matrix, and returns
a distance matrix. If the input is a vector array, the distances are
computed. If the input is a distances matrix, it is returned instead.
This method provides a safe way to take a distance matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
distance between the arrays from both X and Y.
Valid values for metric are:
- From scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']. These metrics support sparse matrix inputs.
- From scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis',
'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics. These metrics do not support sparse matrix inputs.
Note that in the case of 'cityblock', 'cosine' and 'euclidean' (which are
valid scipy.spatial.distance metrics), the scikit-learn implementation
will be used, which is faster and has support for sparse matrices (except
for 'cityblock'). For a verbose description of the metrics from
scikit-learn, see the __doc__ of the sklearn.pairwise.distance_metrics
function.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
Y : array [n_samples_b, n_features], optional
An optional second feature array. Only allowed if
metric != "precomputed".
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int or None, optional (default=None)
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
**kwds : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
D : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A distance matrix D such that D_{i, j} is the distance between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then D_{i, j} is the distance between the ith array
from X and the jth array from Y.
See also
--------
pairwise_distances_chunked : performs the same calculation as this
function, but returns a generator of chunks of the distance matrix, in
order to limit memory usage.
paired_distances : Computes the distances between corresponding
elements of two arrays
"""
if (metric not in _VALID_METRICS and
not callable(metric) and metric != "precomputed"):
raise ValueError("Unknown metric %s. "
"Valid metrics are %s, or 'precomputed', or a "
"callable" % (metric, _VALID_METRICS))
if metric == "precomputed":
X, _ = check_pairwise_arrays(X, Y, precomputed=True)
whom = ("`pairwise_distances`. Precomputed distances "
"need to have non-negative values.")
check_non_negative(X, whom=whom)
return X
elif metric in PAIRWISE_DISTANCE_FUNCTIONS:
func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
if issparse(X) or issparse(Y):
raise TypeError("scipy distance metrics do not"
" support sparse matrices.")
dtype = bool if metric in PAIRWISE_BOOLEAN_FUNCTIONS else None
if dtype == bool and \
(X.dtype != bool or not ((Y is None) or Y.dtype == bool)):
msg = "Data was converted to boolean for metric %s" % metric
warnings.warn(msg, DataConversionWarning)
X, Y = check_pairwise_arrays(X, Y, dtype=dtype)
# precompute data-derived metric params
params = _precompute_metric_params(X, Y, metric=metric, **kwds)
kwds.update(**params)
if effective_n_jobs(n_jobs) == 1 and X is Y:
return distance.squareform(distance.pdist(X, metric=metric,
**kwds))
func = partial(distance.cdist, metric=metric, **kwds)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
|
def pairwise_distances(X, Y=None, metric="euclidean", n_jobs=None, **kwds):
""" Compute the distance matrix from a vector array X and optional Y.
This method takes either a vector array or a distance matrix, and returns
a distance matrix. If the input is a vector array, the distances are
computed. If the input is a distances matrix, it is returned instead.
This method provides a safe way to take a distance matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
distance between the arrays from both X and Y.
Valid values for metric are:
- From scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']. These metrics support sparse matrix inputs.
- From scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis',
'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics. These metrics do not support sparse matrix inputs.
Note that in the case of 'cityblock', 'cosine' and 'euclidean' (which are
valid scipy.spatial.distance metrics), the scikit-learn implementation
will be used, which is faster and has support for sparse matrices (except
for 'cityblock'). For a verbose description of the metrics from
scikit-learn, see the __doc__ of the sklearn.pairwise.distance_metrics
function.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
Y : array [n_samples_b, n_features], optional
An optional second feature array. Only allowed if
metric != "precomputed".
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int or None, optional (default=None)
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
**kwds : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
D : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A distance matrix D such that D_{i, j} is the distance between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then D_{i, j} is the distance between the ith array
from X and the jth array from Y.
See also
--------
pairwise_distances_chunked : performs the same calculation as this
function, but returns a generator of chunks of the distance matrix, in
order to limit memory usage.
paired_distances : Computes the distances between corresponding
elements of two arrays
"""
if (metric not in _VALID_METRICS and
not callable(metric) and metric != "precomputed"):
raise ValueError("Unknown metric %s. "
"Valid metrics are %s, or 'precomputed', or a "
"callable" % (metric, _VALID_METRICS))
if metric == "precomputed":
X, _ = check_pairwise_arrays(X, Y, precomputed=True)
whom = ("`pairwise_distances`. Precomputed distances "
"need to have non-negative values.")
check_non_negative(X, whom=whom)
return X
elif metric in PAIRWISE_DISTANCE_FUNCTIONS:
func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
if issparse(X) or issparse(Y):
raise TypeError("scipy distance metrics do not"
" support sparse matrices.")
dtype = bool if metric in PAIRWISE_BOOLEAN_FUNCTIONS else None
if dtype == bool and \
(X.dtype != bool or (Y is not None and Y.dtype != bool)):
msg = "Data was converted to boolean for metric %s" % metric
warnings.warn(msg, DataConversionWarning)
X, Y = check_pairwise_arrays(X, Y, dtype=dtype)
# precompute data-derived metric params
params = _precompute_metric_params(X, Y, metric=metric, **kwds)
kwds.update(**params)
if effective_n_jobs(n_jobs) == 1 and X is Y:
return distance.squareform(distance.pdist(X, metric=metric,
**kwds))
func = partial(distance.cdist, metric=metric, **kwds)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
|
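A minimal, concrete call of scikit-learn's pairwise_distances (assuming only that numpy and scikit-learn are installed), showing the symmetry of the returned matrix when Y is omitted:
import numpy as np
from sklearn.metrics import pairwise_distances
X = np.array([[0.0, 0.0], [3.0, 4.0]])
D = pairwise_distances(X, metric="euclidean")
# D == [[0., 5.], [5., 0.]]: D[i, j] is the euclidean distance between rows i and j of X.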
31,381 |
def split_and_escape(key: str, delimiter) -> List[str]:
"""
Split key by delimiter with escape support.
:param key: string which will be split
:param delimiter: delimiter
:return:
"""
regex = r"(?<!\\)" + re.escape(delimiter)
split_keys = map(lambda x: x.replace(r"\{}".format(delimiter), delimiter), re.split(regex, key))
keys = [split_key.strip() for split_key in list(split_keys)]
return keys
|
def split_and_escape(key: str, delimiter) -> List[str]:
"""
Split key by delimiter with escape support.
:param key: string which will be split
:param delimiter: delimiter
:return: a list of the extracted keys
"""
regex = r"(?<!\\)" + re.escape(delimiter)
split_keys = map(lambda x: x.replace(r"\{}".format(delimiter), delimiter), re.split(regex, key))
keys = [split_key.strip() for split_key in list(split_keys)]
return keys
|
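The negative-lookbehind split used above can be exercised directly with the standard re module (the key string below is purely illustrative); an escaped delimiter survives the split and is unescaped afterwards:
import re
key = r"alpha\.beta. gamma .delta"   # "\." is an escaped delimiter, the other dots split
delimiter = "."
regex = r"(?<!\\)" + re.escape(delimiter)
split_keys = (part.replace("\\" + delimiter, delimiter) for part in re.split(regex, key))
keys = [part.strip() for part in split_keys]
# keys == ['alpha.beta', 'gamma', 'delta']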
48,937 |
def write_wale_environment(placeholders, prefix, overwrite):
s3_names = ['WALE_S3_PREFIX', 'WALG_S3_PREFIX', 'AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY',
'WALE_S3_ENDPOINT', 'AWS_ENDPOINT', 'AWS_REGION', 'AWS_INSTANCE_PROFILE',
'WALG_S3_SSE_KMS_ID', 'WALG_S3_SSE', 'WALG_DISABLE_S3_SSE', 'AWS_S3_FORCE_PATH_STYLE']
azure_names = ['AZURE_STORAGE_ACCESS_KEY', 'AZURE_STORAGE_ACCOUNT', 'AZURE_STORAGE_SAS_TOKEN',
'WALG_AZURE_BUFFER_SIZE', 'WALG_AZURE_MAX_BUFFERS', 'WALG_AZ_PREFIX']
gs_names = ['WALE_GS_PREFIX', 'WALG_GS_PREFIX', 'GOOGLE_APPLICATION_CREDENTIALS']
swift_names = ['WALE_SWIFT_PREFIX', 'SWIFT_AUTHURL', 'SWIFT_TENANT', 'SWIFT_TENANT_ID', 'SWIFT_USER',
'SWIFT_USER_ID', 'SWIFT_USER_DOMAIN_NAME', 'SWIFT_USER_DOMAIN_ID', 'SWIFT_PASSWORD',
'SWIFT_AUTH_VERSION', 'SWIFT_ENDPOINT_TYPE', 'SWIFT_REGION', 'SWIFT_DOMAIN_NAME', 'SWIFT_DOMAIN_ID',
'SWIFT_PROJECT_NAME', 'SWIFT_PROJECT_ID', 'SWIFT_PROJECT_DOMAIN_NAME', 'SWIFT_PROJECT_DOMAIN_ID']
ssh_names = WALG_SSH_NAMES
walg_names = ['WALG_DELTA_MAX_STEPS', 'WALG_DELTA_ORIGIN', 'WALG_DOWNLOAD_CONCURRENCY',
'WALG_UPLOAD_CONCURRENCY', 'WALG_UPLOAD_DISK_CONCURRENCY', 'WALG_DISK_RATE_LIMIT',
'WALG_NETWORK_RATE_LIMIT', 'WALG_COMPRESSION_METHOD', 'USE_WALG_BACKUP',
'USE_WALG_RESTORE', 'WALG_BACKUP_COMPRESSION_METHOD', 'WALG_BACKUP_FROM_REPLICA',
'WALG_SENTINEL_USER_DATA', 'WALG_PREVENT_WAL_OVERWRITE', 'WALG_S3_CA_CERT_FILE']
wale = defaultdict(lambda: '')
for name in ['PGVERSION', 'PGPORT', 'WALE_ENV_DIR', 'SCOPE', 'WAL_BUCKET_SCOPE_PREFIX', 'WAL_BUCKET_SCOPE_SUFFIX',
'WAL_S3_BUCKET', 'WAL_GCS_BUCKET', 'WAL_GS_BUCKET', 'WAL_SWIFT_BUCKET', 'BACKUP_NUM_TO_RETAIN',
'ENABLE_WAL_PATH_COMPAT'] + s3_names + swift_names + gs_names + walg_names + azure_names + ssh_names:
wale[name] = placeholders.get(prefix + name, '')
if wale.get('WAL_S3_BUCKET') or wale.get('WALE_S3_PREFIX') or wale.get('WALG_S3_PREFIX'):
wale_endpoint = wale.pop('WALE_S3_ENDPOINT', None)
aws_endpoint = wale.pop('AWS_ENDPOINT', None)
aws_region = wale.pop('AWS_REGION', None)
# for S3-compatible storage we want to specify WALE_S3_ENDPOINT and AWS_ENDPOINT, but not AWS_REGION
if aws_endpoint or wale_endpoint:
if not aws_endpoint:
aws_endpoint = wale_endpoint.replace('+path://', '://')
elif not wale_endpoint:
wale_endpoint = aws_endpoint.replace('://', '+path://')
wale.update(WALE_S3_ENDPOINT=wale_endpoint, AWS_ENDPOINT=aws_endpoint, WALG_DISABLE_S3_SSE='true')
if aws_region and wale.get('USE_WALG_BACKUP') == 'true':
wale['AWS_REGION'] = aws_region
elif not aws_region:
# try to determine region from the endpoint or bucket name
name = wale.get('WAL_S3_BUCKET') or wale.get('WALE_S3_PREFIX')
match = re.search(r'.*(\w{2}-\w+-\d)-.*', name)
if match:
aws_region = match.group(1)
else:
aws_region = placeholders['instance_data']['zone'][:-1]
wale['AWS_REGION'] = aws_region
else:
wale['AWS_REGION'] = aws_region
if not (wale.get('AWS_SECRET_ACCESS_KEY') and wale.get('AWS_ACCESS_KEY_ID')):
wale['AWS_INSTANCE_PROFILE'] = 'true'
if wale.get('USE_WALG_BACKUP') and wale.get('WALG_DISABLE_S3_SSE') != 'true' and not wale.get('WALG_S3_SSE'):
wale['WALG_S3_SSE'] = 'AES256'
write_envdir_names = s3_names + walg_names
elif wale.get('WAL_GCS_BUCKET') or wale.get('WAL_GS_BUCKET') or\
wale.get('WALE_GCS_PREFIX') or wale.get('WALE_GS_PREFIX') or wale.get('WALG_GS_PREFIX'):
if wale.get('WALE_GCS_PREFIX'):
wale['WALE_GS_PREFIX'] = wale['WALE_GCS_PREFIX']
elif wale.get('WAL_GCS_BUCKET'):
wale['WAL_GS_BUCKET'] = wale['WAL_GCS_BUCKET']
write_envdir_names = gs_names + walg_names
elif wale.get('WAL_SWIFT_BUCKET') or wale.get('WALE_SWIFT_PREFIX'):
write_envdir_names = swift_names
elif wale.get("WALG_AZ_PREFIX"):
write_envdir_names = azure_names + walg_names
elif wale.get("WALG_SSH_PREFIX"):
write_envdir_names = ssh_names + walg_names
else:
return
prefix_env_name = write_envdir_names[0]
store_type = prefix_env_name[5:].split('_')[0]
if not wale.get(prefix_env_name): # WALE_*_PREFIX is not defined in the environment
bucket_path = '/spilo/{WAL_BUCKET_SCOPE_PREFIX}{SCOPE}{WAL_BUCKET_SCOPE_SUFFIX}/wal/{PGVERSION}'.format(**wale)
prefix_template = '{0}://{{WAL_{1}_BUCKET}}{2}'.format(store_type.lower(), store_type, bucket_path)
wale[prefix_env_name] = prefix_template.format(**wale)
# Set WALG_*_PREFIX for future compatibility
if store_type in ('S3', 'GS') and not wale.get(write_envdir_names[1]):
wale[write_envdir_names[1]] = wale[prefix_env_name]
if not os.path.exists(wale['WALE_ENV_DIR']):
os.makedirs(wale['WALE_ENV_DIR'])
wale['WALE_LOG_DESTINATION'] = 'stderr'
for name in write_envdir_names + ['WALE_LOG_DESTINATION', 'PGPORT'] + ([] if prefix else ['BACKUP_NUM_TO_RETAIN']):
if wale.get(name):
path = os.path.join(wale['WALE_ENV_DIR'], name)
write_file(wale[name], path, overwrite)
adjust_owner(path, gid=-1)
if not os.path.exists(placeholders['WALE_TMPDIR']):
os.makedirs(placeholders['WALE_TMPDIR'])
os.chmod(placeholders['WALE_TMPDIR'], 0o1777)
write_file(placeholders['WALE_TMPDIR'], os.path.join(wale['WALE_ENV_DIR'], 'TMPDIR'), True)
|
def write_wale_environment(placeholders, prefix, overwrite):
s3_names = ['WALE_S3_PREFIX', 'WALG_S3_PREFIX', 'AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY',
'WALE_S3_ENDPOINT', 'AWS_ENDPOINT', 'AWS_REGION', 'AWS_INSTANCE_PROFILE',
'WALG_S3_SSE_KMS_ID', 'WALG_S3_SSE', 'WALG_DISABLE_S3_SSE', 'AWS_S3_FORCE_PATH_STYLE']
azure_names = ['WALG_AZ_PREFIX', 'AZURE_STORAGE_ACCOUNT', 'AZURE_STORAGE_ACCESS_KEY',
'AZURE_STORAGE_SAS_TOKEN', 'WALG_AZURE_BUFFER_SIZE', 'WALG_AZURE_MAX_BUFFERS']
gs_names = ['WALE_GS_PREFIX', 'WALG_GS_PREFIX', 'GOOGLE_APPLICATION_CREDENTIALS']
swift_names = ['WALE_SWIFT_PREFIX', 'SWIFT_AUTHURL', 'SWIFT_TENANT', 'SWIFT_TENANT_ID', 'SWIFT_USER',
'SWIFT_USER_ID', 'SWIFT_USER_DOMAIN_NAME', 'SWIFT_USER_DOMAIN_ID', 'SWIFT_PASSWORD',
'SWIFT_AUTH_VERSION', 'SWIFT_ENDPOINT_TYPE', 'SWIFT_REGION', 'SWIFT_DOMAIN_NAME', 'SWIFT_DOMAIN_ID',
'SWIFT_PROJECT_NAME', 'SWIFT_PROJECT_ID', 'SWIFT_PROJECT_DOMAIN_NAME', 'SWIFT_PROJECT_DOMAIN_ID']
ssh_names = WALG_SSH_NAMES
walg_names = ['WALG_DELTA_MAX_STEPS', 'WALG_DELTA_ORIGIN', 'WALG_DOWNLOAD_CONCURRENCY',
'WALG_UPLOAD_CONCURRENCY', 'WALG_UPLOAD_DISK_CONCURRENCY', 'WALG_DISK_RATE_LIMIT',
'WALG_NETWORK_RATE_LIMIT', 'WALG_COMPRESSION_METHOD', 'USE_WALG_BACKUP',
'USE_WALG_RESTORE', 'WALG_BACKUP_COMPRESSION_METHOD', 'WALG_BACKUP_FROM_REPLICA',
'WALG_SENTINEL_USER_DATA', 'WALG_PREVENT_WAL_OVERWRITE', 'WALG_S3_CA_CERT_FILE']
wale = defaultdict(lambda: '')
for name in ['PGVERSION', 'PGPORT', 'WALE_ENV_DIR', 'SCOPE', 'WAL_BUCKET_SCOPE_PREFIX', 'WAL_BUCKET_SCOPE_SUFFIX',
'WAL_S3_BUCKET', 'WAL_GCS_BUCKET', 'WAL_GS_BUCKET', 'WAL_SWIFT_BUCKET', 'BACKUP_NUM_TO_RETAIN',
'ENABLE_WAL_PATH_COMPAT'] + s3_names + swift_names + gs_names + walg_names + azure_names + ssh_names:
wale[name] = placeholders.get(prefix + name, '')
if wale.get('WAL_S3_BUCKET') or wale.get('WALE_S3_PREFIX') or wale.get('WALG_S3_PREFIX'):
wale_endpoint = wale.pop('WALE_S3_ENDPOINT', None)
aws_endpoint = wale.pop('AWS_ENDPOINT', None)
aws_region = wale.pop('AWS_REGION', None)
# for S3-compatible storage we want to specify WALE_S3_ENDPOINT and AWS_ENDPOINT, but not AWS_REGION
if aws_endpoint or wale_endpoint:
if not aws_endpoint:
aws_endpoint = wale_endpoint.replace('+path://', '://')
elif not wale_endpoint:
wale_endpoint = aws_endpoint.replace('://', '+path://')
wale.update(WALE_S3_ENDPOINT=wale_endpoint, AWS_ENDPOINT=aws_endpoint, WALG_DISABLE_S3_SSE='true')
if aws_region and wale.get('USE_WALG_BACKUP') == 'true':
wale['AWS_REGION'] = aws_region
elif not aws_region:
# try to determine region from the endpoint or bucket name
name = wale.get('WAL_S3_BUCKET') or wale.get('WALE_S3_PREFIX')
match = re.search(r'.*(\w{2}-\w+-\d)-.*', name)
if match:
aws_region = match.group(1)
else:
aws_region = placeholders['instance_data']['zone'][:-1]
wale['AWS_REGION'] = aws_region
else:
wale['AWS_REGION'] = aws_region
if not (wale.get('AWS_SECRET_ACCESS_KEY') and wale.get('AWS_ACCESS_KEY_ID')):
wale['AWS_INSTANCE_PROFILE'] = 'true'
if wale.get('USE_WALG_BACKUP') and wale.get('WALG_DISABLE_S3_SSE') != 'true' and not wale.get('WALG_S3_SSE'):
wale['WALG_S3_SSE'] = 'AES256'
write_envdir_names = s3_names + walg_names
elif wale.get('WAL_GCS_BUCKET') or wale.get('WAL_GS_BUCKET') or\
wale.get('WALE_GCS_PREFIX') or wale.get('WALE_GS_PREFIX') or wale.get('WALG_GS_PREFIX'):
if wale.get('WALE_GCS_PREFIX'):
wale['WALE_GS_PREFIX'] = wale['WALE_GCS_PREFIX']
elif wale.get('WAL_GCS_BUCKET'):
wale['WAL_GS_BUCKET'] = wale['WAL_GCS_BUCKET']
write_envdir_names = gs_names + walg_names
elif wale.get('WAL_SWIFT_BUCKET') or wale.get('WALE_SWIFT_PREFIX'):
write_envdir_names = swift_names
elif wale.get("WALG_AZ_PREFIX"):
write_envdir_names = azure_names + walg_names
elif wale.get("WALG_SSH_PREFIX"):
write_envdir_names = ssh_names + walg_names
else:
return
prefix_env_name = write_envdir_names[0]
store_type = prefix_env_name[5:].split('_')[0]
if not wale.get(prefix_env_name): # WALE_*_PREFIX is not defined in the environment
bucket_path = '/spilo/{WAL_BUCKET_SCOPE_PREFIX}{SCOPE}{WAL_BUCKET_SCOPE_SUFFIX}/wal/{PGVERSION}'.format(**wale)
prefix_template = '{0}://{{WAL_{1}_BUCKET}}{2}'.format(store_type.lower(), store_type, bucket_path)
wale[prefix_env_name] = prefix_template.format(**wale)
# Set WALG_*_PREFIX for future compatibility
if store_type in ('S3', 'GS') and not wale.get(write_envdir_names[1]):
wale[write_envdir_names[1]] = wale[prefix_env_name]
if not os.path.exists(wale['WALE_ENV_DIR']):
os.makedirs(wale['WALE_ENV_DIR'])
wale['WALE_LOG_DESTINATION'] = 'stderr'
for name in write_envdir_names + ['WALE_LOG_DESTINATION', 'PGPORT'] + ([] if prefix else ['BACKUP_NUM_TO_RETAIN']):
if wale.get(name):
path = os.path.join(wale['WALE_ENV_DIR'], name)
write_file(wale[name], path, overwrite)
adjust_owner(path, gid=-1)
if not os.path.exists(placeholders['WALE_TMPDIR']):
os.makedirs(placeholders['WALE_TMPDIR'])
os.chmod(placeholders['WALE_TMPDIR'], 0o1777)
write_file(placeholders['WALE_TMPDIR'], os.path.join(wale['WALE_ENV_DIR'], 'TMPDIR'), True)
|
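The '+path://' substitution above converts between the scheme style expected in WALE_S3_ENDPOINT and the plain URL used for AWS_ENDPOINT; a tiny standalone illustration of the two replace() conversions (the endpoint value is made up):
wale_endpoint = 'https+path://s3.example.org:443'
aws_endpoint = wale_endpoint.replace('+path://', '://')      # 'https://s3.example.org:443'
assert aws_endpoint.replace('://', '+path://') == wale_endpoint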
20,080 |
def get_deployment_updates_manager(preview=False):
"""
Get the current app's deployment updates manager, create if necessary
"""
if preview:
return current_app.config.setdefault(
'deployment_updates_preview_manager',
DeploymentUpdateManager(get_read_only_storage_manager())
)
return current_app.config.setdefault(
'deployment_updates_manager',
DeploymentUpdateManager(get_storage_manager())
)
|
def get_deployment_updates_manager(preview=False):
"""
[k for k in old_settings.get('default_schedules', [])]
"""
if preview:
return current_app.config.setdefault(
'deployment_updates_preview_manager',
DeploymentUpdateManager(get_read_only_storage_manager())
)
return current_app.config.setdefault(
'deployment_updates_manager',
DeploymentUpdateManager(get_storage_manager())
)
|
53,137 |
def complete_set_root(args):
"""Set the root directory within the context of a cli completion operation."""
# if we have already set to something other than empty string break
if get_root() != '':
return
root = os.getenv('DDEV_ROOT', '')
if root and os.path.isdir(root):
set_root(root)
else:
config = load_config()
repo_map = {
'--core': 'core',
'-c': 'core',
'--extras': 'extras',
'-e': 'extras',
'--agent': 'agent',
'-a': 'agent',
'--here': 'here',
'-x': 'here',
}
for arg in args:
if arg in repo_map:
repo_choice = repo_map[arg]
break
else:
repo_choice = config.get('repo', 'core')
root = os.path.expanduser(config.get(repo_choice, ''))
if repo_choice == 'here' or not os.path.exists(root):
root = os.getcwd()
set_root(root)
|
def complete_set_root(args):
"""Set the root directory within the context of a cli completion operation."""
# if we have already set to something other than empty string break
existing_root = get_root()
if existing_root:
return
return
root = os.getenv('DDEV_ROOT', '')
if root and os.path.isdir(root):
set_root(root)
else:
config = load_config()
repo_map = {
'--core': 'core',
'-c': 'core',
'--extras': 'extras',
'-e': 'extras',
'--agent': 'agent',
'-a': 'agent',
'--here': 'here',
'-x': 'here',
}
for arg in args:
if arg in repo_map:
repo_choice = repo_map[arg]
break
else:
repo_choice = config.get('repo', 'core')
root = os.path.expanduser(config.get(repo_choice, ''))
if repo_choice == 'here' or not os.path.exists(root):
root = os.getcwd()
set_root(root)
|
54,789 |
def delete_config(filename="config.toml", directory=None):
"""Delete a configuration file
Keyword Args:
filename (str): the configuration file to delete
directory (str): the directory of the configuration file; if None, use the
default directory
"""
if directory is None:
file_path = get_default_config_path(filename)
else:
file_path = os.path.join(directory, filename)
os.remove(file_path)
|
def delete_config(filename="config.toml", directory=None):
"""Delete a configuration file
Keyword Args:
filename (str): the configuration file to delete
directory (str): The directory of the configuration file to delete.
If ``None``, the currently active configuration file is deleted.
"""
if directory is None:
file_path = get_default_config_path(filename)
else:
file_path = os.path.join(directory, filename)
os.remove(file_path)
|
21,795 |
def get_recent_users(
txn: LoggingTransaction, since_ms: int, exclude_app_service: bool
) -> List[UserInfo]:
"""Fetches recently registered users and some info on them."""
sql = """
SELECT name, creation_ts FROM users
WHERE
? <= creation_ts
AND deactivated = 0
"""
if exclude_app_service:
sql += "\n AND appservice_id IS NOT NULL"
txn.execute(sql, (since_ms / 1000,))
user_infos = [UserInfo(user_id, creation_ts) for user_id, creation_ts in txn]
for user_info in user_infos:
user_info.emails = DatabasePool.simple_select_onecol_txn(
txn,
table="user_threepids",
keyvalues={"user_id": user_info.user_id, "medium": "email"},
retcol="address",
)
sql = """
SELECT room_id, canonical_alias, name, join_rules
FROM local_current_membership
INNER JOIN room_stats_state USING (room_id)
WHERE user_id = ? AND membership = 'join'
"""
txn.execute(sql, (user_info.user_id,))
for room_id, canonical_alias, name, join_rules in txn:
if join_rules == "public":
user_info.public_rooms.append(canonical_alias or name or room_id)
else:
user_info.private_rooms.append(canonical_alias or name or room_id)
user_info.ips = DatabasePool.simple_select_onecol_txn(
txn,
table="user_ips",
keyvalues={"user_id": user_info.user_id},
retcol="ip",
)
return user_infos
|
def get_recent_users(
txn: LoggingTransaction, since_ms: int, exclude_app_service: bool
) -> List[UserInfo]:
"""Fetches recently registered users and some info on them."""
sql = """
SELECT name, creation_ts FROM users
WHERE
? <= creation_ts
AND deactivated = 0
"""
if exclude_app_service:
sql += " AND appservice_id IS NOT NULL"
txn.execute(sql, (since_ms / 1000,))
user_infos = [UserInfo(user_id, creation_ts) for user_id, creation_ts in txn]
for user_info in user_infos:
user_info.emails = DatabasePool.simple_select_onecol_txn(
txn,
table="user_threepids",
keyvalues={"user_id": user_info.user_id, "medium": "email"},
retcol="address",
)
sql = """
SELECT room_id, canonical_alias, name, join_rules
FROM local_current_membership
INNER JOIN room_stats_state USING (room_id)
WHERE user_id = ? AND membership = 'join'
"""
txn.execute(sql, (user_info.user_id,))
for room_id, canonical_alias, name, join_rules in txn:
if join_rules == "public":
user_info.public_rooms.append(canonical_alias or name or room_id)
else:
user_info.private_rooms.append(canonical_alias or name or room_id)
user_info.ips = DatabasePool.simple_select_onecol_txn(
txn,
table="user_ips",
keyvalues={"user_id": user_info.user_id},
retcol="ip",
)
return user_infos
|
46,542 |
def build_spec(version: str, source_files: List[str]) -> str:
all_spescs = [get_spec(spec) for spec in source_files]
spec_object = all_spescs[0]
for value in all_spescs[1:]:
spec_object = combine_spec_objects(spec_object, value)
dependency_order_spec(spec_object)
return objects_to_spec(spec_object, version_imports[version], version)
|
def build_spec(version: str, source_files: List[str]) -> str:
all_spescs = [get_spec(spec) for spec in source_files]
spec_object = all_spescs[0]
for value in all_specs[1:]:
spec_object = combine_spec_objects(spec_object, value)
dependency_order_spec(spec_object)
return objects_to_spec(spec_object, version_imports[version], version)
|
37,619 |
def backend_monitor(backend):
"""Monitor a single IBMQ backend.
Args:
backend (IBMQBackend): Backend to monitor.
Raises:
QiskitError: Input is not a IBMQ backend.
MissingOptionalLibraryError: If qiskit-ibmq-provider is not installed
"""
warnings.warn(
"The qiskit.IBMQ entrypoint and the qiskit-ibmq-provider package ("
"accessible from 'qiskit.providers.ibmq`) are deprecated and will be removed "
"in a future release. Instead you should use the qiskit-ibm-provider package "
"which is accesible from 'qiskit_ibm_provider'.",
DeprecationWarning,
stacklevel=2,
)
try:
from qiskit.providers.ibmq import IBMQBackend
except ImportError as ex:
raise MissingOptionalLibraryError(
libname="qiskit-ibmq-provider",
name="backend_monitor",
pip_install="pip install qiskit-ibmq-provider",
) from ex
if not isinstance(backend, IBMQBackend):
raise QiskitError("Input variable is not of type IBMQBackend.")
config = backend.configuration().to_dict()
status = backend.status().to_dict()
config_dict = {**status, **config}
print(backend.name())
print("=" * len(backend.name()))
print("Configuration")
print("-" * 13)
offset = " "
upper_list = [
"n_qubits",
"operational",
"status_msg",
"pending_jobs",
"backend_version",
"basis_gates",
"local",
"simulator",
]
lower_list = list(set(config_dict.keys()).difference(upper_list))
# Remove gates because they are in a different tab
lower_list.remove("gates")
for item in upper_list + lower_list:
print(offset + item + ":", config_dict[item])
# Stop here if simulator
if config["simulator"]:
return
print()
props = backend.properties()
qubit_header = None
sep = " / "
for index, qubit_data in enumerate(props.qubits):
name = "Q%s" % index
gate_data = [gate for gate in props.gates if gate.qubits == [index]]
cal_data = dict.fromkeys(["T1", "T2", "frequency", "readout_error"], "Unknown")
for nduv in qubit_data:
if nduv.name in cal_data:
cal_data[nduv.name] = format(nduv.value, ".5f") + " " + nduv.unit
gate_names = []
gate_error = []
for gd in gate_data:
if gd.gate in ["id"]:
continue
try:
gate_error.append(format(props.gate_error(gd.gate, index), ".5f"))
gate_names.append(gd.gate.upper() + " err")
except QiskitError:
pass
if not qubit_header:
qubit_header = (
"Qubits [Name / Freq / T1 / T2" + sep.join([""] + gate_names) + " / Readout err]"
)
print(qubit_header)
print("-" * len(qubit_header))
qstr = sep.join(
[name, cal_data["frequency"], cal_data["T1"], cal_data["T2"]]
+ gate_error
+ [cal_data["readout_error"]]
)
print(offset + qstr)
print()
multi_qubit_gates = [g for g in props.gates if len(g.qubits) > 1]
multi_header = "Multi-Qubit Gates [Name / Type / Gate Error]"
print(multi_header)
print("-" * len(multi_header))
for gate in multi_qubit_gates:
qubits = gate.qubits
ttype = gate.gate
error = "Unknown"
try:
error = format(props.gate_error(gate.gate, qubits), ".5f")
except QiskitError:
pass
mstr = sep.join([f"{ttype}{qubits[0]}_{qubits[1]}", ttype, str(error)])
print(offset + mstr)
|
def backend_monitor(backend):
"""Monitor a single IBMQ backend.
Args:
backend (IBMQBackend): Backend to monitor.
Raises:
        QiskitError: Input is not an IBMQ backend.
MissingOptionalLibraryError: If qiskit-ibmq-provider is not installed
"""
warnings.warn(
"The qiskit.IBMQ entrypoint and the qiskit-ibmq-provider package ("
"accessible from 'qiskit.providers.ibmq`) are deprecated and will be removed "
"in a future release. Instead you should use the qiskit-ibm-provider package "
"which is accessible from 'qiskit_ibm_provider'.",
DeprecationWarning,
stacklevel=2,
)
try:
from qiskit.providers.ibmq import IBMQBackend
except ImportError as ex:
raise MissingOptionalLibraryError(
libname="qiskit-ibmq-provider",
name="backend_monitor",
pip_install="pip install qiskit-ibmq-provider",
) from ex
if not isinstance(backend, IBMQBackend):
raise QiskitError("Input variable is not of type IBMQBackend.")
config = backend.configuration().to_dict()
status = backend.status().to_dict()
config_dict = {**status, **config}
print(backend.name())
print("=" * len(backend.name()))
print("Configuration")
print("-" * 13)
offset = " "
upper_list = [
"n_qubits",
"operational",
"status_msg",
"pending_jobs",
"backend_version",
"basis_gates",
"local",
"simulator",
]
lower_list = list(set(config_dict.keys()).difference(upper_list))
# Remove gates because they are in a different tab
lower_list.remove("gates")
for item in upper_list + lower_list:
print(offset + item + ":", config_dict[item])
# Stop here if simulator
if config["simulator"]:
return
print()
props = backend.properties()
qubit_header = None
sep = " / "
for index, qubit_data in enumerate(props.qubits):
name = "Q%s" % index
gate_data = [gate for gate in props.gates if gate.qubits == [index]]
cal_data = dict.fromkeys(["T1", "T2", "frequency", "readout_error"], "Unknown")
for nduv in qubit_data:
if nduv.name in cal_data:
cal_data[nduv.name] = format(nduv.value, ".5f") + " " + nduv.unit
gate_names = []
gate_error = []
for gd in gate_data:
if gd.gate in ["id"]:
continue
try:
gate_error.append(format(props.gate_error(gd.gate, index), ".5f"))
gate_names.append(gd.gate.upper() + " err")
except QiskitError:
pass
if not qubit_header:
qubit_header = (
"Qubits [Name / Freq / T1 / T2" + sep.join([""] + gate_names) + " / Readout err]"
)
print(qubit_header)
print("-" * len(qubit_header))
qstr = sep.join(
[name, cal_data["frequency"], cal_data["T1"], cal_data["T2"]]
+ gate_error
+ [cal_data["readout_error"]]
)
print(offset + qstr)
print()
multi_qubit_gates = [g for g in props.gates if len(g.qubits) > 1]
multi_header = "Multi-Qubit Gates [Name / Type / Gate Error]"
print(multi_header)
print("-" * len(multi_header))
for gate in multi_qubit_gates:
qubits = gate.qubits
ttype = gate.gate
error = "Unknown"
try:
error = format(props.gate_error(gate.gate, qubits), ".5f")
except QiskitError:
pass
mstr = sep.join([f"{ttype}{qubits[0]}_{qubits[1]}", ttype, str(error)])
print(offset + mstr)
|
43,695 |
def bit_driver(wires, n):
r"""Returns the bit-driver cost Hamiltonian component.
This Hamiltonian is defined as:
.. math:: H \ = \ (-1)^{n + 1} \displaystyle\sum_{i} Z_i
where :math:`Z_i` is the Pauli-Z operator acting on the
:math:`i`-th wire and :math:`n \ \in \ \{0, \ 1\}`. This Hamiltonian is often used as a term when
constructing larger QAOA cost Hamiltonians.
Args:
wires (Iterable or Wires): The wires on which the returned Hamiltonian acts
n (int): Either :math:`0` or :math:`1`. Determines whether the Hamiltonian assigns
lower energies to bitstrings with more :math:`0`s or :math:`1`s, respectively.
Returns:
.Hamiltonian
**Example**
>>> wires = range(3)
>>> hamiltonian = qaoa.pauli_driver(wires, 1)
>>> print(hamiltonian)
(1.0) [Z0] + (1.0) [Z1] + (1.0) [Z2]
"""
if n == 0:
coeffs = [-1 for _ in wires]
elif n == 1:
coeffs = [1 for _ in wires]
else:
raise ValueError("'state' argument must be either 0 or 1, got {}".format(n))
ops = [qml.PauliZ(w) for w in wires]
return qml.Hamiltonian(coeffs, ops)
|
def bit_driver(wires, n):
r"""Returns the bit-driver cost Hamiltonian component.
This Hamiltonian is defined as:
.. math:: H \ = \ (-1)^{n + 1} \displaystyle\sum_{i} Z_i
where :math:`Z_i` is the Pauli-Z operator acting on the
:math:`i`-th wire and :math:`n \ \in \ \{0, \ 1\}`. This Hamiltonian is often used as a term when
constructing larger QAOA cost Hamiltonians.
Args:
wires (Iterable or Wires): The wires on which the returned Hamiltonian acts
n (int): Either :math:`0` or :math:`1`. Determines whether the Hamiltonian assigns
lower energies to bitstrings with more :math:`0`s or :math:`1`s, respectively.
Returns:
.Hamiltonian
**Example**
>>> wires = range(3)
>>> hamiltonian = qaoa.bit_driver(wires, 1)
>>> print(hamiltonian)
(1.0) [Z0] + (1.0) [Z1] + (1.0) [Z2]
"""
if n == 0:
coeffs = [-1 for _ in wires]
elif n == 1:
coeffs = [1 for _ in wires]
else:
raise ValueError("'state' argument must be either 0 or 1, got {}".format(n))
ops = [qml.PauliZ(w) for w in wires]
return qml.Hamiltonian(coeffs, ops)
|
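A short usage sketch mirroring the docstring example, assuming PennyLane is installed:

from pennylane import qaoa

# n=1 favours bitstrings with more 1s; n=0 would favour more 0s.
hamiltonian = qaoa.bit_driver(range(3), 1)
print(hamiltonian)
# Expected output (up to formatting): (1.0) [Z0] + (1.0) [Z1] + (1.0) [Z2]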
55,776 |
def test_hold_to_pan_zoom():
data = np.random.randint(0, high=255, size=(100, 100)).astype('uint8')
layer = Labels(data)
layer.mode = 'paint'
# need to go through the generator
_ = list(key_bindings.hold_to_pan_zoom(layer))
|
def test_hold_to_pan_zoom():
data = np.random.randint(0, high=255, size=(100, 100)).astype('uint8')
layer = Labels(data)
layer.mode = 'paint'
# need to go through the generator
gen = key_bindings.hold_to_pan_zoom(layer)
next(gen)
assert layer.mode == 'pan_zoom'
with contextlib.suppress(StopIteration):
next(gen)
assert layer.mode == 'paint'
|
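The updated test steps the hold-to-pan-zoom generator by hand: the first next() applies the temporary mode, and exhausting the generator restores the original one. The standalone sketch below illustrates the same pattern with a hypothetical helper, not napari's actual implementation.

import contextlib

def hold_state(obj, attr, value):
    # Generator-based "hold" helper: apply a temporary value, restore it on exhaustion.
    previous = getattr(obj, attr)
    setattr(obj, attr, value)
    yield
    setattr(obj, attr, previous)

class Layer:
    mode = 'paint'

layer = Layer()
gen = hold_state(layer, 'mode', 'pan_zoom')
next(gen)                          # key pressed -> temporary mode active
assert layer.mode == 'pan_zoom'
with contextlib.suppress(StopIteration):
    next(gen)                      # key released -> original mode restored
assert layer.mode == 'paint'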
4,124 |
def approx_pi(n: cython.int=10000000):
val: cython.double = 0.
k: cython.int
for k in range(1, n + 1):
val += recip_square(k)
return (6 * val) ** .5
|
def approx_pi(n: cython.int = 10000000):
val: cython.double = 0.
k: cython.int
for k in range(1, n + 1):
val += recip_square(k)
return (6 * val) ** .5
|
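approx_pi depends on a recip_square helper that is not shown in this record; in Cython's pure-Python mode it might look like the sketch below (an assumption modelled on the standard Cython profiling tutorial, not this document's source).

import cython

@cython.cfunc
def recip_square(k: cython.longlong) -> cython.double:
    # One 1/k**2 term of the Basel series summed by approx_pi.
    return 1.0 / (k * k)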
8,924 |
def ctcp(function=None, *command_list):
"""Decorate a callable to trigger on CTCP commands (mostly, ``ACTION``).
:param str ctcp_command: one or more CTCP command(s) on which to trigger
(really, the only useful value is ``ACTION``)
.. versionadded:: 7.1
This is now ``ctcp`` instead of ``intent``, and it can be called
without argument, assuming ``ACTION`` in that case.
.. note::
This used to be ``@intent``, for a long dead feature in the IRCv3 spec.
It is now replaced by ``@ctcp``, which can be used without arguments.
In that case, Sopel will assume to trigger on ``ACTION``.
As ``sopel.module`` will be removed in Sopel 9, so will ``@intent``.
"""
default_commands = ('ACTION',) + command_list
if function is None:
return ctcp(*default_commands) # called as ``@ctcp()``
elif callable(function):
# called as ``@ctcp`` or ``@ctcp(function)``
# or even ``@ctcp(function, 'ACTION', ...)``
return ctcp(*default_commands)(function)
# function is not None, and it is not a callable
# called as ``@ctcp('ACTION', ...)``
ctcp_commands = (function,) + command_list
def add_attribute(function):
function._sopel_callable = True
if not hasattr(function, "intents"):
function.intents = []
for name in ctcp_commands:
if name not in function.intents:
function.intents.append(name)
return function
return add_attribute
|
def ctcp(function=None, *command_list):
"""Decorate a callable to trigger on CTCP commands (mostly, ``ACTION``).
:param str ctcp_command: one or more CTCP command(s) on which to trigger
(the most commonly useful value is ``ACTION``)
.. versionadded:: 7.1
This is now ``ctcp`` instead of ``intent``, and it can be called
without argument, assuming ``ACTION`` in that case.
.. note::
This used to be ``@intent``, for a long dead feature in the IRCv3 spec.
It is now replaced by ``@ctcp``, which can be used without arguments.
In that case, Sopel will assume to trigger on ``ACTION``.
As ``sopel.module`` will be removed in Sopel 9, so will ``@intent``.
"""
default_commands = ('ACTION',) + command_list
if function is None:
return ctcp(*default_commands) # called as ``@ctcp()``
elif callable(function):
# called as ``@ctcp`` or ``@ctcp(function)``
# or even ``@ctcp(function, 'ACTION', ...)``
return ctcp(*default_commands)(function)
# function is not None, and it is not a callable
# called as ``@ctcp('ACTION', ...)``
ctcp_commands = (function,) + command_list
def add_attribute(function):
function._sopel_callable = True
if not hasattr(function, "intents"):
function.intents = []
for name in ctcp_commands:
if name not in function.intents:
function.intents.append(name)
return function
return add_attribute
|
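A brief usage sketch of the three call forms the decorator supports; the callback names and bodies are hypothetical.

@ctcp                        # bare decorator: assumes ACTION
def handle_action(bot, trigger):
    bot.say('got an ACTION')

@ctcp()                      # called with no arguments: also ACTION
def handle_action_again(bot, trigger):
    bot.say('got an ACTION via the call form')

@ctcp('ACTION', 'VERSION')   # explicit CTCP command names
def handle_ctcp(bot, trigger):
    bot.say('got a CTCP command')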
23,280 |
def attrs(
maybe_cls=None,
these=None,
repr_ns=None,
repr=None,
cmp=None,
hash=None,
init=None,
slots=False,
frozen=False,
weakref_slot=True,
str=False,
auto_attribs=False,
kw_only=False,
cache_hash=False,
auto_exc=False,
eq=None,
order=None,
auto_detect=False,
collect_by_mro=False,
getstate_setstate=None,
on_setattr=None,
field_transformer=None,
match_args=True,
):
r"""
A class decorator that adds `dunder
<https://wiki.python.org/moin/DunderAlias>`_\ -methods according to the
specified attributes using `attr.ib` or the *these* argument.
:param these: A dictionary of name to `attr.ib` mappings. This is
useful to avoid the definition of your attributes within the class body
because you can't (e.g. if you want to add ``__repr__`` methods to
Django models) or don't want to.
If *these* is not ``None``, ``attrs`` will *not* search the class body
for attributes and will *not* remove any attributes from it.
If *these* is an ordered dict (`dict` on Python 3.6+,
`collections.OrderedDict` otherwise), the order is deduced from
the order of the attributes inside *these*. Otherwise the order
of the definition of the attributes is used.
:type these: `dict` of `str` to `attr.ib`
:param str repr_ns: When using nested classes, there's no way in Python 2
to automatically detect that. Therefore it's possible to set the
namespace explicitly for a more meaningful ``repr`` output.
:param bool auto_detect: Instead of setting the *init*, *repr*, *eq*,
*order*, and *hash* arguments explicitly, assume they are set to
``True`` **unless any** of the involved methods for one of the
arguments is implemented in the *current* class (i.e. it is *not*
inherited from some base class).
So for example by implementing ``__eq__`` on a class yourself,
``attrs`` will deduce ``eq=False`` and will create *neither*
``__eq__`` *nor* ``__ne__`` (but Python classes come with a sensible
``__ne__`` by default, so it *should* be enough to only implement
``__eq__`` in most cases).
.. warning::
If you prevent ``attrs`` from creating the ordering methods for you
(``order=False``, e.g. by implementing ``__le__``), it becomes
*your* responsibility to make sure its ordering is sound. The best
way is to use the `functools.total_ordering` decorator.
Passing ``True`` or ``False`` to *init*, *repr*, *eq*, *order*,
*cmp*, or *hash* overrides whatever *auto_detect* would determine.
*auto_detect* requires Python 3. Setting it ``True`` on Python 2 raises
an `attrs.exceptions.PythonTooOldError`.
:param bool repr: Create a ``__repr__`` method with a human readable
        representation of ``attrs`` attributes.
:param bool str: Create a ``__str__`` method that is identical to
``__repr__``. This is usually not necessary except for
`Exception`\ s.
:param Optional[bool] eq: If ``True`` or ``None`` (default), add ``__eq__``
and ``__ne__`` methods that check two instances for equality.
They compare the instances as if they were tuples of their ``attrs``
attributes if and only if the types of both classes are *identical*!
:param Optional[bool] order: If ``True``, add ``__lt__``, ``__le__``,
``__gt__``, and ``__ge__`` methods that behave like *eq* above and
allow instances to be ordered. If ``None`` (default) mirror value of
*eq*.
:param Optional[bool] cmp: Setting *cmp* is equivalent to setting *eq*
and *order* to the same value. Must not be mixed with *eq* or *order*.
:param Optional[bool] hash: If ``None`` (default), the ``__hash__`` method
        is generated according to how *eq* and *frozen* are set.
1. If *both* are True, ``attrs`` will generate a ``__hash__`` for you.
2. If *eq* is True and *frozen* is False, ``__hash__`` will be set to
None, marking it unhashable (which it is).
3. If *eq* is False, ``__hash__`` will be left untouched meaning the
``__hash__`` method of the base class will be used (if base class is
``object``, this means it will fall back to id-based hashing.).
Although not recommended, you can decide for yourself and force
``attrs`` to create one (e.g. if the class is immutable even though you
didn't freeze it programmatically) by passing ``True`` or not. Both of
these cases are rather special and should be used carefully.
See our documentation on `hashing`, Python's documentation on
`object.__hash__`, and the `GitHub issue that led to the default \
behavior <https://github.com/python-attrs/attrs/issues/136>`_ for more
details.
:param bool init: Create a ``__init__`` method that initializes the
``attrs`` attributes. Leading underscores are stripped for the argument
name. If a ``__attrs_pre_init__`` method exists on the class, it will
be called before the class is initialized. If a ``__attrs_post_init__``
method exists on the class, it will be called after the class is fully
initialized.
If ``init`` is ``False``, an ``__attrs_init__`` method will be
injected instead. This allows you to define a custom ``__init__``
method that can do pre-init work such as ``super().__init__()``,
and then call ``__attrs_init__()`` and ``__attrs_post_init__()``.
:param bool slots: Create a `slotted class <slotted classes>` that's more
memory-efficient. Slotted classes are generally superior to the default
dict classes, but have some gotchas you should know about, so we
encourage you to read the `glossary entry <slotted classes>`.
:param bool frozen: Make instances immutable after initialization. If
someone attempts to modify a frozen instance,
`attr.exceptions.FrozenInstanceError` is raised.
.. note::
1. This is achieved by installing a custom ``__setattr__`` method
on your class, so you can't implement your own.
2. True immutability is impossible in Python.
        3. This *does* have a minor runtime performance `impact
<how-frozen>` when initializing new instances. In other words:
``__init__`` is slightly slower with ``frozen=True``.
4. If a class is frozen, you cannot modify ``self`` in
``__attrs_post_init__`` or a self-written ``__init__``. You can
circumvent that limitation by using
``object.__setattr__(self, "attribute_name", value)``.
5. Subclasses of a frozen class are frozen too.
:param bool weakref_slot: Make instances weak-referenceable. This has no
effect unless ``slots`` is also enabled.
:param bool auto_attribs: If ``True``, collect `PEP 526`_-annotated
attributes (Python 3.6 and later only) from the class body.
In this case, you **must** annotate every field. If ``attrs``
encounters a field that is set to an `attr.ib` but lacks a type
annotation, an `attr.exceptions.UnannotatedAttributeError` is
raised. Use ``field_name: typing.Any = attr.ib(...)`` if you don't
want to set a type.
If you assign a value to those attributes (e.g. ``x: int = 42``), that
value becomes the default value like if it were passed using
``attr.ib(default=42)``. Passing an instance of `attrs.Factory` also
works as expected in most cases (see warning below).
Attributes annotated as `typing.ClassVar`, and attributes that are
neither annotated nor set to an `attr.ib` are **ignored**.
.. warning::
For features that use the attribute name to create decorators (e.g.
`validators <validators>`), you still *must* assign `attr.ib` to
them. Otherwise Python will either not find the name or try to use
the default value to call e.g. ``validator`` on it.
These errors can be quite confusing and probably the most common bug
report on our bug tracker.
.. _`PEP 526`: https://www.python.org/dev/peps/pep-0526/
:param bool kw_only: Make all attributes keyword-only (Python 3+)
in the generated ``__init__`` (if ``init`` is ``False``, this
parameter is ignored).
:param bool cache_hash: Ensure that the object's hash code is computed
only once and stored on the object. If this is set to ``True``,
hashing must be either explicitly or implicitly enabled for this
class. If the hash code is cached, avoid any reassignments of
fields involved in hash code computation or mutations of the objects
those fields point to after object creation. If such changes occur,
the behavior of the object's hash code is undefined.
:param bool auto_exc: If the class subclasses `BaseException`
(which implicitly includes any subclass of any exception), the
following happens to behave like a well-behaved Python exceptions
class:
- the values for *eq*, *order*, and *hash* are ignored and the
instances compare and hash by the instance's ids (N.B. ``attrs`` will
*not* remove existing implementations of ``__hash__`` or the equality
methods. It just won't add own ones.),
- all attributes that are either passed into ``__init__`` or have a
default value are additionally available as a tuple in the ``args``
attribute,
- the value of *str* is ignored leaving ``__str__`` to base classes.
:param bool collect_by_mro: Setting this to `True` fixes the way ``attrs``
collects attributes from base classes. The default behavior is
incorrect in certain cases of multiple inheritance. It should be on by
default but is kept off for backward-compatibility.
See issue `#428 <https://github.com/python-attrs/attrs/issues/428>`_ for
more details.
:param Optional[bool] getstate_setstate:
.. note::
This is usually only interesting for slotted classes and you should
probably just set *auto_detect* to `True`.
If `True`, ``__getstate__`` and
``__setstate__`` are generated and attached to the class. This is
necessary for slotted classes to be pickleable. If left `None`, it's
`True` by default for slotted classes and ``False`` for dict classes.
If *auto_detect* is `True`, and *getstate_setstate* is left `None`,
and **either** ``__getstate__`` or ``__setstate__`` is detected directly
on the class (i.e. not inherited), it is set to `False` (this is usually
what you want).
:param on_setattr: A callable that is run whenever the user attempts to set
an attribute (either by assignment like ``i.x = 42`` or by using
`setattr` like ``setattr(i, "x", 42)``). It receives the same arguments
as validators: the instance, the attribute that is being modified, and
the new value.
If no exception is raised, the attribute is set to the return value of
the callable.
If a list of callables is passed, they're automatically wrapped in an
`attrs.setters.pipe`.
:type on_setattr: `callable`, or a list of callables, or `None`, or
`attrs.setters.NO_OP`
:param Optional[callable] field_transformer:
A function that is called with the original class object and all
fields right before ``attrs`` finalizes the class. You can use
this, e.g., to automatically add converters or validators to
fields based on their types. See `transform-fields` for more details.
:param bool match_args:
If `True` (default), set ``__match_args__`` on the class to support
`PEP 634 <https://www.python.org/dev/peps/pep-0634/>`_ (Structural
Pattern Matching). It is a tuple of all positional ``__init__``
parameter names on Python 3.10 and later. Ignored on older Python
versions.
.. versionadded:: 16.0.0 *slots*
.. versionadded:: 16.1.0 *frozen*
.. versionadded:: 16.3.0 *str*
.. versionadded:: 16.3.0 Support for ``__attrs_post_init__``.
.. versionchanged:: 17.1.0
*hash* supports ``None`` as value which is also the default now.
.. versionadded:: 17.3.0 *auto_attribs*
.. versionchanged:: 18.1.0
If *these* is passed, no attributes are deleted from the class body.
.. versionchanged:: 18.1.0 If *these* is ordered, the order is retained.
.. versionadded:: 18.2.0 *weakref_slot*
.. deprecated:: 18.2.0
``__lt__``, ``__le__``, ``__gt__``, and ``__ge__`` now raise a
`DeprecationWarning` if the classes compared are subclasses of
        each other. ``__eq__`` and ``__ne__`` never tried to compare subclasses
to each other.
.. versionchanged:: 19.2.0
``__lt__``, ``__le__``, ``__gt__``, and ``__ge__`` now do not consider
subclasses comparable anymore.
.. versionadded:: 18.2.0 *kw_only*
.. versionadded:: 18.2.0 *cache_hash*
.. versionadded:: 19.1.0 *auto_exc*
.. deprecated:: 19.2.0 *cmp* Removal on or after 2021-06-01.
.. versionadded:: 19.2.0 *eq* and *order*
.. versionadded:: 20.1.0 *auto_detect*
.. versionadded:: 20.1.0 *collect_by_mro*
.. versionadded:: 20.1.0 *getstate_setstate*
.. versionadded:: 20.1.0 *on_setattr*
.. versionadded:: 20.3.0 *field_transformer*
.. versionchanged:: 21.1.0
``init=False`` injects ``__attrs_init__``
.. versionchanged:: 21.1.0 Support for ``__attrs_pre_init__``
.. versionchanged:: 21.1.0 *cmp* undeprecated
.. versionadded:: 21.3.0 *match_args*
"""
if auto_detect and PY2:
raise PythonTooOldError(
"auto_detect only works on Python 3 and later."
)
eq_, order_ = _determine_attrs_eq_order(cmp, eq, order, None)
hash_ = hash # work around the lack of nonlocal
if isinstance(on_setattr, (list, tuple)):
on_setattr = setters.pipe(*on_setattr)
def wrap(cls):
if getattr(cls, "__class__", None) is None:
raise TypeError("attrs only works with new-style classes.")
is_frozen = frozen or _has_frozen_base_class(cls)
is_exc = auto_exc is True and issubclass(cls, BaseException)
has_own_setattr = auto_detect and _has_own_attribute(
cls, "__setattr__"
)
if has_own_setattr and is_frozen:
raise ValueError("Can't freeze a class with a custom __setattr__.")
builder = _ClassBuilder(
cls,
these,
slots,
is_frozen,
weakref_slot,
_determine_whether_to_implement(
cls,
getstate_setstate,
auto_detect,
("__getstate__", "__setstate__"),
default=slots,
),
auto_attribs,
kw_only,
cache_hash,
is_exc,
collect_by_mro,
on_setattr,
has_own_setattr,
field_transformer,
)
if _determine_whether_to_implement(
cls, repr, auto_detect, ("__repr__",)
):
builder.add_repr(repr_ns)
if str is True:
builder.add_str()
eq = _determine_whether_to_implement(
cls, eq_, auto_detect, ("__eq__", "__ne__")
)
if not is_exc and eq is True:
builder.add_eq()
if not is_exc and _determine_whether_to_implement(
cls, order_, auto_detect, ("__lt__", "__le__", "__gt__", "__ge__")
):
builder.add_order()
builder.add_setattr()
if (
hash_ is None
and auto_detect is True
and _has_own_attribute(cls, "__hash__")
):
hash = False
else:
hash = hash_
if hash is not True and hash is not False and hash is not None:
# Can't use `hash in` because 1 == True for example.
raise TypeError(
"Invalid value for hash. Must be True, False, or None."
)
elif hash is False or (hash is None and eq is False) or is_exc:
# Don't do anything. Should fall back to __object__'s __hash__
# which is by id.
if cache_hash:
raise TypeError(
"Invalid value for cache_hash. To use hash caching,"
" hashing must be either explicitly or implicitly "
"enabled."
)
elif hash is True or (
hash is None and eq is True and is_frozen is True
):
# Build a __hash__ if told so, or if it's safe.
builder.add_hash()
else:
# Raise TypeError on attempts to hash.
if cache_hash:
raise TypeError(
"Invalid value for cache_hash. To use hash caching,"
" hashing must be either explicitly or implicitly "
"enabled."
)
builder.make_unhashable()
if _determine_whether_to_implement(
cls, init, auto_detect, ("__init__",)
):
builder.add_init()
else:
builder.add_attrs_init()
if cache_hash:
raise TypeError(
"Invalid value for cache_hash. To use hash caching,"
" init must be True."
)
if (
PY310
and match_args
and not _has_own_attribute(cls, "__match_args__")
):
builder.add_match_args()
return builder.build_class()
# maybe_cls's type depends on the usage of the decorator. It's a class
# if it's used as `@attrs` but ``None`` if used as `@attrs()`.
if maybe_cls is None:
return wrap
else:
return wrap(maybe_cls)
|
def attrs(
maybe_cls=None,
these=None,
repr_ns=None,
repr=None,
cmp=None,
hash=None,
init=None,
slots=False,
frozen=False,
weakref_slot=True,
str=False,
auto_attribs=False,
kw_only=False,
cache_hash=False,
auto_exc=False,
eq=None,
order=None,
auto_detect=False,
collect_by_mro=False,
getstate_setstate=None,
on_setattr=None,
field_transformer=None,
match_args=True,
):
r"""
A class decorator that adds `dunder
<https://wiki.python.org/moin/DunderAlias>`_\ -methods according to the
specified attributes using `attr.ib` or the *these* argument.
:param these: A dictionary of name to `attr.ib` mappings. This is
useful to avoid the definition of your attributes within the class body
because you can't (e.g. if you want to add ``__repr__`` methods to
Django models) or don't want to.
If *these* is not ``None``, ``attrs`` will *not* search the class body
for attributes and will *not* remove any attributes from it.
If *these* is an ordered dict (`dict` on Python 3.6+,
`collections.OrderedDict` otherwise), the order is deduced from
the order of the attributes inside *these*. Otherwise the order
of the definition of the attributes is used.
:type these: `dict` of `str` to `attr.ib`
:param str repr_ns: When using nested classes, there's no way in Python 2
to automatically detect that. Therefore it's possible to set the
namespace explicitly for a more meaningful ``repr`` output.
:param bool auto_detect: Instead of setting the *init*, *repr*, *eq*,
*order*, and *hash* arguments explicitly, assume they are set to
``True`` **unless any** of the involved methods for one of the
arguments is implemented in the *current* class (i.e. it is *not*
inherited from some base class).
So for example by implementing ``__eq__`` on a class yourself,
``attrs`` will deduce ``eq=False`` and will create *neither*
``__eq__`` *nor* ``__ne__`` (but Python classes come with a sensible
``__ne__`` by default, so it *should* be enough to only implement
``__eq__`` in most cases).
.. warning::
If you prevent ``attrs`` from creating the ordering methods for you
(``order=False``, e.g. by implementing ``__le__``), it becomes
*your* responsibility to make sure its ordering is sound. The best
way is to use the `functools.total_ordering` decorator.
Passing ``True`` or ``False`` to *init*, *repr*, *eq*, *order*,
*cmp*, or *hash* overrides whatever *auto_detect* would determine.
*auto_detect* requires Python 3. Setting it ``True`` on Python 2 raises
an `attrs.exceptions.PythonTooOldError`.
:param bool repr: Create a ``__repr__`` method with a human readable
        representation of ``attrs`` attributes.
:param bool str: Create a ``__str__`` method that is identical to
``__repr__``. This is usually not necessary except for
`Exception`\ s.
:param Optional[bool] eq: If ``True`` or ``None`` (default), add ``__eq__``
and ``__ne__`` methods that check two instances for equality.
They compare the instances as if they were tuples of their ``attrs``
attributes if and only if the types of both classes are *identical*!
:param Optional[bool] order: If ``True``, add ``__lt__``, ``__le__``,
``__gt__``, and ``__ge__`` methods that behave like *eq* above and
allow instances to be ordered. If ``None`` (default) mirror value of
*eq*.
:param Optional[bool] cmp: Setting *cmp* is equivalent to setting *eq*
and *order* to the same value. Must not be mixed with *eq* or *order*.
:param Optional[bool] hash: If ``None`` (default), the ``__hash__`` method
        is generated according to how *eq* and *frozen* are set.
1. If *both* are True, ``attrs`` will generate a ``__hash__`` for you.
2. If *eq* is True and *frozen* is False, ``__hash__`` will be set to
None, marking it unhashable (which it is).
3. If *eq* is False, ``__hash__`` will be left untouched meaning the
``__hash__`` method of the base class will be used (if base class is
``object``, this means it will fall back to id-based hashing.).
Although not recommended, you can decide for yourself and force
``attrs`` to create one (e.g. if the class is immutable even though you
didn't freeze it programmatically) by passing ``True`` or not. Both of
these cases are rather special and should be used carefully.
See our documentation on `hashing`, Python's documentation on
`object.__hash__`, and the `GitHub issue that led to the default \
behavior <https://github.com/python-attrs/attrs/issues/136>`_ for more
details.
:param bool init: Create a ``__init__`` method that initializes the
``attrs`` attributes. Leading underscores are stripped for the argument
name. If a ``__attrs_pre_init__`` method exists on the class, it will
be called before the class is initialized. If a ``__attrs_post_init__``
method exists on the class, it will be called after the class is fully
initialized.
If ``init`` is ``False``, an ``__attrs_init__`` method will be
injected instead. This allows you to define a custom ``__init__``
method that can do pre-init work such as ``super().__init__()``,
and then call ``__attrs_init__()`` and ``__attrs_post_init__()``.
:param bool slots: Create a `slotted class <slotted classes>` that's more
memory-efficient. Slotted classes are generally superior to the default
dict classes, but have some gotchas you should know about, so we
encourage you to read the `glossary entry <slotted classes>`.
:param bool frozen: Make instances immutable after initialization. If
someone attempts to modify a frozen instance,
`attr.exceptions.FrozenInstanceError` is raised.
.. note::
1. This is achieved by installing a custom ``__setattr__`` method
on your class, so you can't implement your own.
2. True immutability is impossible in Python.
        3. This *does* have a minor runtime performance `impact
<how-frozen>` when initializing new instances. In other words:
``__init__`` is slightly slower with ``frozen=True``.
4. If a class is frozen, you cannot modify ``self`` in
``__attrs_post_init__`` or a self-written ``__init__``. You can
circumvent that limitation by using
``object.__setattr__(self, "attribute_name", value)``.
5. Subclasses of a frozen class are frozen too.
:param bool weakref_slot: Make instances weak-referenceable. This has no
effect unless ``slots`` is also enabled.
:param bool auto_attribs: If ``True``, collect `PEP 526`_-annotated
attributes (Python 3.6 and later only) from the class body.
In this case, you **must** annotate every field. If ``attrs``
encounters a field that is set to an `attr.ib` but lacks a type
annotation, an `attr.exceptions.UnannotatedAttributeError` is
raised. Use ``field_name: typing.Any = attr.ib(...)`` if you don't
want to set a type.
If you assign a value to those attributes (e.g. ``x: int = 42``), that
value becomes the default value like if it were passed using
``attr.ib(default=42)``. Passing an instance of `attrs.Factory` also
works as expected in most cases (see warning below).
Attributes annotated as `typing.ClassVar`, and attributes that are
neither annotated nor set to an `attr.ib` are **ignored**.
.. warning::
For features that use the attribute name to create decorators (e.g.
`validators <validators>`), you still *must* assign `attr.ib` to
them. Otherwise Python will either not find the name or try to use
the default value to call e.g. ``validator`` on it.
These errors can be quite confusing and probably the most common bug
report on our bug tracker.
.. _`PEP 526`: https://www.python.org/dev/peps/pep-0526/
:param bool kw_only: Make all attributes keyword-only (Python 3+)
in the generated ``__init__`` (if ``init`` is ``False``, this
parameter is ignored).
:param bool cache_hash: Ensure that the object's hash code is computed
only once and stored on the object. If this is set to ``True``,
hashing must be either explicitly or implicitly enabled for this
class. If the hash code is cached, avoid any reassignments of
fields involved in hash code computation or mutations of the objects
those fields point to after object creation. If such changes occur,
the behavior of the object's hash code is undefined.
:param bool auto_exc: If the class subclasses `BaseException`
(which implicitly includes any subclass of any exception), the
following happens to behave like a well-behaved Python exceptions
class:
- the values for *eq*, *order*, and *hash* are ignored and the
instances compare and hash by the instance's ids (N.B. ``attrs`` will
*not* remove existing implementations of ``__hash__`` or the equality
methods. It just won't add own ones.),
- all attributes that are either passed into ``__init__`` or have a
default value are additionally available as a tuple in the ``args``
attribute,
- the value of *str* is ignored leaving ``__str__`` to base classes.
:param bool collect_by_mro: Setting this to `True` fixes the way ``attrs``
collects attributes from base classes. The default behavior is
incorrect in certain cases of multiple inheritance. It should be on by
default but is kept off for backward-compatibility.
See issue `#428 <https://github.com/python-attrs/attrs/issues/428>`_ for
more details.
:param Optional[bool] getstate_setstate:
.. note::
This is usually only interesting for slotted classes and you should
probably just set *auto_detect* to `True`.
If `True`, ``__getstate__`` and
``__setstate__`` are generated and attached to the class. This is
necessary for slotted classes to be pickleable. If left `None`, it's
`True` by default for slotted classes and ``False`` for dict classes.
If *auto_detect* is `True`, and *getstate_setstate* is left `None`,
and **either** ``__getstate__`` or ``__setstate__`` is detected directly
on the class (i.e. not inherited), it is set to `False` (this is usually
what you want).
:param on_setattr: A callable that is run whenever the user attempts to set
an attribute (either by assignment like ``i.x = 42`` or by using
`setattr` like ``setattr(i, "x", 42)``). It receives the same arguments
as validators: the instance, the attribute that is being modified, and
the new value.
If no exception is raised, the attribute is set to the return value of
the callable.
If a list of callables is passed, they're automatically wrapped in an
`attrs.setters.pipe`.
:type on_setattr: `callable`, or a list of callables, or `None`, or
`attrs.setters.NO_OP`
:param Optional[callable] field_transformer:
A function that is called with the original class object and all
fields right before ``attrs`` finalizes the class. You can use
this, e.g., to automatically add converters or validators to
fields based on their types. See `transform-fields` for more details.
:param bool match_args:
If `True` (default), set ``__match_args__`` on the class to support
`PEP 634 <https://www.python.org/dev/peps/pep-0634/>`_ (Structural
Pattern Matching). It is a tuple of all non-keyword-only ``__init__``
parameter names on Python 3.10 and later. Ignored on older Python
versions.
.. versionadded:: 16.0.0 *slots*
.. versionadded:: 16.1.0 *frozen*
.. versionadded:: 16.3.0 *str*
.. versionadded:: 16.3.0 Support for ``__attrs_post_init__``.
.. versionchanged:: 17.1.0
*hash* supports ``None`` as value which is also the default now.
.. versionadded:: 17.3.0 *auto_attribs*
.. versionchanged:: 18.1.0
If *these* is passed, no attributes are deleted from the class body.
.. versionchanged:: 18.1.0 If *these* is ordered, the order is retained.
.. versionadded:: 18.2.0 *weakref_slot*
.. deprecated:: 18.2.0
``__lt__``, ``__le__``, ``__gt__``, and ``__ge__`` now raise a
`DeprecationWarning` if the classes compared are subclasses of
        each other. ``__eq__`` and ``__ne__`` never tried to compare subclasses
to each other.
.. versionchanged:: 19.2.0
``__lt__``, ``__le__``, ``__gt__``, and ``__ge__`` now do not consider
subclasses comparable anymore.
.. versionadded:: 18.2.0 *kw_only*
.. versionadded:: 18.2.0 *cache_hash*
.. versionadded:: 19.1.0 *auto_exc*
.. deprecated:: 19.2.0 *cmp* Removal on or after 2021-06-01.
.. versionadded:: 19.2.0 *eq* and *order*
.. versionadded:: 20.1.0 *auto_detect*
.. versionadded:: 20.1.0 *collect_by_mro*
.. versionadded:: 20.1.0 *getstate_setstate*
.. versionadded:: 20.1.0 *on_setattr*
.. versionadded:: 20.3.0 *field_transformer*
.. versionchanged:: 21.1.0
``init=False`` injects ``__attrs_init__``
.. versionchanged:: 21.1.0 Support for ``__attrs_pre_init__``
.. versionchanged:: 21.1.0 *cmp* undeprecated
.. versionadded:: 21.3.0 *match_args*
"""
if auto_detect and PY2:
raise PythonTooOldError(
"auto_detect only works on Python 3 and later."
)
eq_, order_ = _determine_attrs_eq_order(cmp, eq, order, None)
hash_ = hash # work around the lack of nonlocal
if isinstance(on_setattr, (list, tuple)):
on_setattr = setters.pipe(*on_setattr)
def wrap(cls):
if getattr(cls, "__class__", None) is None:
raise TypeError("attrs only works with new-style classes.")
is_frozen = frozen or _has_frozen_base_class(cls)
is_exc = auto_exc is True and issubclass(cls, BaseException)
has_own_setattr = auto_detect and _has_own_attribute(
cls, "__setattr__"
)
if has_own_setattr and is_frozen:
raise ValueError("Can't freeze a class with a custom __setattr__.")
builder = _ClassBuilder(
cls,
these,
slots,
is_frozen,
weakref_slot,
_determine_whether_to_implement(
cls,
getstate_setstate,
auto_detect,
("__getstate__", "__setstate__"),
default=slots,
),
auto_attribs,
kw_only,
cache_hash,
is_exc,
collect_by_mro,
on_setattr,
has_own_setattr,
field_transformer,
)
if _determine_whether_to_implement(
cls, repr, auto_detect, ("__repr__",)
):
builder.add_repr(repr_ns)
if str is True:
builder.add_str()
eq = _determine_whether_to_implement(
cls, eq_, auto_detect, ("__eq__", "__ne__")
)
if not is_exc and eq is True:
builder.add_eq()
if not is_exc and _determine_whether_to_implement(
cls, order_, auto_detect, ("__lt__", "__le__", "__gt__", "__ge__")
):
builder.add_order()
builder.add_setattr()
if (
hash_ is None
and auto_detect is True
and _has_own_attribute(cls, "__hash__")
):
hash = False
else:
hash = hash_
if hash is not True and hash is not False and hash is not None:
# Can't use `hash in` because 1 == True for example.
raise TypeError(
"Invalid value for hash. Must be True, False, or None."
)
elif hash is False or (hash is None and eq is False) or is_exc:
# Don't do anything. Should fall back to __object__'s __hash__
# which is by id.
if cache_hash:
raise TypeError(
"Invalid value for cache_hash. To use hash caching,"
" hashing must be either explicitly or implicitly "
"enabled."
)
elif hash is True or (
hash is None and eq is True and is_frozen is True
):
# Build a __hash__ if told so, or if it's safe.
builder.add_hash()
else:
# Raise TypeError on attempts to hash.
if cache_hash:
raise TypeError(
"Invalid value for cache_hash. To use hash caching,"
" hashing must be either explicitly or implicitly "
"enabled."
)
builder.make_unhashable()
if _determine_whether_to_implement(
cls, init, auto_detect, ("__init__",)
):
builder.add_init()
else:
builder.add_attrs_init()
if cache_hash:
raise TypeError(
"Invalid value for cache_hash. To use hash caching,"
" init must be True."
)
if (
PY310
and match_args
and not _has_own_attribute(cls, "__match_args__")
):
builder.add_match_args()
return builder.build_class()
# maybe_cls's type depends on the usage of the decorator. It's a class
# if it's used as `@attrs` but ``None`` if used as `@attrs()`.
if maybe_cls is None:
return wrap
else:
return wrap(maybe_cls)
|
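A compact example of the decorator in everyday use, independent of the internals above; the class and attribute names are illustrative only.

import attr

@attr.s(auto_attribs=True, frozen=True, slots=True)
class Point:
    x: int
    y: int = 0

p = Point(1)
assert p == Point(1, 0)           # generated __eq__
print(p)                          # generated __repr__: Point(x=1, y=0)
try:
    p.x = 5                       # frozen=True: assignment is rejected
except attr.exceptions.FrozenInstanceError:
    pass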
57,927 |
def ztap_get_alert_entries(
client: Client,
args: Dict,
):
"""
Gets all entries (comments/logs) for an alert
"""
try:
alert_id = args.get("id")
alert = {
"status": "assigned",
"id": alert_id,
"datetime_created": EPOCH,
"datetime_closed": None,
}
investigation: Dict = {}
entries = get_notes_for_alert(
client, investigation, alert, epoch(), update_status=False
)
return entries
except Exception as e:
if "Rate limit" in str(e):
return_error("API rate limit")
return_error(str(e))
|
def ztap_get_alert_entries(
client: Client,
args: Dict,
):
"""
Gets all entries (comments/logs) for an alert
"""
try:
alert_id = args.get("id")
alert = {
"status": "assigned",
"id": alert_id,
"datetime_created": EPOCH,
"datetime_closed": None,
}
investigation: Dict = {}
entries = get_notes_for_alert(
client, investigation, alert, epoch(), update_status=False
)
return entries
except Exception as e:
if "Rate limit" in str(e):
raise Exception("API rate limit")
raise Exception(str(e))
|
53,803 |
def headmsk_wf(name='HeadMaskWorkflow', use_bet=True):
"""
Computes a head mask as in [Mortamet2009]_.
.. workflow::
from mriqc.workflows.anatomical import headmsk_wf
wf = headmsk_wf()
"""
has_dipy = False
try:
from dipy.denoise import nlmeans # noqa
has_dipy = True
except ImportError:
pass
workflow = pe.Workflow(name=name)
inputnode = pe.Node(niu.IdentityInterface(fields=['in_file', 'in_segm']),
name='inputnode')
outputnode = pe.Node(niu.IdentityInterface(fields=['out_file']), name='outputnode')
if use_bet or not has_dipy:
from nipype.interfaces.fsl import BET
# Alternative for when dipy is not installed
bet = pe.Node(BET(surfaces=True), name='fsl_bet')
workflow.connect([
(inputnode, bet, [('in_file', 'in_file')]),
(bet, outputnode, [('outskin_mask_file', 'out_file')])
])
else:
from nipype.interfaces.dipy import Denoise
enhance = pe.Node(niu.Function(
input_names=['in_file'], output_names=['out_file'], function=_enhance), name='Enhance')
estsnr = pe.Node(niu.Function(
input_names=['in_file', 'seg_file'], output_names=['out_snr'],
function=_estimate_snr), name='EstimateSNR')
denoise = pe.Node(Denoise(), name='Denoise')
gradient = pe.Node(niu.Function(
input_names=['in_file', 'snr'], output_names=['out_file'],
function=image_gradient), name='Grad')
thresh = pe.Node(niu.Function(
input_names=['in_file', 'in_segm'], output_names=['out_file'],
function=gradient_threshold), name='GradientThreshold')
workflow.connect([
(inputnode, estsnr, [('in_file', 'in_file'),
('in_segm', 'seg_file')]),
(estsnr, denoise, [('out_snr', 'snr')]),
(inputnode, enhance, [('in_file', 'in_file')]),
(enhance, denoise, [('out_file', 'in_file')]),
(estsnr, gradient, [('out_snr', 'snr')]),
(denoise, gradient, [('out_file', 'in_file')]),
(inputnode, thresh, [('in_segm', 'in_segm')]),
(gradient, thresh, [('out_file', 'in_file')]),
(thresh, outputnode, [('out_file', 'out_file')])
])
return workflow
|
def headmsk_wf(name='HeadMaskWorkflow', use_bet=True):
"""
Computes a head mask as in [Mortamet2009]_.
.. workflow::
from mriqc.workflows.anatomical import headmsk_wf
wf = headmsk_wf()
"""
has_dipy = False
try:
from dipy.denoise import nlmeans # noqa
has_dipy = True
except ImportError:
pass
workflow = pe.Workflow(name=name)
inputnode = pe.Node(niu.IdentityInterface(fields=['in_file', 'in_segm']),
name='inputnode')
outputnode = pe.Node(niu.IdentityInterface(fields=['out_file']), name='outputnode')
if not use_bet and not has_dipy:
raise RuntimeError("Either FSL or DIPY must be installed.")
if use_bet or not has_dipy:
from nipype.interfaces.fsl import BET
# Alternative for when dipy is not installed
bet = pe.Node(BET(surfaces=True), name='fsl_bet')
workflow.connect([
(inputnode, bet, [('in_file', 'in_file')]),
(bet, outputnode, [('outskin_mask_file', 'out_file')])
])
else:
from nipype.interfaces.dipy import Denoise
enhance = pe.Node(niu.Function(
input_names=['in_file'], output_names=['out_file'], function=_enhance), name='Enhance')
estsnr = pe.Node(niu.Function(
input_names=['in_file', 'seg_file'], output_names=['out_snr'],
function=_estimate_snr), name='EstimateSNR')
denoise = pe.Node(Denoise(), name='Denoise')
gradient = pe.Node(niu.Function(
input_names=['in_file', 'snr'], output_names=['out_file'],
function=image_gradient), name='Grad')
thresh = pe.Node(niu.Function(
input_names=['in_file', 'in_segm'], output_names=['out_file'],
function=gradient_threshold), name='GradientThreshold')
workflow.connect([
(inputnode, estsnr, [('in_file', 'in_file'),
('in_segm', 'seg_file')]),
(estsnr, denoise, [('out_snr', 'snr')]),
(inputnode, enhance, [('in_file', 'in_file')]),
(enhance, denoise, [('out_file', 'in_file')]),
(estsnr, gradient, [('out_snr', 'snr')]),
(denoise, gradient, [('out_file', 'in_file')]),
(inputnode, thresh, [('in_segm', 'in_segm')]),
(gradient, thresh, [('out_file', 'in_file')]),
(thresh, outputnode, [('out_file', 'out_file')])
])
return workflow
|
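A hedged usage sketch of the workflow; the file paths are placeholders and the run() call is commented out because executing it requires nipype plus FSL (or dipy) and real input images.

wf = headmsk_wf(use_bet=True)
wf.get_node('inputnode').inputs.in_file = 'sub-01_T1w.nii.gz'   # placeholder path
wf.get_node('inputnode').inputs.in_segm = 'sub-01_dseg.nii.gz'  # placeholder path
# wf.run()  # would run FSL BET and expose outputnode.out_file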
4,531 |
def test_searchlight():
# Create a toy dataset to run searchlight on
    # Initialize with 5x5x5 scans of random values on 30 frames
rand = np.random.RandomState(0)
frames = 30
data = rand.rand(5, 5, 5, frames)
mask = np.ones((5, 5, 5), np.bool)
mask_img = nibabel.Nifti1Image(mask.astype(np.int), np.eye(4))
# Create a condition array
cond = np.arange(frames, dtype=int) >= (frames // 2)
# Create an activation pixel.
data[2, 2, 2, :] = 0
data[2, 2, 2][cond.astype(np.bool)] = 2
data_img = nibabel.Nifti1Image(data, np.eye(4))
# Define cross validation
from sklearn.model_selection import KFold
cv = KFold(n_splits=4)
n_jobs = 1
# Run Searchlight with different radii
# Small radius : only one pixel is selected
sl = searchlight.SearchLight(mask_img, process_mask_img=mask_img,
radius=0.5, n_jobs=n_jobs,
scoring='accuracy', cv=cv)
sl.fit(data_img, cond)
assert_equal(np.where(sl.scores_ == 1)[0].size, 1)
assert_equal(sl.scores_[2, 2, 2], 1.)
# The voxel selected in process_mask_img is too far from the signal
process_mask = np.zeros((5, 5, 5), np.bool)
process_mask[0, 0, 0] = True
process_mask_img = nibabel.Nifti1Image(process_mask.astype(np.int),
np.eye(4))
sl = searchlight.SearchLight(mask_img, process_mask_img=process_mask_img,
radius=0.5, n_jobs=n_jobs,
scoring='accuracy', cv=cv)
sl.fit(data_img, cond)
assert_equal(np.where(sl.scores_ == 1)[0].size, 0)
# Medium radius : little ball selected
sl = searchlight.SearchLight(mask_img, process_mask_img=mask_img, radius=1,
n_jobs=n_jobs, scoring='accuracy', cv=cv)
sl.fit(data_img, cond)
assert_equal(np.where(sl.scores_ == 1)[0].size, 7)
assert_equal(sl.scores_[2, 2, 2], 1.)
assert_equal(sl.scores_[1, 2, 2], 1.)
assert_equal(sl.scores_[2, 1, 2], 1.)
assert_equal(sl.scores_[2, 2, 1], 1.)
assert_equal(sl.scores_[3, 2, 2], 1.)
assert_equal(sl.scores_[2, 3, 2], 1.)
assert_equal(sl.scores_[2, 2, 3], 1.)
# Big radius : big ball selected
sl = searchlight.SearchLight(mask_img, process_mask_img=mask_img, radius=2,
n_jobs=n_jobs, scoring='accuracy', cv=cv)
sl.fit(data_img, cond)
assert_equal(np.where(sl.scores_ == 1)[0].size, 33)
assert_equal(sl.scores_[2, 2, 2], 1.)
# group cross validation
try:
from sklearn.model_selection import LeaveOneGroupOut
gcv = LeaveOneGroupOut()
except ImportError:
# won't import model selection if it's not there.
# the groups variable should have no effect.
gcv = cv
groups = np.random.permutation(np.arange(frames, dtype=int) >
(frames // 2))
sl = searchlight.SearchLight(mask_img, process_mask_img=mask_img, radius=1,
n_jobs=n_jobs, scoring='accuracy', cv=gcv)
sl.fit(data_img, cond, groups)
assert_equal(np.where(sl.scores_ == 1)[0].size, 7)
assert_equal(sl.scores_[2, 2, 2], 1.)
# adding superfluous group variable
sl = searchlight.SearchLight(mask_img, process_mask_img=mask_img, radius=1,
n_jobs=n_jobs, scoring='accuracy', cv=cv)
sl.fit(data_img, cond, groups)
assert_equal(np.where(sl.scores_ == 1)[0].size, 7)
assert_equal(sl.scores_[2, 2, 2], 1.)
# Check whether searchlight works on list of 3D images
rand = np.random.RandomState(0)
data = rand.rand(5, 5, 5)
data_img = nibabel.Nifti1Image(data, affine=np.eye(4))
imgs = [data_img, data_img, data_img, data_img, data_img, data_img]
# labels
y = [0, 1, 0, 1, 0, 1]
# run searchlight on list of 3D images
sl = searchlight.SearchLight(mask_img)
sl.fit(imgs, y)
|
def test_searchlight():
# Create a toy dataset to run searchlight on
    # Initialize with 5x5x5 scans of random values on 30 frames
rand = np.random.RandomState(0)
frames = 30
data = rand.rand(5, 5, 5, frames)
mask = np.ones((5, 5, 5), np.bool)
mask_img = nibabel.Nifti1Image(mask.astype(np.int), np.eye(4))
# Create a condition array, with balanced classes
cond = np.arange(frames, dtype=int) >= (frames // 2)
# Create an activation pixel.
data[2, 2, 2, :] = 0
data[2, 2, 2][cond.astype(np.bool)] = 2
data_img = nibabel.Nifti1Image(data, np.eye(4))
# Define cross validation
from sklearn.model_selection import KFold
cv = KFold(n_splits=4)
n_jobs = 1
# Run Searchlight with different radii
# Small radius : only one pixel is selected
sl = searchlight.SearchLight(mask_img, process_mask_img=mask_img,
radius=0.5, n_jobs=n_jobs,
scoring='accuracy', cv=cv)
sl.fit(data_img, cond)
assert_equal(np.where(sl.scores_ == 1)[0].size, 1)
assert_equal(sl.scores_[2, 2, 2], 1.)
# The voxel selected in process_mask_img is too far from the signal
process_mask = np.zeros((5, 5, 5), np.bool)
process_mask[0, 0, 0] = True
process_mask_img = nibabel.Nifti1Image(process_mask.astype(np.int),
np.eye(4))
sl = searchlight.SearchLight(mask_img, process_mask_img=process_mask_img,
radius=0.5, n_jobs=n_jobs,
scoring='accuracy', cv=cv)
sl.fit(data_img, cond)
assert_equal(np.where(sl.scores_ == 1)[0].size, 0)
# Medium radius : little ball selected
sl = searchlight.SearchLight(mask_img, process_mask_img=mask_img, radius=1,
n_jobs=n_jobs, scoring='accuracy', cv=cv)
sl.fit(data_img, cond)
assert_equal(np.where(sl.scores_ == 1)[0].size, 7)
assert_equal(sl.scores_[2, 2, 2], 1.)
assert_equal(sl.scores_[1, 2, 2], 1.)
assert_equal(sl.scores_[2, 1, 2], 1.)
assert_equal(sl.scores_[2, 2, 1], 1.)
assert_equal(sl.scores_[3, 2, 2], 1.)
assert_equal(sl.scores_[2, 3, 2], 1.)
assert_equal(sl.scores_[2, 2, 3], 1.)
# Big radius : big ball selected
sl = searchlight.SearchLight(mask_img, process_mask_img=mask_img, radius=2,
n_jobs=n_jobs, scoring='accuracy', cv=cv)
sl.fit(data_img, cond)
assert_equal(np.where(sl.scores_ == 1)[0].size, 33)
assert_equal(sl.scores_[2, 2, 2], 1.)
# group cross validation
try:
from sklearn.model_selection import LeaveOneGroupOut
gcv = LeaveOneGroupOut()
except ImportError:
# won't import model selection if it's not there.
# the groups variable should have no effect.
gcv = cv
groups = np.random.permutation(np.arange(frames, dtype=int) >
(frames // 2))
sl = searchlight.SearchLight(mask_img, process_mask_img=mask_img, radius=1,
n_jobs=n_jobs, scoring='accuracy', cv=gcv)
sl.fit(data_img, cond, groups)
assert_equal(np.where(sl.scores_ == 1)[0].size, 7)
assert_equal(sl.scores_[2, 2, 2], 1.)
# adding superfluous group variable
sl = searchlight.SearchLight(mask_img, process_mask_img=mask_img, radius=1,
n_jobs=n_jobs, scoring='accuracy', cv=cv)
sl.fit(data_img, cond, groups)
assert_equal(np.where(sl.scores_ == 1)[0].size, 7)
assert_equal(sl.scores_[2, 2, 2], 1.)
# Check whether searchlight works on list of 3D images
rand = np.random.RandomState(0)
data = rand.rand(5, 5, 5)
data_img = nibabel.Nifti1Image(data, affine=np.eye(4))
imgs = [data_img, data_img, data_img, data_img, data_img, data_img]
# labels
y = [0, 1, 0, 1, 0, 1]
# run searchlight on list of 3D images
sl = searchlight.SearchLight(mask_img)
sl.fit(imgs, y)
|
40,710 |
def _check_output_shapes(output: Any):
y_pred, y = output
if y_pred.shape != y.shape:
raise ValueError("Input data shapes should be the same, but given {} and {}".format(y_pred.shape, y.shape))
c1 = y_pred.ndimension() == 2 and y_pred.shape[1] == 1
if not (y_pred.ndimension() == 1 or c1):
raise ValueError("Input y_pred should have shape (N,) or (N, 1), but given {}".format(y_pred.shape))
c2 = y.ndimension() == 2 and y.shape[1] == 1
if not (y.ndimension() == 1 or c2):
raise ValueError("Input y should have shape (N,) or (N, 1), but given {}".format(y.shape))
|
def _check_output_shapes(output: Tuple[torch.Tensor, torch.Tensor]):
y_pred, y = output
if y_pred.shape != y.shape:
raise ValueError("Input data shapes should be the same, but given {} and {}".format(y_pred.shape, y.shape))
c1 = y_pred.ndimension() == 2 and y_pred.shape[1] == 1
if not (y_pred.ndimension() == 1 or c1):
raise ValueError("Input y_pred should have shape (N,) or (N, 1), but given {}".format(y_pred.shape))
c2 = y.ndimension() == 2 and y.shape[1] == 1
if not (y.ndimension() == 1 or c2):
raise ValueError("Input y should have shape (N,) or (N, 1), but given {}".format(y.shape))
|
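A quick illustration of which output shapes pass the check above, using the function as defined and assuming torch is available.

import torch

_check_output_shapes((torch.zeros(8), torch.zeros(8)))        # ok: both (N,)
_check_output_shapes((torch.zeros(8, 1), torch.zeros(8, 1)))  # ok: both (N, 1)
try:
    _check_output_shapes((torch.zeros(8, 2), torch.zeros(8, 2)))
except ValueError as err:
    print(err)  # rejected: matching shapes, but (N, 2) is not an accepted layout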
46,303 |
def _validate_features(
features: Optional[
Union[Dict[str, Union[np.ndarray, pd.Series]], pd.DataFrame]
],
*,
num_data: Optional[int] = None,
) -> pd.DataFrame:
"""Validates and coerces a features table into a pandas DataFrame.
See Also
--------
:class:`_FeatureTable` : See initialization for parameter descriptions.
"""
if isinstance(features, pd.DataFrame):
features = features.reset_index(drop=True)
elif isinstance(features, dict):
# One needs to reset pd.Series object's indices as well
for key, value in features.copy().items():
if isinstance(value, pd.Series):
features[key] = value.reset_index(drop=True)
index = None if num_data is None else range(num_data)
return pd.DataFrame(data=features, index=index)
|
def _validate_features(
features: Optional[
Union[Dict[str, Union[np.ndarray, pd.Series]], pd.DataFrame]
],
*,
num_data: Optional[int] = None,
) -> pd.DataFrame:
"""Validates and coerces a features table into a pandas DataFrame.
See Also
--------
:class:`_FeatureTable` : See initialization for parameter descriptions.
"""
if isinstance(features, pd.DataFrame):
features = features.reset_index(drop=True)
elif isinstance(features, dict):
features = {key: np.array(value, copy=False) for key, value in features.items()}
index = None if num_data is None else range(num_data)
return pd.DataFrame(data=features, index=index)
|
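A small illustration of the coercion behaviour, using the function as defined above with numpy available.

import numpy as np

table = _validate_features({'confidence': np.array([0.5, 0.9])}, num_data=2)
print(list(table.columns), len(table))   # ['confidence'] 2

empty = _validate_features(None, num_data=3)
print(empty.shape)                       # (3, 0): rows allocated, no columns yet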
54,412 |
def get_install_requires():
# type: () -> List[str]
return [
'alembic',
'cliff',
'colorlog',
'numpy',
'scipy!=1.4.0',
'sqlalchemy>=1.1.0',
'redis',
'tqdm',
'joblib',
]
|
def get_install_requires():
# type: () -> List[str]
return [
'alembic',
'cliff',
'colorlog',
'numpy',
'redis',
'scipy!=1.4.0',
'sqlalchemy>=1.1.0',
'tqdm',
'joblib',
]
|
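A hedged sketch of how a setup.py typically consumes this helper; the package name and version below are placeholders, not taken from the project.

from setuptools import setup, find_packages

setup(
    name='example-package',              # placeholder metadata
    version='0.0.1',
    packages=find_packages(),
    install_requires=get_install_requires(),
)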
32,398 |
def get_test_response(client, args):
"""
Test the integration connection state
:param client: instance of client to communicate with server
:param args: Parameters
:return: Test Response Success or Failure
"""
ret_val = 'Unable to Contact Feed Service, Please Check the parameters.'
args['begin'] = str((datetime.utcnow() - timedelta(days=1)).replace(tzinfo=pytz.UTC))
args['end'] = str(datetime.utcnow().replace(tzinfo=pytz.UTC))
try:
services = client.get_taxii(args)
except Exception as e:
demisto.error(e)
services = None
if services:
ret_val = 'ok'
return ret_val
|
def get_test_response(client: Client, args: Dict[str, Any]):
"""
Test the integration connection state
:param client: instance of client to communicate with server
:param args: Parameters
:return: Test Response Success or Failure
"""
ret_val = 'Unable to Contact Feed Service, Please Check the parameters.'
args['begin'] = str((datetime.utcnow() - timedelta(days=1)).replace(tzinfo=pytz.UTC))
args['end'] = str(datetime.utcnow().replace(tzinfo=pytz.UTC))
try:
services = client.get_taxii(args)
except Exception as e:
demisto.error(e)
services = None
if services:
ret_val = 'ok'
return ret_val
|
29,317 |
def _get_filepaths_from_non_other_shard(shard, namespace=None):
"""Get paths to lintable files in a shard besides the other shard.
This function applies some ignore rules (from .eslintignore) but not
all.
Args:
shard: str. Shard name.
namespace: multiprocessing.Namespace. Namespace in which to execute
this function.
Returns:
list(str). Paths to lintable files.
Raises:
RuntimeError. Invalid Shards because of a duplicate file.
AssertionError. A file duplicated across shards.
"""
filepaths = []
assert shard != OTHER_SHARD_NAME
for filepath in SHARDS[shard]:
filepaths.extend(
_get_filepaths_from_path(filepath, namespace=namespace))
if len(filepaths) != len(set(filepaths)):
# Shards are invalid because of a duplicate file.
for filepath in filepaths:
if filepaths.count(filepath) > 1:
raise RuntimeError(
'%s in multiple shards.' % filepath)
# We exempt this line from test coverage because it is
# un-testable. It should never be reached, but we raise an
# assertion error to catch coding errors above.
raise AssertionError( # pragma: no cover
'There is a file duplicated across shards. '
'We should have been able to find it but failed.')
return filepaths
|
def _get_filepaths_from_non_other_shard(shard, namespace=None):
"""Get paths to lintable files in a shard besides the other shard.
This function applies some ignore rules (from .eslintignore) but not
all.
Args:
shard: str. Shard name.
namespace: multiprocessing.Namespace. Namespace in which to execute
this function.
Returns:
list(str). Paths to lintable files.
Raises:
RuntimeError. Invalid shards because of a duplicate file.
AssertionError. A file duplicated across shards.
"""
filepaths = []
assert shard != OTHER_SHARD_NAME
for filepath in SHARDS[shard]:
filepaths.extend(
_get_filepaths_from_path(filepath, namespace=namespace))
if len(filepaths) != len(set(filepaths)):
# Shards are invalid because of a duplicate file.
for filepath in filepaths:
if filepaths.count(filepath) > 1:
raise RuntimeError(
'%s in multiple shards.' % filepath)
# We exempt this line from test coverage because it is
# un-testable. It should never be reached, but we raise an
# assertion error to catch coding errors above.
raise AssertionError( # pragma: no cover
'There is a file duplicated across shards. '
'We should have been able to find it but failed.')
return filepaths
|
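A standalone illustration of the duplicate check performed above, using collections.Counter instead of calling list.count inside a loop; the file names are invented.
from collections import Counter

filepaths = ["core/a.py", "core/b.py", "core/a.py"]
duplicates = [path for path, count in Counter(filepaths).items() if count > 1]
print(duplicates)  # ['core/a.py'] -- the real helper raises RuntimeError for these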
43,466 |
def AmplitudeEmbedding(features, wires, pad):
r"""Encodes :math:`2^n` features into the amplitude vector of :math:`n` qubits.
    If the total number of features to embed is less than the :math:`2^n` available amplitudes, non-informative constants (zeros) can be used to pad ``features``. To enable this, the argument ``pad`` should be set to ``True``.
    The absolute squares of all elements in ``features`` have to add up to one.
.. note::
AmplitudeEmbedding uses PennyLane's :class:`~pennylane.ops.QubitStateVector` and only works in conjunction with
devices that implement this function.
Args:
features (array): Input array of shape ``(2**n,)``
wires (Sequence[int]): sequence of qubit indices that the template acts on
pad (Boolean): controls the activation of the padding option
"""
if not isinstance(wires, Iterable):
raise ValueError("Wires needs to be a list of wires that the embedding uses; got {}.".format(wires))
if pad==True and 2**len(wires) != len(features):
features = np.pad(features, (0, 2**len(wires)-len(features)), 'constant')
if pad==False and 2**len(wires) != len(features):
raise ValueError("AmplitudeEmbedding with no padding requires a feature vector of size 2**len(wires), which is {}; "
"got {}.".format(2 ** len(wires), len(features)))
if np.linalg.norm(features,2) != 1:
raise ValueError("AmplitudeEmbedding requires a normalized feature vector.")
QubitStateVector(features, wires=wires)
|
def AmplitudeEmbedding(features, wires, pad):
r"""Encodes :math:`2^n` features into the amplitude vector of :math:`n` qubits.
    If the total number of features to embed is less than the :math:`2^n` available amplitudes, non-informative constants (zeros) can be used to pad ``features``. To enable this, the argument ``pad`` should be set to ``True``.
    The absolute squares of all elements in ``features`` have to add up to one.
.. note::
AmplitudeEmbedding uses PennyLane's :class:`~pennylane.ops.QubitStateVector` and only works in conjunction with
devices that implement this function.
Args:
features (array): Input array of shape ``(2**n,)``
wires (Sequence[int]): sequence of qubit indices that the template acts on
pad (Boolean): controls the activation of the padding option
"""
if not isinstance(wires, Iterable):
raise ValueError("Wires needs to be a list of wires that the embedding uses; got {}.".format(wires))
if pad==True and 2**len(wires) != len(features):
features = np.pad(features, (0, 2**len(wires)-len(features)), 'constant')
if pad==False and 2**len(wires) != len(features):
raise ValueError("AmplitudeEmbedding with no padding requires a feature vector of size 2**len(wires), which is {}; "
"got {}.".format(2 ** len(wires), len(features)))
if np.linalg.norm(features,2) != 1:
raise ValueError("AmplitudeEmbedding requires a normalized feature vector.")
QubitStateVector(features, wires=wires)
|
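A rough numeric sketch in plain NumPy (no PennyLane) of the padding and normalisation check above for two wires; the feature values are arbitrary.
import numpy as np

wires = [0, 1]
features = np.array([0.6, 0.8])                       # only 2 of the 2**2 amplitudes
padded = np.pad(features, (0, 2**len(wires) - len(features)), "constant")
print(padded)                                         # [0.6 0.8 0.  0. ]
print(np.isclose(np.linalg.norm(padded, 2), 1.0))     # True: zero padding keeps the norm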
53,770 |
def ilopf(n, snapshots=None, msq_threshold=0.05, min_iterations=1,
max_iterations=100, track_iterations=False, **kwargs):
'''
Iterative linear optimization updating the line parameters for passive
AC and DC lines. This is helpful when line expansion is enabled. After each
    successful solving, line impedances and line resistances are recalculated
based on the optimization result. If warmstart is possible, it uses the
    result from the previous iteration to speed up the optimization.
Parameters
----------
snapshots : list or index slice
A list of snapshots to optimise, must be a subset of
network.snapshots, defaults to network.snapshots
msq_threshold: float, default 0.05
Maximal mean square difference between optimized line capacity of
the current and the previous iteration. As soon as this threshold is
undercut, and the number of iterations is bigger than 'min_iterations'
the iterative optimization stops
min_iterations : integer, default 1
        Minimal number of iterations to run regardless of whether the msq_threshold
is already undercut
max_iterations : integer, default 100
        Maximal number of iterations to run regardless of whether the msq_threshold
is already undercut
track_iterations: bool, default False
If True, the intermediate branch capacities and values of the
objective function are recorded for each iteration. The values of
iteration 0 represent the initial state.
**kwargs
Keyword arguments of the lopf function which runs at each iteration
'''
n.lines['carrier'] = n.lines.bus0.map(n.buses.carrier)
ext_i = get_extendable_i(n, 'Line')
typed_i = n.lines.query('type != ""').index
ext_untyped_i = ext_i.difference(typed_i)
ext_typed_i = ext_i & typed_i
base_s_nom = (np.sqrt(3) * n.lines['type'].map(n.line_types.i_nom) *
n.lines.bus0.map(n.buses.v_nom))
n.lines.loc[ext_typed_i, 'num_parallel'] = (n.lines.s_nom/base_s_nom)[ext_typed_i]
def update_line_params(n, s_nom_prev):
factor = n.lines.s_nom_opt / s_nom_prev
for attr, carrier in (('x', 'AC'), ('r', 'DC')):
ln_i = (n.lines.query('carrier == @carrier').index & ext_untyped_i)
n.lines.loc[ln_i, attr] /= factor[ln_i]
ln_i = ext_i & typed_i
n.lines.loc[ln_i, 'num_parallel'] = (n.lines.s_nom_opt/base_s_nom)[ln_i]
def msq_diff(n, s_nom_prev):
lines_err = np.sqrt((s_nom_prev - n.lines.s_nom_opt).pow(2).mean()) / \
n.lines['s_nom_opt'].mean()
logger.info(f"Mean square difference after iteration {iteration} is "
f"{lines_err}")
return lines_err
def save_optimal_capacities(n, iteration, status):
for c, attr in pd.Series(nominal_attrs)[n.branch_components].items():
n.df(c)[f'{attr}_opt_{iteration}'] = n.df(c)[f'{attr}_opt']
setattr(n, f"status_{iteration}", status)
setattr(n, f"objective_{iteration}", n.objective)
n.iteration = iteration
if track_iterations:
for c, attr in pd.Series(nominal_attrs)[n.branch_components].items():
n.df(c)[f'{attr}_opt_0'] = n.df(c)[f'{attr}']
iteration = 1
kwargs['store_basis'] = True
diff = msq_threshold
while diff >= msq_threshold or iteration < min_iterations:
if iteration > max_iterations:
logger.info(f'Iteration {iteration} beyond max_iterations '
f'{max_iterations}. Stopping ...')
break
s_nom_prev = n.lines.s_nom_opt if iteration else n.lines.s_nom
kwargs['warmstart'] = bool(iteration and ('basis_fn' in n.__dir__()))
status, termination_condition = network_lopf(n, snapshots, **kwargs)
assert status == 'ok', (f'Optimization failed with status {status}'
f'and termination {termination_condition}')
if track_iterations:
save_optimal_capacities(n, iteration, status)
n.global_constraints = n.global_constraints.rename(columns={'mu': f'mu_{iteration}'})
final_duals = n.global_constraints.mu.copy() #save second last iteration of duals (needed for lv_limit duals)
update_line_params(n, s_nom_prev)
diff = msq_diff(n, s_nom_prev)
iteration += 1
logger.info('Running last lopf with fixed branches, overwrite p_nom '
'for links and s_nom for lines')
ext_links_i = get_extendable_i(n, 'Link')
n.lines[['s_nom', 's_nom_extendable']] = n.lines['s_nom_opt'], False
n.links[['p_nom', 'p_nom_extendable']] = n.links['p_nom_opt'], False
kwargs['warmstart'] = False
network_lopf(n, snapshots, **kwargs)
if track_iterations:
n.global_constraints = n.global_constraints.rename(columns={'mu':f'mu_{iteration}'})
if 'lv_limit' in n.global_constraints.index:
logger.info(f'Resulting dual value for line volume limit (lv_limit) set to the one of iteration {iteration-1} '
'due to its optimisation process.') # otherwise, final dual for lv_limit is NaN.
if track_iterations: n.global_constraints.at['lv_limit', f'mu_{iteration}'] = final_duals['lv_limit']
else: n.global_constraints.at['lv_limit', f'mu'] = final_duals['lv_limit']
n.lines.loc[ext_i, 's_nom_extendable'] = True
n.links.loc[ext_links_i, 'p_nom_extendable'] = True
|
def ilopf(n, snapshots=None, msq_threshold=0.05, min_iterations=1,
max_iterations=100, track_iterations=False, **kwargs):
'''
Iterative linear optimization updating the line parameters for passive
AC and DC lines. This is helpful when line expansion is enabled. After each
    successful solving, line impedances and line resistances are recalculated
based on the optimization result. If warmstart is possible, it uses the
    result from the previous iteration to speed up the optimization.
Parameters
----------
snapshots : list or index slice
A list of snapshots to optimise, must be a subset of
network.snapshots, defaults to network.snapshots
msq_threshold: float, default 0.05
Maximal mean square difference between optimized line capacity of
the current and the previous iteration. As soon as this threshold is
undercut, and the number of iterations is bigger than 'min_iterations'
the iterative optimization stops
min_iterations : integer, default 1
        Minimal number of iterations to run regardless of whether the msq_threshold
is already undercut
max_iterations : integer, default 100
        Maximal number of iterations to run regardless of whether the msq_threshold
is already undercut
track_iterations: bool, default False
If True, the intermediate branch capacities and values of the
objective function are recorded for each iteration. The values of
iteration 0 represent the initial state.
**kwargs
Keyword arguments of the lopf function which runs at each iteration
'''
n.lines['carrier'] = n.lines.bus0.map(n.buses.carrier)
ext_i = get_extendable_i(n, 'Line')
typed_i = n.lines.query('type != ""').index
ext_untyped_i = ext_i.difference(typed_i)
ext_typed_i = ext_i & typed_i
base_s_nom = (np.sqrt(3) * n.lines['type'].map(n.line_types.i_nom) *
n.lines.bus0.map(n.buses.v_nom))
n.lines.loc[ext_typed_i, 'num_parallel'] = (n.lines.s_nom/base_s_nom)[ext_typed_i]
def update_line_params(n, s_nom_prev):
factor = n.lines.s_nom_opt / s_nom_prev
for attr, carrier in (('x', 'AC'), ('r', 'DC')):
ln_i = (n.lines.query('carrier == @carrier').index & ext_untyped_i)
n.lines.loc[ln_i, attr] /= factor[ln_i]
ln_i = ext_i & typed_i
n.lines.loc[ln_i, 'num_parallel'] = (n.lines.s_nom_opt/base_s_nom)[ln_i]
def msq_diff(n, s_nom_prev):
lines_err = np.sqrt((s_nom_prev - n.lines.s_nom_opt).pow(2).mean()) / \
n.lines['s_nom_opt'].mean()
logger.info(f"Mean square difference after iteration {iteration} is "
f"{lines_err}")
return lines_err
def save_optimal_capacities(n, iteration, status):
for c, attr in pd.Series(nominal_attrs)[n.branch_components].items():
n.df(c)[f'{attr}_opt_{iteration}'] = n.df(c)[f'{attr}_opt']
setattr(n, f"status_{iteration}", status)
setattr(n, f"objective_{iteration}", n.objective)
n.iteration = iteration
if track_iterations:
for c, attr in pd.Series(nominal_attrs)[n.branch_components].items():
n.df(c)[f'{attr}_opt_0'] = n.df(c)[f'{attr}']
iteration = 1
kwargs['store_basis'] = True
diff = msq_threshold
while diff >= msq_threshold or iteration < min_iterations:
if iteration > max_iterations:
logger.info(f'Iteration {iteration} beyond max_iterations '
f'{max_iterations}. Stopping ...')
break
s_nom_prev = n.lines.s_nom_opt if iteration else n.lines.s_nom
kwargs['warmstart'] = bool(iteration and ('basis_fn' in n.__dir__()))
status, termination_condition = network_lopf(n, snapshots, **kwargs)
assert status == 'ok', (f'Optimization failed with status {status}'
f'and termination {termination_condition}')
if track_iterations:
save_optimal_capacities(n, iteration, status)
n.global_constraints = n.global_constraints.rename(columns={'mu': f'mu_{iteration}'})
final_duals = n.global_constraints.mu.copy() #save second last iteration of duals (needed for lv_limit duals)
update_line_params(n, s_nom_prev)
diff = msq_diff(n, s_nom_prev)
iteration += 1
logger.info('Running last lopf with fixed branches, overwrite p_nom '
'for links and s_nom for lines')
ext_links_i = get_extendable_i(n, 'Link')
n.lines[['s_nom', 's_nom_extendable']] = n.lines['s_nom_opt'], False
n.links[['p_nom', 'p_nom_extendable']] = n.links['p_nom_opt'], False
kwargs['warmstart'] = False
network_lopf(n, snapshots, **kwargs)
if track_iterations:
n.global_constraints = n.global_constraints.rename(columns={'mu':f'mu_{iteration}'})
if 'lv_limit' in n.global_constraints.index:
logger.info(f'Resulting dual value for line volume limit (lv_limit) set to the one of iteration {iteration-1} '
'due to its optimisation process.') # otherwise, final dual for lv_limit is NaN.
if track_iterations:
n.global_constraints.at['lv_limit', f'mu_{iteration}'] = final_duals['lv_limit']
else: n.global_constraints.at['lv_limit', f'mu'] = final_duals['lv_limit']
n.lines.loc[ext_i, 's_nom_extendable'] = True
n.links.loc[ext_links_i, 'p_nom_extendable'] = True
|
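A stripped-down sketch (NumPy only, no PyPSA) of the stopping rule used above: iterate until the mean square difference of the optimised capacities drops below msq_threshold, while respecting min_iterations and max_iterations. The fake "optimisation" step just shrinks the change each pass.
import numpy as np

def msq_diff(s_nom_prev, s_nom_opt):
    return np.sqrt(np.mean((s_nom_prev - s_nom_opt) ** 2)) / s_nom_opt.mean()

msq_threshold, min_iterations, max_iterations = 0.05, 1, 100
s_nom = np.array([100.0, 200.0, 300.0])
iteration, diff = 1, msq_threshold
while diff >= msq_threshold or iteration < min_iterations:
    if iteration > max_iterations:
        break
    s_nom_opt = s_nom * (1.0 + 0.5 ** iteration)   # stand-in for the LOPF result
    diff = msq_diff(s_nom, s_nom_opt)
    s_nom = s_nom_opt
    iteration += 1
print(iteration, round(diff, 4))                   # converges after a handful of passes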
32,340 |
def upload_file_command( # type: ignore[return]
client: Client,
file: str,
environment_id: str = "160: Windows 10",
file_name: Optional[str] = None,
is_confidential: str = "true",
comment: str = "",
submit_file: str = "no",
action_script: str = "",
command_line: str = "",
document_password: str = "",
enable_tor: str = "false",
submit_name: str = "",
system_date: str = "",
system_time: str = ""
) -> CommandResults:
"""Upload a file for sandbox analysis.
:param client: the client object with an access token
:param file: content of the uploaded sample in binary format
:param file_name: name of the file
:param is_confidential: defines visibility of this file in Falcon MalQuery, either via the API or the Falcon console
:param comment: a descriptive comment to identify the file for other users
:param submit_file: if "yes" run cs-fx-submit-uploaded-file for the uploaded file
:param environment_id: specifies the sandbox environment used for analysis
:param action_script: runtime script for sandbox analysis
:param command_line: command line script passed to the submitted file at runtime
:param document_password: auto-filled for Adobe or Office files that prompt for a password
:param enable_tor: if true, sandbox analysis routes network traffic via TOR
:param submit_name: name of the malware sample that’s used for file type detection and analysis
:param system_date: set a custom date in the format yyyy-MM-dd for the sandbox environment
:param system_time: set a custom time in the format HH:mm for the sandbox environment.
:return: Demisto outputs when entry_context and responses are lists
"""
response = client.upload_file(file, file_name, is_confidential, comment)
resources_fields = ("file_name", "sha256")
result = parse_outputs(response, client.reliability, resources_fields=resources_fields)
if submit_file == 'no':
return CommandResults(
outputs_key_field='sha256',
outputs_prefix=OUTPUTS_PREFIX,
outputs=result.output,
readable_output=tableToMarkdown("CrowdStrike Falcon X response:", result.output),
raw_response=response,
)
else:
sha256 = str(result.output.get("sha256")) # type: ignore[union-attr]
return send_uploaded_file_to_sandbox_analysis_command(client, sha256, environment_id, action_script,
command_line, document_password, enable_tor,
submit_name, system_date, system_time)
|
def upload_file_command( # type: ignore[return]
client: Client,
file: str,
environment_id: str = "160: Windows 10",
file_name: Optional[str] = None,
is_confidential: str = "true",
comment: str = "",
submit_file: str = "no",
action_script: str = "",
command_line: str = "",
document_password: str = "",
enable_tor: str = "false",
submit_name: str = "",
system_date: str = "",
system_time: str = "",
) -> CommandResults:
"""Upload a file for sandbox analysis.
:param client: the client object with an access token
:param file: content of the uploaded sample in binary format
:param file_name: name of the file
:param is_confidential: defines visibility of this file in Falcon MalQuery, either via the API or the Falcon console
:param comment: a descriptive comment to identify the file for other users
:param submit_file: if "yes" run cs-fx-submit-uploaded-file for the uploaded file
:param environment_id: specifies the sandbox environment used for analysis
:param action_script: runtime script for sandbox analysis
:param command_line: command line script passed to the submitted file at runtime
:param document_password: auto-filled for Adobe or Office files that prompt for a password
:param enable_tor: if true, sandbox analysis routes network traffic via TOR
:param submit_name: name of the malware sample that’s used for file type detection and analysis
:param system_date: set a custom date in the format yyyy-MM-dd for the sandbox environment
:param system_time: set a custom time in the format HH:mm for the sandbox environment.
:return: Demisto outputs when entry_context and responses are lists
"""
response = client.upload_file(file, file_name, is_confidential, comment)
resources_fields = ("file_name", "sha256")
result = parse_outputs(response, client.reliability, resources_fields=resources_fields)
if submit_file == 'no':
return CommandResults(
outputs_key_field='sha256',
outputs_prefix=OUTPUTS_PREFIX,
outputs=result.output,
readable_output=tableToMarkdown("CrowdStrike Falcon X response:", result.output),
raw_response=response,
)
else:
sha256 = str(result.output.get("sha256")) # type: ignore[union-attr]
return send_uploaded_file_to_sandbox_analysis_command(client, sha256, environment_id, action_script,
command_line, document_password, enable_tor,
submit_name, system_date, system_time)
|
37,920 |
def load_libgmt(lib_fullnames=None):
"""
Find and load ``libgmt`` as a :py:class:`ctypes.CDLL`.
Will look for the GMT shared library in the directories determined by
clib_full_names().
Parameters
----------
lib_fullnames : list of str or None
List of possible full names of GMT's shared library. If ``None``, will
default to ``clib_full_names()``.
Returns
-------
:py:class:`ctypes.CDLL` object
The loaded shared library.
Raises
------
GMTCLibNotFoundError
If there was any problem loading the library (couldn't find it or
couldn't access the functions).
"""
if lib_fullnames is None:
lib_fullnames = clib_full_names()
error = True
error_msg = []
failing_libs = []
for libname in lib_fullnames:
try:
if libname in failing_libs: # libname is known to fail, so skip it
continue
libgmt = ctypes.CDLL(libname)
check_libgmt(libgmt)
error = False
break
except (OSError, GMTCLibError) as err:
error_msg.append(
f"Error loading the GMT shared library '{libname}'.\n{err}"
)
failing_libs.append(libname)
if error:
raise GMTCLibNotFoundError("\n".join(error_msg))
return libgmt
|
def load_libgmt(lib_fullnames=None):
"""
Find and load ``libgmt`` as a :py:class:`ctypes.CDLL`.
Will look for the GMT shared library in the directories determined by
clib_full_names().
Parameters
----------
lib_fullnames : list of str or None
List of possible full names of GMT's shared library. If ``None``, will
default to ``clib_full_names()``.
Returns
-------
:py:class:`ctypes.CDLL` object
The loaded shared library.
Raises
------
GMTCLibNotFoundError
If there was any problem loading the library (couldn't find it or
couldn't access the functions).
"""
if lib_fullnames is None:
lib_fullnames = clib_full_names()
error = True
error_msg = []
failing_libs = []
for libname in lib_fullnames:
try:
if libname in failing_libs: # libname is known to fail, so skip it
continue
libgmt = ctypes.CDLL(libname)
check_libgmt(libgmt)
error = False
break
except (OSError, GMTCLibError) as err:
error_msg.append(f"Error loading GMT shared library at '{libname}'.\n{err}")
failing_libs.append(libname)
if error:
raise GMTCLibNotFoundError("\n".join(error_msg))
return libgmt
|
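A generic sketch of the try-each-candidate loading pattern above, using ctypes directly; the library names listed are purely illustrative, and the loop simply collects errors until one candidate loads.
import ctypes

candidates = ["libexample.so", "libexample.dylib", "example.dll"]
errors, lib = [], None
for name in candidates:
    try:
        lib = ctypes.CDLL(name)
        break
    except OSError as err:
        errors.append(f"Could not load '{name}': {err}")
if lib is None:
    print("\n".join(errors))   # the real helper raises GMTCLibNotFoundError here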
39,587 |
def fini_dml_stmt(
ir_stmt: irast.MutatingStmt,
wrapper: pgast.Query,
parts: DMLParts,
*,
parent_ctx: context.CompilerContextLevel,
ctx: context.CompilerContextLevel,
) -> pgast.Query:
union_cte, union_rvar = gen_dml_union(ir_stmt, parts, ctx=ctx)
if len(parts.dml_ctes) > 1 or parts.else_cte:
ctx.toplevel_stmt.ctes.append(union_cte)
relctx.include_rvar(ctx.rel, union_rvar, ir_stmt.subject.path_id, ctx=ctx)
# Record the effect of this insertion in the relation overlay
# context to ensure that the RETURNING clause potentially
# referencing this class yields the expected results.
dml_stack = get_dml_stmt_stack(ir_stmt, ctx=ctx)
if isinstance(ir_stmt, irast.InsertStmt):
# The union CTE might have a SELECT from an ELSE clause, which
# we don't actually want to include.
assert len(parts.dml_ctes) == 1
cte = next(iter(parts.dml_ctes.values()))[0]
relctx.add_type_rel_overlay(
ir_stmt.subject.typeref, 'unIon', cte,
dml_stmts=dml_stack, path_id=ir_stmt.subject.path_id, ctx=ctx)
elif isinstance(ir_stmt, irast.DeleteStmt):
relctx.add_type_rel_overlay(
ir_stmt.subject.typeref, 'except', union_cte,
dml_stmts=dml_stack, path_id=ir_stmt.subject.path_id, ctx=ctx)
clauses.compile_output(ir_stmt.result, ctx=ctx)
clauses.fini_stmt(wrapper, ctx, parent_ctx)
return wrapper
|
def fini_dml_stmt(
ir_stmt: irast.MutatingStmt,
wrapper: pgast.Query,
parts: DMLParts,
*,
parent_ctx: context.CompilerContextLevel,
ctx: context.CompilerContextLevel,
) -> pgast.Query:
union_cte, union_rvar = gen_dml_union(ir_stmt, parts, ctx=ctx)
if len(parts.dml_ctes) > 1 or parts.else_cte:
ctx.toplevel_stmt.ctes.append(union_cte)
relctx.include_rvar(ctx.rel, union_rvar, ir_stmt.subject.path_id, ctx=ctx)
# Record the effect of this insertion in the relation overlay
# context to ensure that the RETURNING clause potentially
# referencing this class yields the expected results.
dml_stack = get_dml_stmt_stack(ir_stmt, ctx=ctx)
if isinstance(ir_stmt, irast.InsertStmt):
# The union CTE might have a SELECT from an ELSE clause, which
# we don't actually want to include.
assert len(parts.dml_ctes) == 1
cte = next(iter(parts.dml_ctes.values()))[0]
relctx.add_type_rel_overlay(
ir_stmt.subject.typeref, 'union', cte,
dml_stmts=dml_stack, path_id=ir_stmt.subject.path_id, ctx=ctx)
elif isinstance(ir_stmt, irast.DeleteStmt):
relctx.add_type_rel_overlay(
ir_stmt.subject.typeref, 'except', union_cte,
dml_stmts=dml_stack, path_id=ir_stmt.subject.path_id, ctx=ctx)
clauses.compile_output(ir_stmt.result, ctx=ctx)
clauses.fini_stmt(wrapper, ctx, parent_ctx)
return wrapper
|
14,020 |
def convert_to_wkb(gdf, geom_name):
"""Convert geometries to wkb. """
from shapely.wkb import dumps
gdf[geom_name] = gdf[geom_name].apply(lambda x: dumps(x, hex=True))
return gdf
|
def _convert_to_wkb(gdf, geom_name):
"""Convert geometries to wkb. """
from shapely.wkb import dumps
gdf[geom_name] = gdf[geom_name].apply(lambda x: dumps(x, hex=True))
return gdf
|
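A usage sketch of hex-encoded WKB with shapely alone (no GeoDataFrame), mirroring what the helper applies column-wise; it assumes shapely is installed.
from shapely.geometry import Point
from shapely.wkb import dumps

print(dumps(Point(1.0, 2.0), hex=True))   # hex WKB string for POINT (1 2)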
42,899 |
def rectangular_symmetric(V, tol=1e-11):
r"""Rectangular decomposition of a unitary into symmetric beamsplitters.
This decomposition starts with the output from :func:`clements_phase_end`
and further decomposes each of the T unitaries into two phase-shifters and
two symmetric (50:50) beamsplitters.
The two beamsplitters in this decomposition of T are modeled by :class:`ops.BSgate`
with arguments (pi/4, pi/2), and the two phase-shifters (see :class:`ops.Rgate`)
act on the input mode with the lower index of the two. The phase imposed
by the first phaseshifter (before the first beamsplitter) is named
`external_phase`, while we call the phase shift between the beamsplitters
`internal_phase`.
The algorithm applied in this function makes use of the following identity:
::
Rgate(alpha) | 1
Rgate(beta) | 2
Rgate(phi) | 1
BSgate(theta, 0) | 1, 2
equals
Rgate(phi+alpha-beta) | 1
BSgate(pi/4, pi/2) | 1, 2
Rgate(2*theta+pi) | 1, 2
BSgate(pi/4, pi/2) | 1, 2
Rgate(beta-theta+pi) | 1
Rgate(beta-theta) | 2
The phase-shifts by alpha and beta are thus pushed consecutively through
all the T unitaries of the interferometer and these unitaries are converted
into pairs of symmetric beamsplitters with two phase shifts. The phase
shifts at the end of the interferometer are added to the ones from the
diagonal unitary at the end of the interferometer obtained from :func:`clements_phase_end`.
Args:
V (array): Unitary matrix of size n_size
tol (int): the number of decimal places to use when determining
whether the matrix is unitary
Returns:
tuple[array]: returns a tuple of the form ``(tlist,np.diag(localV))``
where:
* ``tlist``: list containing ``[n,m,internal_phase,external_phase,n_size]`` of the T unitaries needed
* ``localV``: Diagonal unitary matrix to be applied at the end of circuit
"""
tlist, diags = clements_phase_end(V, tol)
new_tlist, new_diags = [], np.ones(len(diags), dtype=diags.dtype)
for i in tlist:
em, en = int(i[0]), int(i[1])
alpha, beta = np.angle(new_diags[em]), np.angle(new_diags[en])
theta, phi = i[2], i[3]
external_phase = np.fmod((phi + alpha - beta), 2 * np.pi)
internal_phase = np.fmod((np.pi + 2.0 * theta), 2 * np.pi)
new_alpha = beta - theta + np.pi
new_beta = 0*np.pi - theta + beta
new_i = [i[0], i[1], internal_phase, external_phase, i[4]]
new_diags[em], new_diags[en] = np.exp(1j*new_alpha), np.exp(1j*new_beta)
new_tlist = new_tlist + [new_i]
new_diags = diags * new_diags
return (new_tlist, new_diags)
|
def rectangular_symmetric(V, tol=1e-11):
r"""Rectangular decomposition of a unitary into symmetric beamsplitters.
This decomposition starts with the output from :func:`clements_phase_end`
and further decomposes each of the T unitaries into two phase-shifters and
two symmetric (50:50) beamsplitters.
The two beamsplitters in this decomposition of T are modeled by :class:`ops.BSgate`
with arguments (pi/4, pi/2), and the two phase-shifters (see :class:`ops.Rgate`)
act on the input mode with the lower index of the two. The phase imposed
by the first phaseshifter (before the first beamsplitter) is named
`external_phase`, while we call the phase shift between the beamsplitters
``internal_phase``.
The algorithm applied in this function makes use of the following identity:
::
Rgate(alpha) | 1
Rgate(beta) | 2
Rgate(phi) | 1
BSgate(theta, 0) | 1, 2
equals
Rgate(phi+alpha-beta) | 1
BSgate(pi/4, pi/2) | 1, 2
Rgate(2*theta+pi) | 1, 2
BSgate(pi/4, pi/2) | 1, 2
Rgate(beta-theta+pi) | 1
Rgate(beta-theta) | 2
The phase-shifts by alpha and beta are thus pushed consecutively through
all the T unitaries of the interferometer and these unitaries are converted
into pairs of symmetric beamsplitters with two phase shifts. The phase
shifts at the end of the interferometer are added to the ones from the
diagonal unitary at the end of the interferometer obtained from :func:`clements_phase_end`.
Args:
V (array): Unitary matrix of size n_size
tol (int): the number of decimal places to use when determining
whether the matrix is unitary
Returns:
tuple[array]: returns a tuple of the form ``(tlist,np.diag(localV))``
where:
* ``tlist``: list containing ``[n,m,internal_phase,external_phase,n_size]`` of the T unitaries needed
* ``localV``: Diagonal unitary matrix to be applied at the end of circuit
"""
tlist, diags = clements_phase_end(V, tol)
new_tlist, new_diags = [], np.ones(len(diags), dtype=diags.dtype)
for i in tlist:
em, en = int(i[0]), int(i[1])
alpha, beta = np.angle(new_diags[em]), np.angle(new_diags[en])
theta, phi = i[2], i[3]
external_phase = np.fmod((phi + alpha - beta), 2 * np.pi)
internal_phase = np.fmod((np.pi + 2.0 * theta), 2 * np.pi)
new_alpha = beta - theta + np.pi
new_beta = 0*np.pi - theta + beta
new_i = [i[0], i[1], internal_phase, external_phase, i[4]]
new_diags[em], new_diags[en] = np.exp(1j*new_alpha), np.exp(1j*new_beta)
new_tlist = new_tlist + [new_i]
new_diags = diags * new_diags
return (new_tlist, new_diags)
|
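A numeric check in plain NumPy of the phase bookkeeping quoted in the docstring above; the angles are arbitrary and only the modular arithmetic of the identity is reproduced, not the decomposition itself.
import numpy as np

alpha, beta, theta, phi = 0.3, 1.1, 0.7, 2.0
external_phase = np.fmod(phi + alpha - beta, 2 * np.pi)   # phase before the first 50:50 BS
internal_phase = np.fmod(np.pi + 2.0 * theta, 2 * np.pi)  # phase between the two BSs
new_alpha, new_beta = beta - theta + np.pi, beta - theta  # phases pushed to the next T unitary
print(external_phase, internal_phase, new_alpha, new_beta)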
7,682 |
def create_event_request(event, category, comment=''):
assert event.category != category
if event.pending_move_request:
event.pending_move_request.withdraw()
req = EventMoveRequest(event=event, category=category, requestor=session.user, requestor_comment=comment)
db.session.flush()
logger.info('Category move request %r to %r created by %r', req, category, session.user)
sep = ' \N{RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK} '
event.log(EventLogRealm.event, LogKind.change, 'Category', f'Move to "{category.title}" requested',
user=session.user, data={'Category ID': category.id, 'Category': sep.join(category.chain_titles),
'Comment': comment},
meta={'event_move_request_id': req.id})
category.log(CategoryLogRealm.events, LogKind.positive, 'Moderation', f'Event move requested: "{event.title}"',
session.user, data={
'Event ID': event.id,
'From': sep.join(event.category.chain_titles if event.category else 'Unlisted')
},
meta={'event_move_request_id': req.id})
return req
|
def create_event_request(event, category, comment=''):
assert event.category != category
if event.pending_move_request:
event.pending_move_request.withdraw()
req = EventMoveRequest(event=event, category=category, requestor=session.user, requestor_comment=comment)
db.session.flush()
logger.info('Category move request %r to %r created by %r', req, category, session.user)
sep = ' \N{RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK} '
event.log(EventLogRealm.event, LogKind.change, 'Category', f'Move to "{category.title}" requested',
user=session.user, data={'Category ID': category.id, 'Category': sep.join(category.chain_titles),
'Comment': comment},
meta={'event_move_request_id': req.id})
category.log(CategoryLogRealm.events, LogKind.positive, 'Moderation', f'Event move requested: "{event.title}"',
session.user, data={
'Event ID': event.id,
'From': sep.join(event.category.chain_titles) if event.category else 'Unlisted'
},
meta={'event_move_request_id': req.id})
return req
|
10,637 |
def check_blocking_io():
"""Check stdin/stdout/stderr to make sure they are using blocking IO."""
handles = []
for handle in (sys.stdin, sys.stdout, sys.stderr):
# noinspection PyBroadException
try:
fd = handle.fileno()
except Exception:
continue # not a real file handle, such as during the import sanity test
if not os.get_blocking(fd):
handles.append(getattr(handle, 'name', None) or '#%s' % fd)
if handles:
raise SystemExit('Ansible requires blocking IO on stdin/stdout/stderr. Non-blocking file handles detected: %s' % ', '.join(_io for _io in handles))
|
def check_blocking_io():
"""Check stdin/stdout/stderr to make sure they are using blocking IO."""
handles = []
for handle in (sys.stdin, sys.stdout, sys.stderr):
# noinspection PyBroadException
try:
fd = handle.fileno()
except Exception:
continue # not a real file handle, such as during the import sanity test
if not os.get_blocking(fd):
handles.append(getattr(handle, 'name', None) or '#%s' % fd)
if handles:
raise SystemExit('ERROR: Ansible requires blocking IO on stdin/stdout/stderr. Non-blocking file handles detected: %s' % ', '.join(_io for _io in handles))
|
4,216 |
def test_xdawn_fit():
"""Test Xdawn fit."""
# Get data
raw, events, picks = _get_data()
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
preload=True, baseline=None, verbose=False)
# =========== Basic Fit test =================
# Test base xdawn
xd = Xdawn(n_components=2, correct_overlap='auto')
xd.fit(epochs)
# With these parameters, the overlap correction must be False
assert not xd.correct_overlap_
# No overlap correction should give averaged evoked
evoked = epochs['cond2'].average()
assert_array_equal(evoked.data, xd.evokeds_['cond2'].data)
assert np.all(np.isclose(np.linalg.norm(xd.filters_['cond2'], axis=1), 1))
# ========== with signal cov provided ====================
# Provide covariance object
signal_cov = compute_raw_covariance(raw, picks=picks)
xd = Xdawn(n_components=2, correct_overlap=False,
signal_cov=signal_cov)
xd.fit(epochs)
# Provide ndarray
signal_cov = np.eye(len(picks))
xd = Xdawn(n_components=2, correct_overlap=False,
signal_cov=signal_cov)
xd.fit(epochs)
# Provide ndarray of bad shape
signal_cov = np.eye(len(picks) - 1)
xd = Xdawn(n_components=2, correct_overlap=False,
signal_cov=signal_cov)
pytest.raises(ValueError, xd.fit, epochs)
# Provide another type
signal_cov = 42
xd = Xdawn(n_components=2, correct_overlap=False,
signal_cov=signal_cov)
pytest.raises(ValueError, xd.fit, epochs)
# Fit with baseline correction and overlap correction should throw an
# error
# XXX This is a buggy test, the epochs here don't overlap
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
preload=True, baseline=(None, 0), verbose=False)
xd = Xdawn(n_components=2, correct_overlap=True)
pytest.raises(ValueError, xd.fit, epochs)
|
def test_xdawn_fit():
"""Test Xdawn fit."""
# Get data
raw, events, picks = _get_data()
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
preload=True, baseline=None, verbose=False)
# =========== Basic Fit test =================
# Test base xdawn
xd = Xdawn(n_components=2, correct_overlap='auto')
xd.fit(epochs)
# With these parameters, the overlap correction must be False
assert not xd.correct_overlap_
# No overlap correction should give averaged evoked
evoked = epochs['cond2'].average()
assert_array_equal(evoked.data, xd.evokeds_['cond2'].data)
assert_allclose(np.linalg.norm(xd.filters_['cond2'], axis=1), 1)
# ========== with signal cov provided ====================
# Provide covariance object
signal_cov = compute_raw_covariance(raw, picks=picks)
xd = Xdawn(n_components=2, correct_overlap=False,
signal_cov=signal_cov)
xd.fit(epochs)
# Provide ndarray
signal_cov = np.eye(len(picks))
xd = Xdawn(n_components=2, correct_overlap=False,
signal_cov=signal_cov)
xd.fit(epochs)
# Provide ndarray of bad shape
signal_cov = np.eye(len(picks) - 1)
xd = Xdawn(n_components=2, correct_overlap=False,
signal_cov=signal_cov)
pytest.raises(ValueError, xd.fit, epochs)
# Provide another type
signal_cov = 42
xd = Xdawn(n_components=2, correct_overlap=False,
signal_cov=signal_cov)
pytest.raises(ValueError, xd.fit, epochs)
# Fit with baseline correction and overlap correction should throw an
# error
# XXX This is a buggy test, the epochs here don't overlap
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
preload=True, baseline=(None, 0), verbose=False)
xd = Xdawn(n_components=2, correct_overlap=True)
pytest.raises(ValueError, xd.fit, epochs)
|
5,058 |
def test_subfigure_spanning():
# test that subfigures get laid out properly...
fig = plt.figure(constrained_layout=True)
gs = fig.add_gridspec(3, 3)
sub_figs = []
sub_figs += [fig.add_subfigure(gs[0, 0])]
sub_figs += [fig.add_subfigure(gs[0:2, 1])]
sub_figs += [fig.add_subfigure(gs[2, 1:3])]
w = 640
h = 480
minp = sub_figs[0].bbox.min
exp = np.array([0., h*2/3])
np.testing.assert_allclose(minp, exp)
maxp = sub_figs[0].bbox.max
exp = np.array([w / 3, h])
np.testing.assert_allclose(maxp, exp)
minp = sub_figs[1].bbox.min
exp = np.array([w / 3, h * 1 / 3])
np.testing.assert_allclose(minp, exp)
maxp = sub_figs[1].bbox.max
exp = np.array([w * 2 / 3, h])
np.testing.assert_allclose(maxp, exp)
minp = sub_figs[2].bbox.min
exp = np.array([w / 3, 0])
np.testing.assert_allclose(minp, exp)
maxp = sub_figs[2].bbox.max
exp = np.array([w, h * 1 / 3])
np.testing.assert_allclose(maxp, exp)
|
def test_subfigure_spanning():
# test that subfigures get laid out properly...
fig = plt.figure(constrained_layout=True)
gs = fig.add_gridspec(3, 3)
sub_figs = []
sub_figs += [fig.add_subfigure(gs[0, 0])]
sub_figs += [fig.add_subfigure(gs[0:2, 1])]
sub_figs += [fig.add_subfigure(gs[2, 1:3])]
w = 640
h = 480
minp = sub_figs[0].bbox.min
exp = np.array([0., h*2/3])
np.testing.assert_allclose(minp, exp)
maxp = sub_figs[0].bbox.max
exp = np.array([w / 3, h])
np.testing.assert_allclose(maxp, exp)
minp = sub_figs[1].bbox.min
exp = np.array([w / 3, h / 3])
np.testing.assert_allclose(minp, exp)
maxp = sub_figs[1].bbox.max
exp = np.array([w * 2 / 3, h])
np.testing.assert_allclose(maxp, exp)
minp = sub_figs[2].bbox.min
exp = np.array([w / 3, 0])
np.testing.assert_allclose(minp, exp)
maxp = sub_figs[2].bbox.max
exp = np.array([w, h * 1 / 3])
np.testing.assert_allclose(maxp, exp)
|
53,750 |
def _module() -> tf.Module:
class _Mod(tf.Module):
def __init__(self):
super().__init__()
self.var = tf.Variable(0.0)
self.param = Parameter(0.0)
assert len(_Mod().trainable_variables) == 2
assert len(_Mod().variables) == 2
return _Mod()
|
def _module() -> tf.Module:
class _Mod(tf.Module):
def __init__(self):
super().__init__()
self.var = tf.Variable(0.0)
self.param = Parameter(0.0)
module = _Mod()
assert len(module.trainable_variables) == 2
assert len(module.variables) == 2
return module
|
58,659 |
def raise_warning(
message: Text,
category: Optional[Type[Warning]] = None,
docs: Optional[Text] = None,
**kwargs: Any,
) -> None:
"""Emit a `warnings.warn` with sensible defaults and a colored warning msg."""
original_formatter = warnings.formatwarning
def should_show_source_line() -> bool:
if "stacklevel" not in kwargs:
if category == UserWarning or category is None:
return False
if category == FutureWarning:
return False
return True
def formatwarning(
message: Text,
category: Optional[Type[Warning]],
filename: Text,
lineno: Optional[int],
line: Optional[Text] = None,
):
"""Function to format a warning the standard way."""
if not should_show_source_line():
if docs:
line = f"More info at {docs}"
else:
line = ""
formatted_message = original_formatter(
message, category, filename, lineno, line
)
return utils.wrap_with_color(formatted_message, color=bcolors.WARNING)
if "stacklevel" not in kwargs:
# try to set useful defaults for the most common warning categories
if category == DeprecationWarning:
kwargs["stacklevel"] = 3
elif category == UserWarning:
kwargs["stacklevel"] = 2
elif category == FutureWarning:
kwargs["stacklevel"] = 2
warnings.formatwarning = formatwarning
warnings.warn(message, category=category, **kwargs)
warnings.formatwarning = original_formatter
|
def raise_warning(
message: Text,
category: Optional[Type[Warning]] = None,
docs: Optional[Text] = None,
**kwargs: Any,
) -> None:
"""Emit a `warnings.warn` with sensible defaults and a colored warning msg."""
original_formatter = warnings.formatwarning
def should_show_source_line() -> bool:
if "stacklevel" not in kwargs:
if category == UserWarning or category is None:
return False
if category == FutureWarning:
return False
return True
def formatwarning(
message: Text,
category: Optional[Type[Warning]],
filename: Text,
lineno: Optional[int],
line: Optional[Text] = None,
):
"""Function to format a warning the standard way."""
if not should_show_source_line():
if docs:
line = f"More info at {docs}"
else:
line = ""
formatted_message = original_formatter(
message, category, filename, lineno, line
)
return utils.wrap_with_color(formatted_message, color=bcolors.WARNING)
if "stacklevel" not in kwargs:
# try to set useful defaults for the most common warning categories
if category == DeprecationWarning:
kwargs["stacklevel"] = 3
elif category in (UserWarning, FutureWarning):
kwargs["stacklevel"] = 2
elif category == FutureWarning:
kwargs["stacklevel"] = 2
warnings.formatwarning = formatwarning
warnings.warn(message, category=category, **kwargs)
warnings.formatwarning = original_formatter
|
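A minimal sketch of temporarily swapping warnings.formatwarning, which is the pattern used above; restoring the saved formatter afterwards is what keeps the override from leaking.
import warnings

original = warnings.formatwarning

def colored(message, category, filename, lineno, line=None):
    # toy formatter standing in for the wrapped, colorised one above
    return f">>> {category.__name__}: {message}\n"

warnings.formatwarning = colored
warnings.warn("something looks off", UserWarning, stacklevel=2)
warnings.formatwarning = original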
23,684 |
def fedis(aoi, surface_tilt, n=1.5, n_ref=1.4585):
"""
Determine the incidence angle modifiers (iam) for direct, diffuse sky,
and ground-reflected radiation using the FEDIS transmittance model.
The "Fresnel Equations" for Diffuse radiation on Inclined photovoltaic
Surfaces (FEDIS) [1]_ is an analytical solution of diffuse transmission
based on the rigorous integration of an alternate form of the
Fresnel equations. The approach leads to a simple yet accurate
relative transmittance model that reconciles the solar energy
sensed by pyranometers and PV panels.
Parameters
----------
aoi : numeric
Angle of incidence. [degrees]
surface_tilt : numeric
Surface tilt angle measured from horizontal (e.g. surface facing
up = 0, surface facing horizon = 90). [degrees]
n : float, default 1.5
Refractive index of the PV cover. The default value of 1.5
was used for an IMT reference cell in [1]_. [unitless]
n_ref : float, default 1.4585
Refractive index of the pyranometer cover. The default value
was used for a fused silica dome over a CMP22 in [1]_.
Returns
-------
iam : dict
IAM values for each type of irradiance:
* 'direct': radiation from the solar disc
* 'sky': radiation from the sky dome (zenith <= 90)
* 'ground': radiation reflected from the ground (zenith >= 90)
Notes
-----
This implementation corrects a typo in the reference regarding the sign
of the last polynomial term in Equation 5.
References
----------
.. [1] Xie, Y., M. Sengupta, A. Habte, A. Andreas, "The 'Fresnel Equations'
for Diffuse radiation on Inclined photovoltaic Surfaces (FEDIS)",
Renewable and Sustainable Energy Reviews, vol. 161, 112362. June 2022.
:doi:`10.1016/j.rser.2022.112362`
"""
# avoid undefined results for horizontal or upside-down surfaces
zeroang = 1e-06
surface_tilt = np.where(surface_tilt == 0, zeroang, surface_tilt)
surface_tilt = np.where(surface_tilt >= 90, 90 - zeroang, surface_tilt)
# and for aoi:
aoi = np.where(aoi <= 0, zeroang, aoi)
# similar for AOI > 90
aoi = np.where(aoi >= 90, 90 - zeroang, aoi)
# angle between module normal and refracted ray:
theta_0tp = asind(sind(aoi) / n) # Eq 3c
# reflectance of direct radiation on PV cover:
sin_term = sind(aoi - theta_0tp)**2 / sind(aoi + theta_0tp)**2 / 2
tan_term = tand(aoi - theta_0tp)**2 / tand(aoi + theta_0tp)**2 / 2
rd = sin_term + tan_term # Eq 3b
# reflectance on pyranometer cover:
r0 = ((n_ref-1.0)/(n_ref+1.0))**2.0 # Eq 3e
# relative transmittance of direct radiation by PV cover:
cd = (1 - rd) / (1 - r0) # Eq 3a
# weighting function
term1 = n*(n_ref+1)**2 / (n_ref*(n+1)**2)
# note: the last coefficient here differs in sign from the reference
polycoeffs = [2.77526e-09, 3.74953, -5.18727, 3.41186, -1.08794, 0.136060]
term2 = np.polynomial.polynomial.polyval(n, polycoeffs)
w = term1 * term2 # Eq 5
# relative transmittance of sky diffuse radiation by PV cover:
cosB = cosd(surface_tilt)
sinB = sind(surface_tilt)
cuk = (2*w / (np.pi * (1 + cosB))) * (
(30/7)*np.pi - (160/21)*np.radians(surface_tilt) - (10/3)*np.pi*cosB
+ (160/21)*cosB*sinB - (5/3)*np.pi*cosB*sinB**2 + (20/7)*cosB*sinB**3
- (5/16)*np.pi*cosB*sinB**4 + (16/105)*cosB*sinB**5
) # Eq 4
# relative transmittance of ground-reflected radiation by PV cover:
cug = 40 * w / (21 * (1 - cosB)) - (1 + cosB) / (1 - cosB) * cuk # Eq 6
# handle tilt=0 case correctly:
cug = np.where(surface_tilt == zeroang, 0, cug)
out = {
'direct': cd,
'sky': cuk,
'ground': cug,
}
return out
|
def fedis(aoi, surface_tilt, n=1.5, n_ref=1.4585):
"""
Determine the incidence angle modifiers (IAM) for direct, diffuse sky,
and ground-reflected radiation using the FEDIS transmittance model.
The "Fresnel Equations" for Diffuse radiation on Inclined photovoltaic
Surfaces (FEDIS) [1]_ is an analytical solution of diffuse transmission
based on the rigorous integration of an alternate form of the
Fresnel equations. The approach leads to a simple yet accurate
relative transmittance model that reconciles the solar energy
sensed by pyranometers and PV panels.
Parameters
----------
aoi : numeric
Angle of incidence. [degrees]
surface_tilt : numeric
Surface tilt angle measured from horizontal (e.g. surface facing
up = 0, surface facing horizon = 90). [degrees]
n : float, default 1.5
Refractive index of the PV cover. The default value of 1.5
was used for an IMT reference cell in [1]_. [unitless]
n_ref : float, default 1.4585
Refractive index of the pyranometer cover. The default value
was used for a fused silica dome over a CMP22 in [1]_.
Returns
-------
iam : dict
IAM values for each type of irradiance:
* 'direct': radiation from the solar disc
* 'sky': radiation from the sky dome (zenith <= 90)
* 'ground': radiation reflected from the ground (zenith >= 90)
Notes
-----
This implementation corrects a typo in the reference regarding the sign
of the last polynomial term in Equation 5.
References
----------
.. [1] Xie, Y., M. Sengupta, A. Habte, A. Andreas, "The 'Fresnel Equations'
for Diffuse radiation on Inclined photovoltaic Surfaces (FEDIS)",
Renewable and Sustainable Energy Reviews, vol. 161, 112362. June 2022.
:doi:`10.1016/j.rser.2022.112362`
"""
# avoid undefined results for horizontal or upside-down surfaces
zeroang = 1e-06
surface_tilt = np.where(surface_tilt == 0, zeroang, surface_tilt)
surface_tilt = np.where(surface_tilt >= 90, 90 - zeroang, surface_tilt)
# and for aoi:
aoi = np.where(aoi <= 0, zeroang, aoi)
# similar for AOI > 90
aoi = np.where(aoi >= 90, 90 - zeroang, aoi)
# angle between module normal and refracted ray:
theta_0tp = asind(sind(aoi) / n) # Eq 3c
# reflectance of direct radiation on PV cover:
sin_term = sind(aoi - theta_0tp)**2 / sind(aoi + theta_0tp)**2 / 2
tan_term = tand(aoi - theta_0tp)**2 / tand(aoi + theta_0tp)**2 / 2
rd = sin_term + tan_term # Eq 3b
# reflectance on pyranometer cover:
r0 = ((n_ref-1.0)/(n_ref+1.0))**2.0 # Eq 3e
# relative transmittance of direct radiation by PV cover:
cd = (1 - rd) / (1 - r0) # Eq 3a
# weighting function
term1 = n*(n_ref+1)**2 / (n_ref*(n+1)**2)
# note: the last coefficient here differs in sign from the reference
polycoeffs = [2.77526e-09, 3.74953, -5.18727, 3.41186, -1.08794, 0.136060]
term2 = np.polynomial.polynomial.polyval(n, polycoeffs)
w = term1 * term2 # Eq 5
# relative transmittance of sky diffuse radiation by PV cover:
cosB = cosd(surface_tilt)
sinB = sind(surface_tilt)
cuk = (2*w / (np.pi * (1 + cosB))) * (
(30/7)*np.pi - (160/21)*np.radians(surface_tilt) - (10/3)*np.pi*cosB
+ (160/21)*cosB*sinB - (5/3)*np.pi*cosB*sinB**2 + (20/7)*cosB*sinB**3
- (5/16)*np.pi*cosB*sinB**4 + (16/105)*cosB*sinB**5
) # Eq 4
# relative transmittance of ground-reflected radiation by PV cover:
cug = 40 * w / (21 * (1 - cosB)) - (1 + cosB) / (1 - cosB) * cuk # Eq 6
# handle tilt=0 case correctly:
cug = np.where(surface_tilt == zeroang, 0, cug)
out = {
'direct': cd,
'sky': cuk,
'ground': cug,
}
return out
|
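A worked numeric example in plain NumPy of Eqs. 3a-3e above: the direct-beam IAM at a 45-degree angle of incidence with the default refractive indices comes out at roughly 0.98.
import numpy as np

aoi, n, n_ref = 45.0, 1.5, 1.4585
theta_t = np.degrees(np.arcsin(np.sin(np.radians(aoi)) / n))                               # Eq 3c
rd = (np.sin(np.radians(aoi - theta_t))**2 / np.sin(np.radians(aoi + theta_t))**2
      + np.tan(np.radians(aoi - theta_t))**2 / np.tan(np.radians(aoi + theta_t))**2) / 2   # Eq 3b
r0 = ((n_ref - 1.0) / (n_ref + 1.0))**2                                                    # Eq 3e
cd = (1 - rd) / (1 - r0)                                                                   # Eq 3a
print(round(cd, 3))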
31,486 |
def command_test_module(credentials: Dict) -> str:
message: str = ''
try:
api = CBCloudAPI(**credentials)
api.api_json_request(method='GET', uri='/integrationServices/v3/cblr/session/')
message = 'ok'
except errors.UnauthorizedError:
message = 'Authorization Error: Check your API Credentials'
except Exception as e:
exception_str = str(e)
if 'connection error' in exception_str:
message = 'Connection Error: Check your Server Url'
else:
raise e
return message
|
def command_test_module(credentials: Dict) -> str:
message: str = ''
try:
api = CBCloudAPI(**credentials)
api.api_json_request(method='GET', uri='/integrationServices/v3/cblr/session/')
message = 'ok'
except errors.UnauthorizedError:
message = 'Authorization Error: Check your API Credentials'
except Exception as e:
exception_str = str(e)
if 'connection error' in exception_str:
return_error('Connection Error: Check your Server URL')
else:
raise e
return message
|
7,024 |
def load(config, additional_plugins=None):
additional_plugins = additional_plugins or []
entry_points = {
entry_point.name: entry_point
for entry_point in
pkg_resources.iter_entry_points('main_loop')
}
plugins = {
'state': {},
'timings': {}
}
for plugin_name in config['plugins'] + additional_plugins:
# get plugin
try:
module_name = entry_points[plugin_name.replace(' ', '_')]
except KeyError:
raise UserInputError(
f'No main-loop plugin: "{plugin_name}"\n'
+ ' Available plugins:\n'
+ indent('\n'.join(sorted(entry_points)), ' ')
)
# load plugin
try:
module = module_name.load()
except Exception:
raise CylcError(f'Could not load plugin: "{plugin_name}"')
# load coroutines
log = []
for coro_name, coro in (
(coro_name, coro)
for coro_name, coro in getmembers(module)
if isfunction(coro)
if hasattr(coro, 'main_loop')
):
log.append(coro_name)
plugins.setdefault(
coro.main_loop, {}
)[(plugin_name, coro_name)] = coro
plugins['timings'][(plugin_name, coro_name)] = deque(maxlen=1)
LOG.debug(
'Loaded main loop plugin "%s": %s',
plugin_name + '\n',
'\n'.join((f'* {x}' for x in log))
)
# set the initial state of the plugin
plugins['state'][plugin_name] = {}
# make a note of the config here for ease of reference
plugins['config'] = config
return plugins
|
def load(config, additional_plugins=None):
additional_plugins = additional_plugins or []
entry_points = {
entry_point.name: entry_point
for entry_point in
pkg_resources.iter_entry_points('cylc.main_loop')
}
plugins = {
'state': {},
'timings': {}
}
for plugin_name in config['plugins'] + additional_plugins:
# get plugin
try:
module_name = entry_points[plugin_name.replace(' ', '_')]
except KeyError:
raise UserInputError(
f'No main-loop plugin: "{plugin_name}"\n'
+ ' Available plugins:\n'
+ indent('\n'.join(sorted(entry_points)), ' ')
)
# load plugin
try:
module = module_name.load()
except Exception:
raise CylcError(f'Could not load plugin: "{plugin_name}"')
# load coroutines
log = []
for coro_name, coro in (
(coro_name, coro)
for coro_name, coro in getmembers(module)
if isfunction(coro)
if hasattr(coro, 'main_loop')
):
log.append(coro_name)
plugins.setdefault(
coro.main_loop, {}
)[(plugin_name, coro_name)] = coro
plugins['timings'][(plugin_name, coro_name)] = deque(maxlen=1)
LOG.debug(
'Loaded main loop plugin "%s": %s',
plugin_name + '\n',
'\n'.join((f'* {x}' for x in log))
)
# set the initial state of the plugin
plugins['state'][plugin_name] = {}
# make a note of the config here for ease of reference
plugins['config'] = config
return plugins
|
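A hedged sketch of the entry-point discovery step above using pkg_resources; 'cylc.main_loop' is the group name from the modified version, and the result is empty unless matching plugins are installed locally.
import pkg_resources

entry_points = {
    entry_point.name: entry_point
    for entry_point in pkg_resources.iter_entry_points('cylc.main_loop')
}
print(sorted(entry_points))   # [] on a machine without cylc main-loop plugins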
30,069 |
def main():
arguments = docopt(
__doc__.format(
default_config_dir=constants.config_dir,
default_root=constants.install_root,
default_repository=constants.repository_base,
default_state_dir_root=constants.STATE_DIR_ROOT,
),
)
umask(0o022)
# NOTE: Changing root or repository will likely break actually running packages.
install = Install(
os.path.abspath(arguments['--root']),
os.path.abspath(arguments['--config-dir']),
arguments['--rooted-systemd'],
not arguments['--no-systemd'],
not arguments['--no-block-systemd'],
manage_users=True,
add_users=not os.path.exists('/etc/mesosphere/manual_host_users'),
manage_state_dir=True,
state_dir_root=os.path.abspath(arguments['--state-dir-root']))
repository = Repository(os.path.abspath(arguments['--repository']))
try:
if arguments['setup']:
actions.setup(install, repository)
sys.exit(0)
if arguments['list']:
print_repo_list(repository.list())
sys.exit(0)
if arguments['active']:
for pkg in sorted(install.get_active()):
print(pkg)
sys.exit(0)
if arguments['show']:
actions.show_package(repository, arguments['<package-id>'])
sys.exit(0)
if arguments['add']:
actions.add_package_file(repository, arguments['<package-tarball>'])
sys.exit(0)
if arguments['fetch']:
for package_id in arguments['<id>']:
actions.fetch_package(
repository,
arguments['--repository-url'],
package_id,
os.getcwd())
sys.exit(0)
if arguments['activate']:
actions.activate_packages(
install,
repository,
arguments['<id>'],
not arguments['--no-systemd'],
not arguments['--no-block-systemd'])
sys.exit(0)
if arguments['swap']:
actions.swap_active_package(
install,
repository,
arguments['<package-id>'],
not arguments['--no-systemd'],
not arguments['--no-block-systemd'])
sys.exit(0)
if arguments['remove']:
for package_id in arguments['<id>']:
try:
actions.remove_package(install, repository, package_id)
except PackageNotFound:
pass
sys.exit(0)
if arguments['uninstall']:
uninstall(install, repository)
sys.exit(0)
if arguments['check']:
checks = find_checks(install, repository)
if arguments['--list']:
list_checks(checks)
sys.exit(0)
# Run all checks
sys.exit(run_checks(checks, install, repository))
except ValidationError as ex:
print("Validation Error: {0}".format(ex), file=sys.stderr)
sys.exit(1)
except PackageError as ex:
print("Package Error: {0}".format(ex), file=sys.stderr)
sys.exit(1)
except Exception as ex:
print("ERROR: {0}".format(ex), file=sys.stderr)
sys.exit(1)
print("unknown command", file=sys.stderr)
sys.exit(1)
|
def main():
arguments = docopt(
__doc__.format(
default_config_dir=constants.config_dir,
default_root=constants.install_root,
default_repository=constants.repository_base,
default_state_dir_root=constants.STATE_DIR_ROOT,
),
)
umask(0o022)
# NOTE: Changing root or repository will likely break actually running packages.
install = Install(
os.path.abspath(arguments['--root']),
os.path.abspath(arguments['--config-dir']),
arguments['--rooted-systemd'],
not arguments['--no-systemd'],
not arguments['--no-block-systemd'],
manage_users=True,
add_users=not os.path.exists('/etc/mesosphere/manual_host_users'),
manage_state_dir=True,
state_dir_root=os.path.abspath(arguments['--state-dir-root']))
repository = Repository(os.path.abspath(arguments['--repository']))
try:
if arguments['setup']:
actions.setup(install, repository)
sys.exit(0)
if arguments['list']:
print_repo_list(repository.list())
sys.exit(0)
if arguments['active']:
for pkg in sorted(install.get_active()):
print(pkg)
sys.exit(0)
if arguments['show']:
print(json.dumps(repository.load(arguments['<package-id>']).pkginfo, indent=4))
sys.exit(0)
if arguments['add']:
actions.add_package_file(repository, arguments['<package-tarball>'])
sys.exit(0)
if arguments['fetch']:
for package_id in arguments['<id>']:
actions.fetch_package(
repository,
arguments['--repository-url'],
package_id,
os.getcwd())
sys.exit(0)
if arguments['activate']:
actions.activate_packages(
install,
repository,
arguments['<id>'],
not arguments['--no-systemd'],
not arguments['--no-block-systemd'])
sys.exit(0)
if arguments['swap']:
actions.swap_active_package(
install,
repository,
arguments['<package-id>'],
not arguments['--no-systemd'],
not arguments['--no-block-systemd'])
sys.exit(0)
if arguments['remove']:
for package_id in arguments['<id>']:
try:
actions.remove_package(install, repository, package_id)
except PackageNotFound:
pass
sys.exit(0)
if arguments['uninstall']:
uninstall(install, repository)
sys.exit(0)
if arguments['check']:
checks = find_checks(install, repository)
if arguments['--list']:
list_checks(checks)
sys.exit(0)
# Run all checks
sys.exit(run_checks(checks, install, repository))
except ValidationError as ex:
print("Validation Error: {0}".format(ex), file=sys.stderr)
sys.exit(1)
except PackageError as ex:
print("Package Error: {0}".format(ex), file=sys.stderr)
sys.exit(1)
except Exception as ex:
print("ERROR: {0}".format(ex), file=sys.stderr)
sys.exit(1)
print("unknown command", file=sys.stderr)
sys.exit(1)
|
14,883 |
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Openhome platform."""
if not discovery_info:
return True
if DATA_OPENHOME not in hass.data:
hass.data[DATA_OPENHOME] = []
name = discovery_info.get("name")
description = discovery_info.get("ssdp_description")
_LOGGER.info("Openhome device found: %s", name)
device = Device(description)
# if device has already been discovered
if device.Uuid() in [x.unique_id for x in hass.data[DATA_OPENHOME]]:
return True
hass_device = OpenhomeDevice(hass, device)
add_entities([hass_device], True)
hass.data[DATA_OPENHOME].append(hass_device)
def service_handle(call):
"""Handle the service call."""
entity_id = call.data.get(ATTR_ENTITY_ID)
device = next(
[
device
for device in hass.data[DATA_OPENHOME]
if device.entity_id == entity_id
].__iter__(),
None,
)
if not device:
return
if call.service == SERVICE_INVOKE_PIN:
index = call.data.get(ATTR_PIN_INDEX)
_LOGGER.info("Openhome invoking pin %s on %s", index, entity_id)
if index:
device.invoke_pin(index)
hass.services.register(
DOMAIN, SERVICE_INVOKE_PIN, service_handle, schema=OPENHOME_PIN_SCHEMA
)
return True
|
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Openhome platform."""
if not discovery_info:
return True
if DATA_OPENHOME not in hass.data:
hass.data[DATA_OPENHOME] = []
name = discovery_info.get("name")
description = discovery_info.get("ssdp_description")
_LOGGER.info("Openhome device found: %s", name)
device = Device(description)
# if device has already been discovered
if device.Uuid() in [x.unique_id for x in hass.data[DATA_OPENHOME]]:
return True
hass_device = OpenhomeDevice(hass, device)
async_add_entities([entity], True)
hass.data[DATA_OPENHOME].append(hass_device)
def service_handle(call):
"""Handle the service call."""
entity_id = call.data.get(ATTR_ENTITY_ID)
device = next(
[
device
for device in hass.data[DATA_OPENHOME]
if device.entity_id == entity_id
].__iter__(),
None,
)
if not device:
return
if call.service == SERVICE_INVOKE_PIN:
index = call.data.get(ATTR_PIN_INDEX)
_LOGGER.info("Openhome invoking pin %s on %s", index, entity_id)
if index:
device.invoke_pin(index)
hass.services.register(
DOMAIN, SERVICE_INVOKE_PIN, service_handle, schema=OPENHOME_PIN_SCHEMA
)
return True
|
37,000 |
def make_check_retry_fn(
fallback_retry_fn: CheckRetryFnType,
check_fn: Callable[[Exception], Optional[bool]],
check_timedelta: Optional[timedelta] = None,
) -> CheckRetryFnType:
"""Return a check_retry_fn which can be used by lib.Retry().
Arguments:
fallback_fn: Use this function if check_fn didn't decide if a retry should happen.
check_fn: Function which returns bool if retry should happen or None if unsure.
check_timedelta: Optional retry timeout if we check_fn matches the exception
"""
def check_retry_fn(e: Exception) -> Union[bool, timedelta]:
check = check_fn(e)
if check is None:
return fallback_retry_fn(e)
if check is False:
return False
if check_timedelta:
return check_timedelta
return True
return check_retry_fn
|
def make_check_retry_fn(
fallback_retry_fn: CheckRetryFnType,
check_fn: Callable[[Exception], Literal["retry", "raise", "fallback"]],
check_timedelta: Optional[timedelta] = None,
) -> CheckRetryFnType:
"""Return a check_retry_fn which can be used by lib.Retry().
Arguments:
fallback_fn: Use this function if check_fn didn't decide if a retry should happen.
check_fn: Function which returns bool if retry should happen or None if unsure.
check_timedelta: Optional retry timeout if we check_fn matches the exception
"""
def check_retry_fn(e: Exception) -> Union[bool, timedelta]:
check = check_fn(e)
if check is None:
return fallback_retry_fn(e)
if check is False:
return False
if check_timedelta:
return check_timedelta
return True
return check_retry_fn
|
13,069 |
def from_global_id_or_error(
id: str, only_type: Union[ObjectType, str] = None, raise_error: bool = False
):
"""Resolve database ID from global ID or raise GraphQLError.
Optionally validate the object type, if `only_type` is provided,
raise GraphQLError when `raise_error` is set to True.
"""
try:
_type, _id = graphene.Node.from_global_id(id)
except (binascii.Error, UnicodeDecodeError, ValueError):
raise GraphQLError(f"Couldn't resolve id: {id}.")
if _type == APP_ID_PREFIX:
_id = id
else:
try:
int(_id)
except ValueError:
try:
UUID(_id)
except (AttributeError, ValueError):
raise GraphQLError(f"Error occurred during ID - {id} validation.")
if only_type and str(_type) != str(only_type):
if not raise_error:
return _type, None
raise GraphQLError(f"Must receive a {only_type} id.")
return _type, _id
|
def from_global_id_or_error(
id: str, only_type: Union[ObjectType, str] = None, raise_error: bool = False
):
"""Resolve global ID or raise GraphQLError.
Optionally validate the object type, if `only_type` is provided,
raise GraphQLError when `raise_error` is set to True.
"""
try:
_type, _id = graphene.Node.from_global_id(id)
except (binascii.Error, UnicodeDecodeError, ValueError):
raise GraphQLError(f"Couldn't resolve id: {id}.")
if _type == APP_ID_PREFIX:
_id = id
else:
try:
int(_id)
except ValueError:
try:
UUID(_id)
except (AttributeError, ValueError):
raise GraphQLError(f"Error occurred during ID - {id} validation.")
if only_type and str(_type) != str(only_type):
if not raise_error:
return _type, None
raise GraphQLError(f"Must receive a {only_type} id.")
return _type, _id
|
7,314 |
def crop(image, bounding_box, axis=None):
"""Cropping images from a bounding box.
Bounding_box (which is a 2-tuple (min_val, max_val) for each axis)
and (optional) axis for corresponding axis order to bounding_box.
Parameters
----------
Image : ndarray
Input array.
Bounding_box : list of 2-tuple (x, y) where x < y.
Bounding box.
axis : tuple, optional
Axis order for cropping.
if provided, same legth as bounding_box.
Default: None
Returns
----------
out : ndarray
Cropped array.
Examples
--------
>>> from skimage import data
>>> from skimage.util.crop import crop
>>> img = data.camera()
>>> img.shape
(512, 512)
>>> cropped_img = crop(img, [(0, 100)])
>>> cropped_img.shape
(100, 512)
>>> cropped_img = crop(img, [(0, 100), (0, 100)])
>>> cropped_img.shape
(100, 100)
>>> cropped_img = crop(img, [(0, 100), (0, 75)], axis=[1, 0])
>>> cropped_img.shape
(75, 100)
"""
# empty legth of bounding box detected on None detected
if not bounding_box:
return image
# check data isinstance of numpy array
if not isinstance(image, np.ndarray):
raise ValueError("data must be numpy array")
# if not axis provided,
# consider sequential cropping on axis
if not axis:
axis = list(range(len(bounding_box)))
else:
if len(axis) != len(set(axis)):
raise ValueError("axis must be unique")
if len(axis) != len(bounding_box):
raise ValueError("axis and bounding_box must have same length")
if not all(isinstance(a, int) for a in axis):
raise ValueError("axis must be integer")
if not all(a >= 0 for a in axis):
raise ValueError("axis must be positive")
if not all(a < image.ndim for a in axis):
raise ValueError("axis must be less than image.ndim")
bbox_with_axis = list(zip(bounding_box, axis))
# sort axis by decreasing
bbox_with_axis.sort(key=lambda x: x[1], reverse=True)
full_bbox_data = []
for idx in range(image.ndim):
if bbox_with_axis and bbox_with_axis[-1][1] == idx:
bbox, _ = bbox_with_axis.pop()
axis_min, axis_max = bbox
if axis_min > axis_max:
raise ValueError(
"In bounding_box, tuple should be sorted (min_val, max_val)")
if axis_min < 0:
raise ValueError("In bounding_box, values must be positive")
if axis_max < 0:
raise ValueError("In bounding_box, values must be positive")
if axis_min > image.shape[idx]:
raise ValueError("Invalid bounding_box!")
if axis_max > image.shape[idx]:
raise ValueError("Invalid bounding_box!")
full_bbox_data.append(range(*bbox))
else:
full_bbox_data.append(range(image.shape[idx]))
return image[np.ix_(*full_bbox_data)]
|
def crop(image, bounding_box, axis=None):
"""Cropping images from a bounding box.
Bounding_box (which is a 2-tuple (min_val, max_val) for each axis)
and (optional) axis for corresponding axis order to bounding_box.
Parameters
----------
Image : ndarray
Input array.
Bounding_box : list of 2-tuple (x, y) where x < y.
Bounding box.
axis : tuple, optional
Axis order for cropping.
If provided, same length as bounding_box.
Returns
----------
out : ndarray
Cropped array.
Examples
--------
>>> from skimage import data
>>> from skimage.util.crop import crop
>>> img = data.camera()
>>> img.shape
(512, 512)
>>> cropped_img = crop(img, [(0, 100)])
>>> cropped_img.shape
(100, 512)
>>> cropped_img = crop(img, [(0, 100), (0, 100)])
>>> cropped_img.shape
(100, 100)
>>> cropped_img = crop(img, [(0, 100), (0, 75)], axis=[1, 0])
>>> cropped_img.shape
(75, 100)
"""
# empty legth of bounding box detected on None detected
if not bounding_box:
return image
# check data isinstance of numpy array
if not isinstance(image, np.ndarray):
raise ValueError("data must be numpy array")
# if not axis provided,
# consider sequential cropping on axis
if not axis:
axis = list(range(len(bounding_box)))
else:
if len(axis) != len(set(axis)):
raise ValueError("axis must be unique")
if len(axis) != len(bounding_box):
raise ValueError("axis and bounding_box must have same length")
if not all(isinstance(a, int) for a in axis):
raise ValueError("axis must be integer")
if not all(a >= 0 for a in axis):
raise ValueError("axis must be positive")
if not all(a < image.ndim for a in axis):
raise ValueError("axis must be less than image.ndim")
bbox_with_axis = list(zip(bounding_box, axis))
# sort axis by decreasing
bbox_with_axis.sort(key=lambda x: x[1], reverse=True)
full_bbox_data = []
for idx in range(image.ndim):
if bbox_with_axis and bbox_with_axis[-1][1] == idx:
bbox, _ = bbox_with_axis.pop()
axis_min, axis_max = bbox
if axis_min > axis_max:
raise ValueError(
"In bounding_box, tuple should be sorted (min_val, max_val)")
if axis_min < 0:
raise ValueError("In bounding_box, values must be positive")
if axis_max < 0:
raise ValueError("In bounding_box, values must be positive")
if axis_min > image.shape[idx]:
raise ValueError("Invalid bounding_box!")
if axis_max > image.shape[idx]:
raise ValueError("Invalid bounding_box!")
full_bbox_data.append(range(*bbox))
else:
full_bbox_data.append(range(image.shape[idx]))
return image[np.ix_(*full_bbox_data)]
|
8,107 |
def _rotation_matrix_hcc_to_hgs(longitude, latitude):
# Returns the rotation matrix from HCC to HGS based on the observer longitude and latitude
# Permute the axes of HCC to match HGS Cartesian equivalent
# HGS_X = HCC_Z
# HGS_Y = HCC_X
# HGS_Z = HCC_Y
axes_matrix = np.array([[0, 0, 1],
[1, 0, 0],
[0, 1, 0]])
# Rotate in latitude and longitude (sign difference because of direction difference)
lat_matrix = rotation_matrix(latitude, 'y')
lon_matrix = rotation_matrix(-longitude, 'z')
return matrix_product(matrix_product(lon_matrix, lat_matrix), axes_matrix)
|
def _rotation_matrix_hcc_to_hgs(longitude, latitude):
# Returns the rotation matrix from HCC to HGS based on the observer longitude and latitude
# Permute the axes of HCC to match HGS Cartesian equivalent
# HGS_X = HCC_Z
# HGS_Y = HCC_X
# HGS_Z = HCC_Y
axes_matrix = np.array([[0, 0, 1],
[1, 0, 0],
[0, 1, 0]])
# Rotate in latitude and longitude (sign difference because of direction difference)
lat_matrix = rotation_matrix(latitude, 'y')
lon_matrix = rotation_matrix(-longitude, 'z')
return lon_matrix @ lat_matrix @ axes_matrix
|
22,341 |
def seconds_to_str(value):
"""Convert seconds to a simple simple string describing the amount of time."""
mins, secs = divmod(value, 60)
hours, mins = divmod(mins, 60)
if value < 60:
return "%s seconds" % secs
elif value < 3600:
return "%s minutes" % mins
else:
return "%s hours and %s minutes" % (hours, mins)
|
def seconds_to_str(value):
"""Convert seconds to a simple simple string describing the amount of time."""
mins, secs = divmod(value, 60)
hours, mins = divmod(mins, 60)
if value < 60:
return "%s seconds" % secs
elif value < 3600:
return "%s minutes" % mins
else:
return f"{hours} hours and {mins} minutes"
|
48,469 |
def get_best_parsable_locale(module, preferences=None):
'''
Attempts to return the best possible locale for parsing output in english
useful for scraping output with i18n tools
:param module: an AnsibleModule instance
:param preferences: A list of prefered locales, in order of preference
:returns: The first matched prefered locale or 'C' which is the default
'''
found = 'C' # default posix, its ascii but always there
if preferences is None:
# new posix standard or english cause those are messages core team expects
# yes, last 2 are same but some systems are weird
preferences = ['C.utf8', 'en_US.utf8', 'C', 'POSIX']
rc, out, err = module.run_command(['locale', '-a'])
if rc == 0:
if out:
available = out.strip().splitlines()
else:
module.warn("No output from locale, defaulting to C, rc=%s: %s" % (rc, to_native(err)))
else:
module.warn("Unable to get locale information, defaulting to C, rc=%s: %s" % (rc, to_native(err)))
if available:
for pref in preferences:
if pref in available:
found = pref
break
return found
|
def get_best_parsable_locale(module, preferences=None):
'''
Attempts to return the best possible locale for parsing output in English
useful for scraping output with i18n tools
:param module: an AnsibleModule instance
:param preferences: A list of preferred locales, in order of preference
:returns: The first matched preferred locale or 'C' which is the default
'''
found = 'C' # default posix, its ascii but always there
if preferences is None:
# new posix standard or english cause those are messages core team expects
# yes, last 2 are same but some systems are weird
preferences = ['C.utf8', 'en_US.utf8', 'C', 'POSIX']
rc, out, err = module.run_command(['locale', '-a'])
if rc == 0:
if out:
available = out.strip().splitlines()
else:
module.warn("No output from locale, defaulting to C, rc=%s: %s" % (rc, to_native(err)))
else:
module.warn("Unable to get locale information, defaulting to C, rc=%s: %s" % (rc, to_native(err)))
if available:
for pref in preferences:
if pref in available:
found = pref
break
return found
|
40,542 |
def get_custom_locations_oid(cmd, cl_oid):
try:
sp_graph_client = get_graph_client_service_principals(cmd.cli_ctx)
sub_filters = []
sub_filters.append("displayName eq '{}'".format("Custom Locations RP"))
result = list(sp_graph_client.list(filter=(' and '.join(sub_filters))))
if len(result) != 0:
if cl_oid is not None and cl_oid != result[0].object_id:
logger.warning("The 'Custom-locations' OID passed is different from the actual OID({}) of the Custom Locations RP app. Proceeding with the correct one...".format(result[0].object_id))
return result[0].object_id # Using the fetched OID
if cl_oid is None:
logger.warning("Unable to fetch oid of 'custom-locations' app. Proceeding without enabling the feature.")
telemetry.set_exception(exception='Unable to fetch oid of custom locations app.', fault_type=consts.Custom_Locations_OID_Fetch_Fault_Type,
summary='Unable to fetch oid for custom locations app.')
return ""
else:
return cl_oid
except Exception as e:
log_string = "Unable to fetch oid of 'custom-locations' app. "
telemetry.set_exception(exception=e, fault_type=consts.Custom_Locations_OID_Fetch_Fault_Type,
summary='Unable to fetch oid for custom locations app.')
if cl_oid:
log_string += "Proceeding with the OID passed to enable the 'custom-locations' feature."
logger.warning(log_string)
return cl_oid
log_string += "Proceeding without enabling the feature. " + str(e)
logger.warning(log_string)
return ""
|
def get_custom_locations_oid(cmd, cl_oid):
try:
sp_graph_client = get_graph_client_service_principals(cmd.cli_ctx)
sub_filters = []
sub_filters.append("displayName eq '{}'".format("Custom Locations RP"))
result = list(sp_graph_client.list(filter=(' and '.join(sub_filters))))
if len(result) != 0:
if cl_oid is not None and cl_oid != result[0].object_id:
logger.warning("The 'Custom-locations' OID passed is different from the actual OID({}) of the Custom Locations RP app. Proceeding with the correct one...".format(result[0].object_id))
return result[0].object_id # Using the fetched OID
if cl_oid is None:
logger.warning("Unable to fetch oid of 'custom-locations' app. Proceeding without enabling the feature.")
telemetry.set_exception(exception='Unable to fetch oid of custom locations app.', fault_type=consts.Custom_Locations_OID_Fetch_Fault_Type,
summary='Unable to fetch oid for custom locations app.')
return ""
else:
return cl_oid
except Exception as e:
log_string = "Unable to fetch the Object ID of the Azure AD application used by Azure Arc service. "
telemetry.set_exception(exception=e, fault_type=consts.Custom_Locations_OID_Fetch_Fault_Type,
summary='Unable to fetch oid for custom locations app.')
if cl_oid:
log_string += "Proceeding with the OID passed to enable the 'custom-locations' feature."
logger.warning(log_string)
return cl_oid
log_string += "Proceeding without enabling the feature. " + str(e)
logger.warning(log_string)
return ""
|
2,463 |
def _fetch_remote(remote, dirname=None, n_retries=3, delay=1):
"""Helper function to download a remote dataset into path
Fetch a dataset pointed by remote's url, save into path using remote's
filename and ensure its integrity based on the SHA256 Checksum of the
downloaded file.
Parameters
----------
remote : RemoteFileMetadata
Named tuple containing remote dataset meta information: url, filename
and checksum
dirname : str
Directory to save the file to.
n_retries : int
Number of retries when HTTP errors are encountered.
delay : int
Number of seconds between retries.
Returns
-------
file_path: str
Full path of the created file.
"""
file_path = remote.filename if dirname is None else join(dirname, remote.filename)
retry_cnt = n_retries
while 1:
try:
urlretrieve(remote.url, file_path)
break
except URLError:
if retry_cnt > 0:
warnings.warn("Retry downloading from url: {}".format(remote.url))
time.sleep(delay)
retry_cnt -= 1
else:
raise
checksum = _sha256(file_path)
if remote.checksum != checksum:
raise IOError(
"{} has an SHA256 checksum ({}) "
"differing from expected ({}), "
"file may be corrupted.".format(file_path, checksum, remote.checksum)
)
return file_path
|
def _fetch_remote(remote, dirname=None, n_retries=3, delay=1):
"""Helper function to download a remote dataset into path
Fetch a dataset pointed by remote's url, save into path using remote's
filename and ensure its integrity based on the SHA256 Checksum of the
downloaded file.
Parameters
----------
remote : RemoteFileMetadata
Named tuple containing remote dataset meta information: url, filename
and checksum
dirname : str
Directory to save the file to.
n_retries : int
Number of retries when HTTP errors are encountered.
delay : int
Number of seconds between retries.
Returns
-------
file_path: str
Full path of the created file.
"""
file_path = remote.filename if dirname is None else join(dirname, remote.filename)
retry_cnt = n_retries
while 1:
try:
urlretrieve(remote.url, file_path)
break
except URLError:
if retry_cnt > 0:
warnings.warn(f"Retry downloading from url: {remote.url}")
time.sleep(delay)
retry_cnt -= 1
else:
raise
checksum = _sha256(file_path)
if remote.checksum != checksum:
raise IOError(
"{} has an SHA256 checksum ({}) "
"differing from expected ({}), "
"file may be corrupted.".format(file_path, checksum, remote.checksum)
)
return file_path
|
5,319 |
def get_health_check_id_by_name(name, region=None, key=None, keyid=None, profile=None):
'''
Find Id of a health check with the given name (as tag).
name
The name associated with the health check.
region
Region to connect to.
key
Secret key to be used.
keyid
Access key to be used.
profile
Dict, or pillar key pointing to a dict, containing AWS region/key/keyid.
CLI Example:
.. code-block:: bash
salt myminion boto3_route53.get_health_check_id_by_name ANAME \
profile='{"region": "us-east-1", "keyid": "A12345678AB", "key": "xblahblahblah"}'
'''
ids = get_health_check_ids_by_name(name, region=region, key=key, keyid=keyid, profile=profile)
if len(ids) > 1:
log.error(
'Request matched more than one HealthCheck (%s). Refine your '
'criteria and try again.', [z['Id'] for z in ret]
)
return ids[0] if len(ids) > 0 else None
|
def get_health_check_id_by_name(name, region=None, key=None, keyid=None, profile=None):
'''
Find Id of a health check with the given name (as tag).
name
The name associated with the health check.
region
Region to connect to.
key
Secret key to be used.
keyid
Access key to be used.
profile
Dict, or pillar key pointing to a dict, containing AWS region/key/keyid.
CLI Example:
.. code-block:: bash
salt myminion boto3_route53.get_health_check_id_by_name ANAME \
profile='{"region": "us-east-1", "keyid": "A12345678AB", "key": "xblahblahblah"}'
'''
ids = get_health_check_ids_by_name(name, region=region, key=key, keyid=keyid, profile=profile)
if ids:
log.error(
'Request matched more than one HealthCheck (%s). Refine your '
'criteria and try again.', [z['Id'] for z in ret]
)
return ids[0] if len(ids) > 0 else None
|
19,616 |
def getgccversion(chost=None):
"""
rtype: C{str}
return: the current in-use gcc version
"""
gcc_ver_command = ["gcc", "-dumpversion"]
gcc_ver_prefix = "gcc-"
# accept clang as system compiler too
clang_ver_command = ["clang", "--version"]
clang_ver_prefix = "clang-"
ubinpath = os.path.join("/", portage.const.EPREFIX, "usr", "bin")
def getclangversion(output):
version = re.search("clang version ([0-9.]+) ", output)
if version:
return version.group(1)
return "unknown"
gcc_not_found_error = red(
"!!! No gcc found. You probably need to 'source /etc/profile'\n"
+ "!!! to update the environment of this terminal and possibly\n"
+ "!!! other terminals also.\n"
)
if chost:
try:
proc = subprocess.Popen(
[ubinpath + "/" + "gcc-config", "-c"],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
except OSError:
myoutput = None
mystatus = 1
else:
myoutput = _unicode_decode(proc.communicate()[0]).rstrip("\n")
mystatus = proc.wait()
if mystatus == os.EX_OK and myoutput.startswith(chost + "-"):
return myoutput.replace(chost + "-", gcc_ver_prefix, 1)
try:
proc = subprocess.Popen(
[ubinpath + "/" + chost + "-" + gcc_ver_command[0]]
+ gcc_ver_command[1:],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
except OSError:
myoutput = None
mystatus = 1
else:
myoutput = _unicode_decode(proc.communicate()[0]).rstrip("\n")
mystatus = proc.wait()
if mystatus == os.EX_OK:
return gcc_ver_prefix + myoutput
# no GCC? try Clang
try:
proc = subprocess.Popen(
[ubinpath + "/" + chost + "-" + clang_ver_command[0]]
+ clang_ver_command[1:],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
except OSError:
myoutput = None
mystatus = 1
else:
myoutput = _unicode_decode(proc.communicate()[0]).rstrip("\n")
mystatus = proc.wait()
if mystatus == os.EX_OK:
return clang_ver_prefix + getclangversion(myoutput)
try:
proc = subprocess.Popen(
gcc_ver_command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
)
except OSError:
myoutput = None
mystatus = 1
else:
myoutput = _unicode_decode(proc.communicate()[0]).rstrip("\n")
mystatus = proc.wait()
if mystatus == os.EX_OK:
return gcc_ver_prefix + myoutput
portage.writemsg(gcc_not_found_error, noiselevel=-1)
return "[unavailable]"
|
def getgccversion(chost=None):
"""
rtype: C{str}
return: the current in-use gcc version
"""
gcc_ver_command = ["gcc", "-dumpversion"]
gcc_ver_prefix = "gcc-"
# accept clang as system compiler too
clang_ver_command = ["clang", "--version"]
clang_ver_prefix = "clang-"
ubinpath = os.path.join("/", portage.const.EPREFIX, "usr", "bin")
def getclangversion(output):
version = re.search("clang version ([0-9.]+) ", output)
if version:
return version.group(1)
return "unknown"
gcc_not_found_error = red(
"!!! No gcc found. You probably need to 'source /etc/profile'\n"
+ "!!! to update the environment of this terminal and possibly\n"
+ "!!! other terminals also.\n"
)
if chost:
try:
proc = subprocess.Popen(
[ubinpath + "/" + "gcc-config", "-c"],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
except OSError:
myoutput = None
mystatus = 1
else:
myoutput = _unicode_decode(proc.communicate()[0]).rstrip("\n")
mystatus = proc.wait()
if mystatus == os.EX_OK and myoutput.startswith(chost + "-"):
return myoutput.replace(chost + "-", gcc_ver_prefix, 1)
try:
proc = subprocess.Popen(
[ubinpath + "/" + chost + "-" + gcc_ver_command[0]]
+ gcc_ver_command[1:],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
except OSError:
myoutput = None
mystatus = 1
else:
myoutput = _unicode_decode(proc.communicate()[0]).rstrip("\n")
mystatus = proc.wait()
if mystatus == os.EX_OK:
return gcc_ver_prefix + myoutput
# no GCC? try Clang
try:
proc = subprocess.Popen(
[ubinpath + "/" + chost + "-" + clang_ver_command[0]]
+ clang_ver_command[1:],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
except OSError:
myoutput = None
mystatus = 1
else:
myoutput = _unicode_decode(proc.communicate()[0]).rstrip("\n")
mystatus = proc.wait()
if mystatus == os.EX_OK:
return clang_ver_prefix + get_clang_version(myoutput)
try:
proc = subprocess.Popen(
gcc_ver_command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
)
except OSError:
myoutput = None
mystatus = 1
else:
myoutput = _unicode_decode(proc.communicate()[0]).rstrip("\n")
mystatus = proc.wait()
if mystatus == os.EX_OK:
return gcc_ver_prefix + myoutput
portage.writemsg(gcc_not_found_error, noiselevel=-1)
return "[unavailable]"
|
10,692 |
def magma(n: int) -> Palette:
""" Generate a palette of colors or from the Magma palette.
The full Magma palette that serves as input for deriving new palettes
has 256 colors, and looks like:
:bokeh-palette:`magma(256)`
Args:
n (int) : size of the palette to generate
Returns:
seq[str] : a sequence of hex RGB color strings
Raises:
ValueError if n is greater than the base palette length of 256
Examples:
.. code-block:: python
>>> magma(6)
('#000003', '#3B0F6F', '#8C2980', '#DD4968', '#FD9F6C', '#FBFCBF')
The resulting palette looks like: :bokeh-palette:`magma(6)`
"""
return linear_palette(Magma256, n)
|
def magma(n: int) -> Palette:
""" Generate a palette of colors from the Magma palette.
The full Magma palette that serves as input for deriving new palettes
has 256 colors, and looks like:
:bokeh-palette:`magma(256)`
Args:
n (int) : size of the palette to generate
Returns:
seq[str] : a sequence of hex RGB color strings
Raises:
ValueError if n is greater than the base palette length of 256
Examples:
.. code-block:: python
>>> magma(6)
('#000003', '#3B0F6F', '#8C2980', '#DD4968', '#FD9F6C', '#FBFCBF')
The resulting palette looks like: :bokeh-palette:`magma(6)`
"""
return linear_palette(Magma256, n)
|
8,748 |
def test_save_unmodified_config(multi_fakeconfig):
"""Assert type attributes are kept as they should be"""
multi_fakeconfig.save()
saved_config = config.Config(multi_fakeconfig.filename)
saved_config.define_section('fake', FakeConfigSection)
saved_config.define_section('spam', SpamSection)
# core
assert saved_config.core.owner == 'dgw'
# fake
assert saved_config.fake.valattr is None
assert saved_config.fake.listattr == []
assert saved_config.fake.choiceattr is None
assert saved_config.fake.af_fileattr is None
assert saved_config.fake.ad_fileattr is None
assert saved_config.fake.rf_fileattr is None
assert saved_config.fake.rd_fileattr is None
# spam
assert saved_config.spam.eggs == [
'one',
'two',
'three',
'four',
'and a half', # no-breakline + comma
], 'Comma separated line: "four" and "and a half" must be separated'
assert saved_config.spam.bacons == [
'grilled',
'burn out',
'greasy, fat, and tasty',
]
assert saved_config.spam.cheese == [
'cheddar',
'reblochon',
'camembert',
]
|
def test_save_unmodified_config(multi_fakeconfig):
"""Assert type attributes are kept as they should be"""
multi_fakeconfig.save()
saved_config = config.Config(multi_fakeconfig.filename)
saved_config.define_section('fake', FakeConfigSection)
saved_config.define_section('spam', SpamSection)
# core
assert saved_config.core.owner == 'dgw'
# fake
assert saved_config.fake.valattr is None
assert saved_config.fake.listattr == []
assert saved_config.fake.choiceattr is None
assert saved_config.fake.af_fileattr is None
assert saved_config.fake.ad_fileattr is None
assert saved_config.fake.rf_fileattr is None
assert saved_config.fake.rd_fileattr is None
# spam
assert saved_config.spam.eggs == [
'one',
'two',
'three',
'four',
'and a half', # no-newline + comma
], 'Comma separated line: "four" and "and a half" must be separated'
assert saved_config.spam.bacons == [
'grilled',
'burn out',
'greasy, fat, and tasty',
]
assert saved_config.spam.cheese == [
'cheddar',
'reblochon',
'camembert',
]
|