id (int64, 11–59.9k) | original (string, length 33–150k) | modified (string, length 37–150k) |
---|---|---|
44,509 |
def func_to_container_op(
func: Callable,
output_component_file: Optional[str] = None,
base_image: Optional[str] = None,
extra_code: Optional[str] = '',
packages_to_install: List[str] = None,
modules_to_capture: List[str] = None,
use_code_pickling: bool = False,
annotations: Mapping[str, str] = None,
):
'''Converts a Python function to a component and returns a task
(:class:`kfp.dsl.ContainerOp`) factory.
Function docstring is used as component description. Argument and return annotations are used as component input/output types.
To declare a function with multiple return values, use the :code:`NamedTuple` return annotation syntax::
from typing import NamedTuple
def add_multiply_two_numbers(a: float, b: float) -> NamedTuple('DummyName', [('sum', float), ('product', float)]):
"""Returns sum and product of two arguments"""
return (a + b, a * b)
Args:
func: The python function to convert
base_image: Optional. Specify a custom Docker container image to use in the component. For lightweight components, the image needs to have python 3.5+. Default is tensorflow/tensorflow:1.13.2-py3
output_component_file: Optional. Write a component definition to a local file. Can be used for sharing.
extra_code: Optional. Extra code to add before the function code. Can be used as workaround to define types used in function signature.
packages_to_install: Optional. List of [versioned] python packages to pip install before executing the user function.
modules_to_capture: Optional. List of module names that will be captured (instead of just referenced) during the dependency scan. By default the :code:`func.__module__` is captured. The actual algorithm: Starting with the initial function, traverse its dependencies. If the :code:`dependency.__module__` is in the :code:`modules_to_capture` list then it's captured and its dependencies are traversed. Otherwise the dependency is only referenced instead of captured and its dependencies are not traversed.
use_code_pickling: Specifies whether the function code should be captured using pickling as opposed to source code manipulation. Pickling has better support for capturing dependencies, but is sensitive to version mismatch between python in component creation environment and runtime image.
annotations: Optional. Allows adding arbitrary key-value data to the component specification.
Returns:
A factory function with a strongly-typed signature taken from the python function.
Once called with the required arguments, the factory constructs a pipeline task instance (:class:`kfp.dsl.ContainerOp`) that can run the original function in a container.
'''
component_spec = _func_to_component_spec(
func=func,
extra_code=extra_code,
base_image=base_image,
packages_to_install=packages_to_install,
modules_to_capture=modules_to_capture,
use_code_pickling=use_code_pickling,
)
if annotations:
component_spec.metadata = structures.MetadataSpec(
annotations=annotations,
)
output_component_file = output_component_file or getattr(func, '_component_target_component_file', None)
if output_component_file:
component_spec.save(output_component_file)
#TODO: assert ComponentSpec.from_dict(load_yaml(output_component_file)) == component_spec
return _create_task_factory_from_component_spec(component_spec)
|
def func_to_container_op(
func: Callable,
output_component_file: Optional[str] = None,
base_image: Optional[str] = None,
extra_code: Optional[str] = '',
packages_to_install: List[str] = None,
modules_to_capture: List[str] = None,
use_code_pickling: bool = False,
annotations: Optional[Mapping[str, str]] = None,
):
'''Converts a Python function to a component and returns a task
(:class:`kfp.dsl.ContainerOp`) factory.
Function docstring is used as component description. Argument and return annotations are used as component input/output types.
To declare a function with multiple return values, use the :code:`NamedTuple` return annotation syntax::
from typing import NamedTuple
def add_multiply_two_numbers(a: float, b: float) -> NamedTuple('DummyName', [('sum', float), ('product', float)]):
"""Returns sum and product of two arguments"""
return (a + b, a * b)
Args:
func: The python function to convert
base_image: Optional. Specify a custom Docker container image to use in the component. For lightweight components, the image needs to have python 3.5+. Default is tensorflow/tensorflow:1.13.2-py3
output_component_file: Optional. Write a component definition to a local file. Can be used for sharing.
extra_code: Optional. Extra code to add before the function code. Can be used as workaround to define types used in function signature.
packages_to_install: Optional. List of [versioned] python packages to pip install before executing the user function.
modules_to_capture: Optional. List of module names that will be captured (instead of just referenced) during the dependency scan. By default the :code:`func.__module__` is captured. The actual algorithm: Starting with the initial function, traverse its dependencies. If the :code:`dependency.__module__` is in the :code:`modules_to_capture` list then it's captured and its dependencies are traversed. Otherwise the dependency is only referenced instead of captured and its dependencies are not traversed.
use_code_pickling: Specifies whether the function code should be captured using pickling as opposed to source code manipulation. Pickling has better support for capturing dependencies, but is sensitive to version mismatch between python in component creation environment and runtime image.
annotations: Optional. Allows adding arbitrary key-value data to the component specification.
Returns:
A factory function with a strongly-typed signature taken from the python function.
Once called with the required arguments, the factory constructs a pipeline task instance (:class:`kfp.dsl.ContainerOp`) that can run the original function in a container.
'''
component_spec = _func_to_component_spec(
func=func,
extra_code=extra_code,
base_image=base_image,
packages_to_install=packages_to_install,
modules_to_capture=modules_to_capture,
use_code_pickling=use_code_pickling,
)
if annotations:
component_spec.metadata = structures.MetadataSpec(
annotations=annotations,
)
output_component_file = output_component_file or getattr(func, '_component_target_component_file', None)
if output_component_file:
component_spec.save(output_component_file)
#TODO: assert ComponentSpec.from_dict(load_yaml(output_component_file)) == component_spec
return _create_task_factory_from_component_spec(component_spec)
|
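A minimal usage sketch following the docstring above; the wrapped function mirrors the docstring's example, while the base_image value and the factory-call comment are illustrative assumptions rather than part of the original code.

from typing import NamedTuple

def add_multiply_two_numbers(a: float, b: float) -> NamedTuple('DummyName', [('sum', float), ('product', float)]):
    """Returns sum and product of two arguments"""
    return (a + b, a * b)

# Convert the function into a task factory; calling the factory inside a
# pipeline definition yields a kfp.dsl.ContainerOp task.
add_multiply_op = func_to_container_op(add_multiply_two_numbers, base_image='python:3.7')
# task = add_multiply_op(3, 5); task.outputs['sum'], task.outputs['product']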
2,048 |
def mean_variance_axis(X, axis):
"""Compute mean and variance along an axis on a CSR or CSC matrix
Parameters
----------
X : CSR or CSC sparse matrix of shape (n_samples, n_features)
Input data.
axis : {0, 1}
Axis along which the mean and variance are computed.
Returns
-------
means : ndarray of float of shape (n_features,)
Feature-wise means.
variances : ndarray of float of shape (n_features,)
Feature-wise variances.
"""
_raise_error_wrong_axis(axis)
if isinstance(X, sp.csr_matrix):
if axis == 0:
return _csr_mean_var_axis0(X)
else:
return _csc_mean_var_axis0(X.T)
elif isinstance(X, sp.csc_matrix):
if axis == 0:
return _csc_mean_var_axis0(X)
else:
return _csr_mean_var_axis0(X.T)
else:
_raise_typeerror(X)
|
def mean_variance_axis(X, axis):
"""Compute mean and variance along an axis on a CSR or CSC matrix
Parameters
----------
X : CSR or CSC sparse matrix of shape (n_samples, n_features)
Input data.
axis : {0, 1}
Axis along which the mean and variance are computed.
Returns
-------
means : ndarray of shape (n_features,), dtype=float
Feature-wise means.
variances : ndarray of float of shape (n_features,)
Feature-wise variances.
"""
_raise_error_wrong_axis(axis)
if isinstance(X, sp.csr_matrix):
if axis == 0:
return _csr_mean_var_axis0(X)
else:
return _csc_mean_var_axis0(X.T)
elif isinstance(X, sp.csc_matrix):
if axis == 0:
return _csc_mean_var_axis0(X)
else:
return _csr_mean_var_axis0(X.T)
else:
_raise_typeerror(X)
|
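A quick sanity check of the documented behavior, assuming only the standard numpy and scipy.sparse imports; the small matrix is illustrative.

import numpy as np
import scipy.sparse as sp

X = sp.csr_matrix(np.array([[0., 1.], [2., 3.]]))
means, variances = mean_variance_axis(X, axis=0)
# means     -> array([1., 2.])   (column means)
# variances -> array([1., 1.])   (population variances per column)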
50,495 |
def _find_excluded_ranges(
lines: List[Tuple[int, str]],
*,
warnings: _ExclusionRangeWarnings,
exclude_lines_by_pattern: Optional[str] = None,
exclude_branches_by_pattern: Optional[str] = None,
exclude_pattern_prefix: str,
) -> Callable[[int], bool]:
"""
Scan through all lines to find line ranges and branch ranges covered by exclusion markers.
Example:
>>> lines = [(11, '//PREFIX_EXCL_LINE'), (13, '//IGNORE_LINE'), (15, '//PREFIX_EXCL_START'), (18, '//PREFIX_EXCL_STOP'),
... (21, '//PREFIX_EXCL_BR_LINE'), (23, '//IGNORE_BR'), (25, '//PREFIX_EXCL_BR_START'), (28, '//PREFIX_EXCL_BR_STOP')]
>>> [exclude_line, exclude_branch] = _find_excluded_ranges(
... lines, warnings=..., exclude_lines_by_pattern = '.*IGNORE_LINE',
... exclude_branches_by_pattern = '.*IGNORE_BR', exclude_pattern_prefix='PREFIX')
>>> [lineno for lineno in range(30) if exclude_line(lineno)]
[11, 13, 15, 16, 17]
>>> [lineno for lineno in range(30) if exclude_branch(lineno)]
[21, 23, 25, 26, 27]
"""
exclude_lines_by_pattern_regex = None
if exclude_lines_by_pattern:
exclude_lines_by_pattern_regex = re.compile(exclude_lines_by_pattern)
exclude_branches_by_pattern_regex = None
if exclude_branches_by_pattern:
exclude_branches_by_pattern_regex = re.compile(exclude_branches_by_pattern)
# possibly overlapping half-open ranges that are excluded
exclude_line_ranges: List[Tuple[int, int]] = []
exclude_branch_ranges: List[Tuple[int, int]] = []
exclusion_stack_line = []
exclusion_stack_branch = []
for lineno, code in lines:
if _EXCLUDE_FLAG in code:
# process the exclusion marker
#
# header is a marker name like LCOV or GCOVR
#
# START flags are added to the exclusion stack
# STOP flags remove a marker from the exclusion stack
# line exclusion
excl_line_pattern = re.compile(
"(" + exclude_pattern_prefix + ")" + _EXCLUDE_LINE_PATTERN_POSTFIX
)
for header, flag in excl_line_pattern.findall(code):
if flag == "LINE":
if exclusion_stack_line:
warnings.line_after_start(
lineno, f"{header}_EXCL_LINE", exclusion_stack_line[-1][1]
)
else:
exclude_line_ranges.append((lineno, lineno + 1))
if flag == "START":
exclusion_stack_line.append((header, lineno))
elif flag == "STOP":
if not exclusion_stack_line:
warnings.stop_without_start(
lineno, f"{header}_EXCL_START", f"{header}_EXCL_STOP"
)
continue
start_header, start_lineno = exclusion_stack_line.pop()
if header != start_header:
warnings.mismatched_start_stop(
start_lineno,
f"{start_header}_EXCL_START",
lineno,
f"{header}_EXCL_STOP",
)
exclude_line_ranges.append((start_lineno, lineno))
else: # pragma: no cover
pass
# branch exclusion
excl_branch_pattern = re.compile(
"(" + exclude_pattern_prefix + ")" + _EXCLUDE_BRANCH_PATTERN_POSTFIX
)
for header, flag in excl_branch_pattern.findall(code):
if flag == "LINE":
if exclusion_stack_branch:
warnings.branch_after_start(
lineno, f"{header}_EXCL_LINE", exclusion_stack_branch[-1][1]
)
else:
exclude_branch_ranges.append((lineno, lineno + 1))
if flag == "START":
exclusion_stack_branch.append((header, lineno))
elif flag == "STOP":
if not exclusion_stack_branch:
warnings.stop_without_start(
lineno, f"{header}_EXCL_START", f"{header}_EXCL_STOP"
)
continue
start_header, start_lineno = exclusion_stack_branch.pop()
if header != start_header:
warnings.mismatched_start_stop(
start_lineno,
f"{start_header}_EXCL_START",
lineno,
f"{header}_EXCL_STOP",
)
exclude_branch_ranges.append((start_lineno, lineno))
else: # pragma: no cover
pass
if exclude_lines_by_pattern_regex:
if exclude_lines_by_pattern_regex.match(code):
exclude_line_ranges.append((lineno, lineno + 1))
if exclude_branches_by_pattern_regex:
if exclude_branches_by_pattern_regex.match(code):
exclude_branch_ranges.append((lineno, lineno + 1))
for header, lineno in exclusion_stack_line:
warnings.start_without_stop(
lineno, f"{header}_EXCL_START", f"{header}_EXCL_STOP"
)
for header, lineno in exclusion_stack_branch:
warnings.start_without_stop(
lineno, f"{header}_EXCL_START", f"{header}_EXCL_STOP"
)
return [
_make_is_in_any_range(exclude_line_ranges),
_make_is_in_any_range(exclude_branch_ranges),
]
|
def _find_excluded_ranges(
lines: List[Tuple[int, str]],
*,
warnings: _ExclusionRangeWarnings,
exclude_lines_by_pattern: Optional[str] = None,
exclude_branches_by_pattern: Optional[str] = None,
exclude_pattern_prefix: str,
) -> Callable[[int], bool]:
"""
Scan through all lines to find line ranges and branch ranges covered by exclusion markers.
Example:
>>> lines = [(11, '//PREFIX_EXCL_LINE'), (13, '//IGNORE_LINE'), (15, '//PREFIX_EXCL_START'), (18, '//PREFIX_EXCL_STOP'),
... (21, '//PREFIX_EXCL_BR_LINE'), (23, '//IGNORE_BR'), (25, '//PREFIX_EXCL_BR_START'), (28, '//PREFIX_EXCL_BR_STOP')]
>>> [exclude_line, exclude_branch] = _find_excluded_ranges(
... lines, warnings=..., exclude_lines_by_pattern = '.*IGNORE_LINE',
... exclude_branches_by_pattern = '.*IGNORE_BR', exclude_pattern_prefix='PREFIX')
>>> [lineno for lineno in range(30) if exclude_line(lineno)]
[11, 13, 15, 16, 17]
>>> [lineno for lineno in range(30) if exclude_branch(lineno)]
[21, 23, 25, 26, 27]
"""
exclude_lines_by_pattern_regex = None
if exclude_lines_by_pattern:
exclude_lines_by_pattern_regex = re.compile(exclude_lines_by_pattern)
exclude_branches_by_pattern_regex = None
if exclude_branches_by_pattern:
exclude_branches_by_pattern_regex = re.compile(exclude_branches_by_pattern)
# possibly overlapping half-open ranges that are excluded
exclude_line_ranges: List[Tuple[int, int]] = []
exclude_branch_ranges: List[Tuple[int, int]] = []
exclusion_stack_line = []
exclusion_stack_branch = []
for lineno, code in lines:
if _EXCLUDE_FLAG in code:
# process the exclusion marker
#
# header is a marker name like LCOV or GCOVR
#
# START flags are added to the exclusion stack
# STOP flags remove a marker from the exclusion stack
# line exclusion
excl_line_pattern = re.compile(
"(" + exclude_pattern_prefix + ")" + _EXCLUDE_LINE_PATTERN_POSTFIX
)
for header, flag in excl_line_pattern.findall(code):
if flag == "LINE":
if exclusion_stack_line:
warnings.line_after_start(
lineno, f"{header}_EXCL_LINE", exclusion_stack_line[-1][1]
)
else:
exclude_line_ranges.append((lineno, lineno + 1))
if flag == "START":
exclusion_stack_line.append((header, lineno))
elif flag == "STOP":
if not exclusion_stack_line:
warnings.stop_without_start(
lineno, f"{header}_EXCL_START", f"{header}_EXCL_STOP"
)
continue
start_header, start_lineno = exclusion_stack_line.pop()
if header != start_header:
warnings.mismatched_start_stop(
start_lineno,
f"{start_header}_EXCL_START",
lineno,
f"{header}_EXCL_STOP",
)
exclude_line_ranges.append((start_lineno, lineno))
else: # pragma: no cover
pass
# branch exclusion
excl_branch_pattern = re.compile(
"(" + exclude_pattern_prefix + ")" + _EXCLUDE_BRANCH_PATTERN_POSTFIX
)
for header, flag in excl_branch_pattern.findall(code):
if flag == "LINE":
if exclusion_stack_branch:
warnings.branch_after_start(
lineno, f"{header}_EXCL_LINE", exclusion_stack_branch[-1][1]
)
else:
exclude_branch_ranges.append((lineno, lineno + 1))
if flag == "START":
exclusion_stack_branch.append((header, lineno))
elif flag == "STOP":
if not exclusion_stack_branch:
warnings.stop_without_start(
lineno, f"{header}_EXCL_START", f"{header}_EXCL_STOP"
)
continue
start_header, start_lineno = exclusion_stack_branch.pop()
if header != start_header:
warnings.mismatched_start_stop(
start_lineno,
f"{start_header}_EXCL_START",
lineno,
f"{header}_EXCL_STOP",
)
exclude_branch_ranges.append((start_lineno, lineno))
else: # pragma: no cover
pass
if exclude_lines_by_pattern_regex:
if exclude_lines_by_pattern_regex.match(code):
exclude_line_ranges.append((lineno, lineno + 1))
if exclude_branches_by_pattern_regex:
if exclude_branches_by_pattern_regex.match(code):
exclude_branch_ranges.append((lineno, lineno + 1))
for header, lineno in exclusion_stack_line:
warnings.start_without_stop(
lineno, f"{header}_EXCL_START", f"{header}_EXCL_STOP"
)
for header, lineno in exclusion_stack_branch:
warnings.start_without_stop(
lineno, f"{header}_EXCL_START", f"{header}_EXCL_STOP"
)
return (
_make_is_in_any_range(exclude_line_ranges),
_make_is_in_any_range(exclude_branch_ranges),
)
|
29,442 |
def load_show_hide(sh_file):
global show_hide_buttons, show_hide_patterns, show_or_hide
try:
with open(sh_file) as f:
logger.debug("Loading sample renaming config settings from: {}".format(sh_file))
show_hide_buttons.append("all")
show_hide_patterns.append("")
show_or_hide.append("show")
for l in f:
s = l.strip().split("\t")
if len(s) == 3:
show_hide_buttons.append(s[0])
show_hide_patterns.append(s[1])
if not s[2] in ["show", "hide"]:
raise IOError
show_or_hide.append(s[2])
except (IOError, AttributeError) as e:
logger.error("Error loading show patterns file: {}".format(e))
logger.debug("Found {} show/hide patterns".format(len(show_hide_buttons) - 1))
|
def load_show_hide(sh_file):
global show_hide_buttons, show_hide_patterns, show_or_hide
try:
with open(sh_file) as f:
logger.debug("Loading sample renaming config settings from: {}".format(sh_file))
show_hide_buttons.append("Show all")
show_hide_patterns.append("")
show_or_hide.append("show")
for l in f:
s = l.strip().split("\t")
if len(s) == 3:
show_hide_buttons.append(s[0])
show_hide_patterns.append(s[1])
if not s[2] in ["show", "hide"]:
raise IOError
show_or_hide.append(s[2])
except (IOError, AttributeError) as e:
logger.error("Error loading show patterns file: {}".format(e))
logger.debug("Found {} show/hide patterns".format(len(show_hide_buttons) - 1))
|
6,724 |
def ctcpExtract(message):
"""
Extract CTCP data from a string.
@return: A C{dict} containing two keys:
- C{'extended'}: A list of CTCP (tag, data) tuples.
- C{'normal'}: A list of strings which were not inside a CTCP delimiter.
"""
extended_messages = []
normal_messages = []
retval = {"extended": extended_messages, "normal": normal_messages}
messages = message.split(X_DELIM)
odd = 0
# X1 extended data X2 normal data X3 extended data X4 normal...
while messages:
if odd:
extended_messages.append(messages.pop(0))
else:
normal_messages.append(messages.pop(0))
odd = not odd
extended_messages[:] = list(filter(None, extended_messages))
normal_messages[:] = list(filter(None, normal_messages))
extended_messages[:] = list(map(ctcpDequote, extended_messages))
for i in range(len(extended_messages)):
m = extended_messages[i].split(SPC, 1)
tag = m[0]
if len(m) > 1:
data = m[1]
else:
data = None
extended_messages[i] = (tag, data)
return retval
|
def ctcpExtract(message):
"""
Extract CTCP data from a string.
@return: A C{dict} containing two keys:
- C{'extended'}: A list of CTCP (tag, data) tuples.
- C{'normal'}: A list of strings which were not inside a CTCP delimiter.
"""
extended_messages = []
normal_messages = []
retval = {"extended": extended_messages, "normal": normal_messages}
messages = message.split(X_DELIM)
odd = 0
# X1 extended data X2 normal data X3 extended data X4 normal...
while messages:
if odd:
extended_messages.append(messages.pop(0))
else:
normal_messages.append(messages.pop(0))
odd = not odd
extended_messages = list(filter(None, extended_messages))
normal_messages[:] = list(filter(None, normal_messages))
extended_messages[:] = list(map(ctcpDequote, extended_messages))
for i in range(len(extended_messages)):
m = extended_messages[i].split(SPC, 1)
tag = m[0]
if len(m) > 1:
data = m[1]
else:
data = None
extended_messages[i] = (tag, data)
return retval
|
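A short illustration of the documented return format, assuming Twisted's CTCP constants (X_DELIM == '\x01', SPC == ' ') and the behavior of the original implementation above; the sample message is hypothetical.

msg = 'hello \x01VERSION\x01 world'
ctcpExtract(msg)
# -> {'extended': [('VERSION', None)], 'normal': ['hello ', ' world']}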
4,199 |
def _plot_image(data, ax, this_type, picks, cmap, unit, units, scalings, times,
xlim, ylim, titles, colorbar=True, mask=None, mask_cmap=None,
mask_style=None, mask_alpha=.25, nave=None,
time_unit='s', show_names=False, ch_names=None):
"""Plot images."""
import matplotlib.pyplot as plt
assert time_unit is not None
if show_names == "auto":
if picks is not None:
show_names = "all" if len(picks) < 25 else True
else:
show_names = False
cmap = _setup_cmap(cmap)
ch_unit = units[this_type]
this_scaling = scalings[this_type]
if unit is False:
this_scaling = 1.0
ch_unit = 'NA' # no unit
if picks is not None:
data = data[picks]
if mask is not None:
mask = mask[picks]
# Show the image
# Set amplitude scaling
data = this_scaling * data
if ylim is None or this_type not in ylim:
vmax = np.abs(data).max()
vmin = -vmax
else:
vmin, vmax = ylim[this_type]
_check_if_nan(data)
im, t_end = _plot_masked_image(
ax, data, times, mask, yvals=None, cmap=cmap[0],
vmin=vmin, vmax=vmax, mask_style=mask_style, mask_alpha=mask_alpha,
mask_cmap=mask_cmap)
# ignore xlim='tight'; happens automatically with `extent` in imshow
xlim = None if xlim == 'tight' else xlim
if xlim is not None:
ax.set_xlim(xlim)
if colorbar:
cbar = plt.colorbar(im, ax=ax)
cbar.ax.set_title(ch_unit)
if cmap[1]:
ax.CB = DraggableColorbar(cbar, im)
ylabel = "Channels" if show_names else 'Channel (index)'
t = titles[this_type] + ' (%d channel%s' % (len(data), _pl(data)) + t_end
ax.set(ylabel=ylabel, xlabel='Time (%s)' % (time_unit,), title=t)
_add_nave(ax, nave)
if show_names == "all":
yticks = np.arange(len(picks)).astype(int)
yticklabels = np.array(ch_names)[picks]
else:
yticks = np.round(ax.get_yticks()).astype(int)
yticks = np.intersect1d(yticks, np.arange(len(picks), dtype=int))
if show_names:
yticklabels = np.array(ch_names)[picks][yticks]
else:
yticklabels = np.array(picks)[yticks]
ax.set(yticks=yticks, yticklabels=yticklabels)
|
def _plot_image(data, ax, this_type, picks, cmap, unit, units, scalings, times,
xlim, ylim, titles, colorbar=True, mask=None, mask_cmap=None,
mask_style=None, mask_alpha=.25, nave=None,
time_unit='s', show_names=False, ch_names=None):
"""Plot images."""
import matplotlib.pyplot as plt
assert time_unit is not None
if show_names == "auto":
if picks is not None:
show_names = "all" if len(picks) < 25 else True
else:
show_names = False
cmap = _setup_cmap(cmap)
ch_unit = units[this_type]
this_scaling = scalings[this_type]
if unit is False:
this_scaling = 1.0
ch_unit = 'NA' # no unit
if picks is not None:
data = data[picks]
if mask is not None:
mask = mask[picks]
# Show the image
# Set amplitude scaling
data = this_scaling * data
if ylim is None or this_type not in ylim:
vmax = np.abs(data).max()
vmin = -vmax
else:
vmin, vmax = ylim[this_type]
_check_if_nan(data)
im, t_end = _plot_masked_image(
ax, data, times, mask, yvals=None, cmap=cmap[0],
vmin=vmin, vmax=vmax, mask_style=mask_style, mask_alpha=mask_alpha,
mask_cmap=mask_cmap)
# ignore xlim='tight'; happens automatically with `extent` in imshow
xlim = None if xlim == 'tight' else xlim
if xlim is not None:
ax.set_xlim(xlim)
if colorbar:
cbar = plt.colorbar(im, ax=ax)
cbar.ax.set_title(ch_unit)
if cmap[1]:
ax.CB = DraggableColorbar(cbar, im)
ylabel = "Channels" if show_names else 'Channel (index)'
t = titles[this_type] + ' (%d channel%s' % (len(data), _pl(data)) + t_end
ax.set(ylabel=ylabel, xlabel='Time (%s)' % (time_unit,), title=t)
_add_nave(ax, nave)
if show_names == "all":
yticks = np.arange(len(picks), dtype=int)
yticklabels = np.array(ch_names)[picks]
else:
yticks = np.round(ax.get_yticks()).astype(int)
yticks = np.intersect1d(yticks, np.arange(len(picks), dtype=int))
if show_names:
yticklabels = np.array(ch_names)[picks][yticks]
else:
yticklabels = np.array(picks)[yticks]
ax.set(yticks=yticks, yticklabels=yticklabels)
|
21,979 |
def validateSessionWithToken(sydent, sid, clientSecret, token, next_link=None):
"""
Attempt to validate a session, identified by the sid, using
the token from out-of-band. The client secret is given to
prevent attempts to guess the token for a sid.
If the session was successfully validated, return a dict
with 'success': True that can be sent to the client,
otherwise return False.
:param sid: The session ID
:type sid: str
:param clientSecret: The client_secret originally set when requesting the session
:type clientSecret: str
:param token: The validation token
:type token: str
:param next_link: The link to redirect the client to after validation, if provided
:type next_link: str|None
:return: The JSON to return to the client on success, or False on fail
:rtype: Dict|bool
:raises IncorrectClientSecretException if the client secret does not match the sid
:raises SessionExpiredException if the provided session has expired
:raises NextLinkValidationException if the next_link provided is different
from one provided in a previous, successful validation attempt
"""
valSessionStore = ThreePidValSessionStore(sydent)
s = valSessionStore.getTokenSessionById(sid)
if not s:
logger.info("Session ID %s not found", (sid,))
return False
if not clientSecret == s.clientSecret:
logger.info("Incorrect client secret", (sid,))
raise IncorrectClientSecretException()
if s.mtime + ValidationSession.THREEPID_SESSION_VALIDATION_TIMEOUT_MS < time_msec():
logger.info("Session expired")
raise SessionExpiredException()
# Check whether this session has already been validated with a next_link provided
# If so, and the next_link this time around is different than previously, then the
# user may be getting phished. Reject the validation attempt.
if next_link and valSessionStore.next_link_differs(sid, token, next_link):
logger.info(
"Validation attempt rejected as provided next_link is different "
"from that in a previous, successful validation attempt with this "
"session id"
)
raise NextLinkValidationException()
# TODO once we can validate the token oob
#if tokenObj.validated and clientSecret == tokenObj.clientSecret:
# return True
if s.token == token:
logger.info("Setting session %s as validated", s.id)
valSessionStore.setValidated(s.id, True)
if next_link:
valSessionStore.set_next_link_for_token(s.id, s.token, next_link)
return {'success': True}
else:
logger.info("Incorrect token submitted")
return False
|
def validateSessionWithToken(sydent, sid, clientSecret, token, next_link=None):
"""
Attempt to validate a session, identified by the sid, using
the token from out-of-band. The client secret is given to
prevent attempts to guess the token for a sid.
If the session was successfully validated, return a dict
with 'success': True that can be sent to the client,
otherwise return False.
:param sid: The session ID
:type sid: str
:param clientSecret: The client_secret originally set when requesting the session
:type clientSecret: str
:param token: The validation token
:type token: str
:param next_link: The link to redirect the client to after validation, if provided
:type next_link: str|None
:return: The JSON to return to the client on success, or False on fail
:rtype: Dict|bool
:raises IncorrectClientSecretException if the client secret does not match the sid
:raises SessionExpiredException if the provided session has expired
:raises NextLinkValidationException if the next_link provided is different
from one provided in a previous, successful validation attempt
"""
valSessionStore = ThreePidValSessionStore(sydent)
s = valSessionStore.getTokenSessionById(sid)
if not s:
logger.info("Session ID %s not found", (sid,))
return False
if not clientSecret == s.clientSecret:
logger.info("Incorrect client secret", sid)
raise IncorrectClientSecretException()
if s.mtime + ValidationSession.THREEPID_SESSION_VALIDATION_TIMEOUT_MS < time_msec():
logger.info("Session expired")
raise SessionExpiredException()
# Check whether this session has already been validated with a next_link provided
# If so, and the next_link this time around is different than previously, then the
# user may be getting phished. Reject the validation attempt.
if next_link and valSessionStore.next_link_differs(sid, token, next_link):
logger.info(
"Validation attempt rejected as provided next_link is different "
"from that in a previous, successful validation attempt with this "
"session id"
)
raise NextLinkValidationException()
# TODO once we can validate the token oob
#if tokenObj.validated and clientSecret == tokenObj.clientSecret:
# return True
if s.token == token:
logger.info("Setting session %s as validated", s.id)
valSessionStore.setValidated(s.id, True)
if next_link:
valSessionStore.set_next_link_for_token(s.id, s.token, next_link)
return {'success': True}
else:
logger.info("Incorrect token submitted")
return False
|
45,869 |
def tiltProjection(taux: torch.Tensor, tauy: torch.Tensor, inv: bool = False) -> torch.Tensor:
r"""Estimate the tilt projection matrix or the inverse tilt projection matrix
Args:
taux (torch.Tensor): Rotation angle in radians around the :math:`x`-axis with shape :math:`(*, 1)`.
tauy (torch.Tensor): Rotation angle in radians around the :math:`y`-axis with shape :math:`(*, 1)`.
inv (bool): False to obtain the tilt projection matrix. True to obtain the inverse matrix.
Returns:
torch.Tensor: The tilt projection matrix, or its inverse when ``inv`` is True, with shape :math:`(*, 3, 3)`.
"""
assert taux.dim() == tauy.dim()
assert taux.numel() == tauy.numel()
ndim = taux.dim()
taux = taux.reshape(-1)
tauy = tauy.reshape(-1)
cTx = torch.cos(taux)
sTx = torch.sin(taux)
cTy = torch.cos(tauy)
sTy = torch.sin(tauy)
zero = torch.zeros_like(cTx)
one = torch.ones_like(cTx)
Rx = torch.stack([one, zero, zero, zero, cTx, sTx, zero, -sTx, cTx], -1).reshape(-1, 3, 3)
Ry = torch.stack([cTy, zero, -sTy, zero, one, zero, sTy, zero, cTy], -1).reshape(-1, 3, 3)
R = Ry @ Rx
if inv:
invR22 = 1 / R[..., 2, 2]
invPz = torch.stack(
[invR22, zero, R[..., 0, 2] * invR22,
zero, invR22, R[..., 1, 2] * invR22,
zero, zero, one], -1
).reshape(-1, 3, 3)
invTilt = R.transpose(-1, -2) @ invPz
if ndim == 0:
invTilt = torch.squeeze(invTilt)
return invTilt
else:
Pz = torch.stack(
[R[..., 2, 2], zero, -R[..., 0, 2],
zero, R[..., 2, 2], -R[..., 1, 2],
zero, zero, one], -1
).reshape(-1, 3, 3)
tilt = Pz @ R.transpose(-1, -2)
if ndim == 0:
tilt = torch.squeeze(tilt)
return tilt
|
def tiltProjection(taux: torch.Tensor, tauy: torch.Tensor, inv: bool = False) -> torch.Tensor:
r"""Estimate the tilt projection matrix or the inverse tilt projection matrix
Args:
taux: Rotation angle in radians around the :math:`x`-axis with shape :math:`(*, 1)`.
tauy (torch.Tensor): Rotation angle in radians around the :math:`y`-axis with shape :math:`(*, 1)`.
inv (bool): False to obtain the tilt projection matrix. True to obtain the inverse matrix.
Returns:
torch.Tensor: The tilt projection matrix, or its inverse when ``inv`` is True, with shape :math:`(*, 3, 3)`.
"""
assert taux.dim() == tauy.dim()
assert taux.numel() == tauy.numel()
ndim = taux.dim()
taux = taux.reshape(-1)
tauy = tauy.reshape(-1)
cTx = torch.cos(taux)
sTx = torch.sin(taux)
cTy = torch.cos(tauy)
sTy = torch.sin(tauy)
zero = torch.zeros_like(cTx)
one = torch.ones_like(cTx)
Rx = torch.stack([one, zero, zero, zero, cTx, sTx, zero, -sTx, cTx], -1).reshape(-1, 3, 3)
Ry = torch.stack([cTy, zero, -sTy, zero, one, zero, sTy, zero, cTy], -1).reshape(-1, 3, 3)
R = Ry @ Rx
if inv:
invR22 = 1 / R[..., 2, 2]
invPz = torch.stack(
[invR22, zero, R[..., 0, 2] * invR22,
zero, invR22, R[..., 1, 2] * invR22,
zero, zero, one], -1
).reshape(-1, 3, 3)
invTilt = R.transpose(-1, -2) @ invPz
if ndim == 0:
invTilt = torch.squeeze(invTilt)
return invTilt
else:
Pz = torch.stack(
[R[..., 2, 2], zero, -R[..., 0, 2],
zero, R[..., 2, 2], -R[..., 1, 2],
zero, zero, one], -1
).reshape(-1, 3, 3)
tilt = Pz @ R.transpose(-1, -2)
if ndim == 0:
tilt = torch.squeeze(tilt)
return tilt
|
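A minimal sanity check, assuming only that torch is importable: with both tilt angles at zero the rotation is the identity, so the (inverse) tilt projection matrix is the 3x3 identity.

import torch

taux = torch.zeros(1)
tauy = torch.zeros(1)
tiltProjection(taux, tauy)            # -> identity matrix of shape (1, 3, 3)
tiltProjection(taux, tauy, inv=True)  # -> identity matrix of shape (1, 3, 3)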
48,836 |
def sensor(python_callable: Optional[Callable] = None, multiple_outputs: Optional[bool] = None, **kwargs):
"""
Wraps a function into an Airflow operator.
Accepts kwargs for operator kwarg. Can be reused in a single DAG.
:param python_callable: Function to decorate
:type python_callable: Optional[Callable]
:param multiple_outputs: if set, function return value will be
unrolled to multiple XCom values. List/Tuples will unroll to xcom values
with index as key. Dict will unroll to xcom values with keys as XCom keys.
Defaults to False.
:type multiple_outputs: bool
"""
return task_decorator_factory(
python_callable=python_callable,
multiple_outputs=multiple_outputs,
decorated_operator_class=DecoratedSensorOperator,
**kwargs,
)
|
def sensor(python_callable: Optional[Callable] = None, multiple_outputs: Optional[bool] = None, **kwargs):
"""
Wraps a function into an Airflow operator.
Accepts kwargs for operator kwarg. Can be reused in a single DAG.
:param python_callable: Function to decorate
:type python_callable: Optional[Callable]
:param multiple_outputs: If set to True, the decorated function's return value will be unrolled to
multiple XCom values. Dict will unroll to XCom values with its keys as XCom keys.
Defaults to False.
:type multiple_outputs: bool
"""
return task_decorator_factory(
python_callable=python_callable,
multiple_outputs=multiple_outputs,
decorated_operator_class=DecoratedSensorOperator,
**kwargs,
)
|
50,050 |
def configure_parser_notices(sub_parsers, name="notices"):
example = dedent(
f"""
Examples:
conda {name}
conda {name} -c defaults
"""
)
p = sub_parsers.add_parser(
name,
description=NOTICES_DESCRIPTION,
help=NOTICES_HELP,
epilog=example,
)
p.add_argument(
"--no-ansi-colors",
action="store_true",
default=False,
help="Do not display ANSI terminal colors.",
)
add_parser_channels(p)
p.set_defaults(func=".main_notices.execute")
|
def configure_parser_notices(sub_parsers, name="notices"):
example = dals(
f"""
Examples:
conda {name}
conda {name} -c defaults
"""
)
p = sub_parsers.add_parser(
name,
description=NOTICES_DESCRIPTION,
help=NOTICES_HELP,
epilog=example,
)
p.add_argument(
"--no-ansi-colors",
action="store_true",
default=False,
help="Do not display ANSI terminal colors.",
)
add_parser_channels(p)
p.set_defaults(func=".main_notices.execute")
|
38,443 |
def set_state(data: Dict, state: Optional[Dict] = None) -> Dict:
""" Initialize or update a state dictionary.
The initialization consists of adding a state dictionary in the proper field of the
data dictionary. If there is a state dictionary in data, the new state is added
using the update method of dictionaries.
Args:
data: Outer data dictionary, to which the parameters will be added.
state: A dictionary with the state, set to an empty dictionary if not provided.
Returns:
data: The filled dictionary.
"""
if state is None:
state = {}
if pp.STATE in data:
data[pp.STATE].update(state)
else:
data[pp.STATE] = state
return data
|
def set_state(data: Dict, state: Optional[Dict] = None) -> Dict:
""" Initialize or update a state dictionary.
The initialization consists of adding a state dictionary in the proper field of the
data dictionary. If there is a state dictionary in data, the new state is added
using the update method of dictionaries.
Args:
data: Outer data dictionary, to which the parameters will be added.
state: A dictionary with the state, set to an empty dictionary if not provided.
Returns:
data: The filled dictionary.
"""
state = state or {}
if pp.STATE in data:
data[pp.STATE].update(state)
else:
data[pp.STATE] = state
return data
|
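A small usage sketch; pp.STATE is assumed to be the module-level key constant referenced in the function, and the key names in the state dictionaries are illustrative.

data = {}
set_state(data, {"pressure": 1.0})   # creates data[pp.STATE] = {"pressure": 1.0}
set_state(data, {"flux": 0.0})       # updates the existing state dict in place
set_state(data)                      # no-op update with an empty state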
11,907 |
def _save(im, fp, filename):
try:
rawmode, prefix, photo, format, bits, extra = SAVE_INFO[im.mode]
except KeyError as e:
raise OSError(f"cannot write mode {im.mode} as TIFF") from e
ifd = ImageFileDirectory_v2(prefix=prefix)
compression = im.encoderinfo.get("compression", im.info.get("compression"))
if compression is None:
compression = "raw"
elif compression == "tiff_jpeg":
# OJPEG is obsolete, so use new-style JPEG compression instead
compression = "jpeg"
elif compression == "tiff_deflate":
compression = "tiff_adobe_deflate"
libtiff = WRITE_LIBTIFF or compression != "raw"
# required for color libtiff images
ifd[PLANAR_CONFIGURATION] = getattr(im, "_planar_configuration", 1)
ifd[IMAGEWIDTH] = im.size[0]
ifd[IMAGELENGTH] = im.size[1]
# write any arbitrary tags passed in as an ImageFileDirectory
info = im.encoderinfo.get("tiffinfo", {})
logger.debug("Tiffinfo Keys: %s" % list(info))
if isinstance(info, ImageFileDirectory_v1):
info = info.to_v2()
for key in info:
ifd[key] = info.get(key)
try:
ifd.tagtype[key] = info.tagtype[key]
except Exception:
pass # might not be an IFD. Might not have populated type
# additions written by Greg Couch, [email protected]
# inspired by image-sig posting from Kevin Cazabon, [email protected]
if hasattr(im, "tag_v2"):
# preserve tags from original TIFF image file
for key in (
RESOLUTION_UNIT,
X_RESOLUTION,
Y_RESOLUTION,
IPTC_NAA_CHUNK,
PHOTOSHOP_CHUNK,
XMP,
):
if key in im.tag_v2:
ifd[key] = im.tag_v2[key]
ifd.tagtype[key] = im.tag_v2.tagtype[key]
# preserve ICC profile (should also work when saving other formats
# which support profiles as TIFF) -- 2008-06-06 Florian Hoech
icc = im.encoderinfo.get("icc_profile", im.info.get("icc_profile"))
if icc:
ifd[ICCPROFILE] = icc
for key, name in [
(IMAGEDESCRIPTION, "description"),
(X_RESOLUTION, "resolution"),
(Y_RESOLUTION, "resolution"),
(X_RESOLUTION, "x_resolution"),
(Y_RESOLUTION, "y_resolution"),
(RESOLUTION_UNIT, "resolution_unit"),
(SOFTWARE, "software"),
(DATE_TIME, "date_time"),
(ARTIST, "artist"),
(COPYRIGHT, "copyright"),
]:
if name in im.encoderinfo:
ifd[key] = im.encoderinfo[name]
dpi = im.encoderinfo.get("dpi")
if dpi:
ifd[RESOLUTION_UNIT] = 2
ifd[X_RESOLUTION] = dpi[0]
ifd[Y_RESOLUTION] = dpi[1]
if bits != (1,):
ifd[BITSPERSAMPLE] = bits
if len(bits) != 1:
ifd[SAMPLESPERPIXEL] = len(bits)
if extra is not None:
ifd[EXTRASAMPLES] = extra
if format != 1:
ifd[SAMPLEFORMAT] = format
ifd[PHOTOMETRIC_INTERPRETATION] = photo
if im.mode in ["P", "PA"]:
lut = im.im.getpalette("RGB", "RGB;L")
ifd[COLORMAP] = tuple(v * 256 for v in lut)
# data orientation
stride = len(bits) * ((im.size[0] * bits[0] + 7) // 8)
# aim for 64 KB strips when using libtiff writer
if libtiff:
rows_per_strip = min((2 ** 16 + stride - 1) // stride, im.size[1])
# JPEG encoder expects multiple of 8 rows
if compression == "jpeg":
rows_per_strip = min(((rows_per_strip + 7) // 8) * 8, im.size[1])
else:
rows_per_strip = im.size[1]
strip_byte_counts = stride * rows_per_strip
strips_per_image = (im.size[1] + rows_per_strip - 1) // rows_per_strip
ifd[ROWSPERSTRIP] = rows_per_strip
if strip_byte_counts >= 2 ** 16:
ifd.tagtype[STRIPBYTECOUNTS] = TiffTags.LONG
ifd[STRIPBYTECOUNTS] = (strip_byte_counts,) * (strips_per_image - 1) + (
stride * im.size[1] - strip_byte_counts * (strips_per_image - 1),
)
ifd[STRIPOFFSETS] = tuple(
range(0, strip_byte_counts * strips_per_image, strip_byte_counts)
) # this is adjusted by IFD writer
# no compression by default:
ifd[COMPRESSION] = COMPRESSION_INFO_REV.get(compression, 1)
if libtiff:
if "quality" in im.encoderinfo:
quality = im.encoderinfo["quality"]
if not isinstance(quality, int) or quality < 0 or quality > 100:
raise ValueError("Invalid quality setting")
if compression != "jpeg":
raise ValueError(
"quality setting only supported for 'jpeg' compression"
)
ifd[JPEGQUALITY] = quality
logger.debug("Saving using libtiff encoder")
logger.debug("Items: %s" % sorted(ifd.items()))
_fp = 0
if hasattr(fp, "fileno"):
try:
fp.seek(0)
_fp = os.dup(fp.fileno())
except io.UnsupportedOperation:
pass
# optional types for non core tags
types = {}
# SAMPLEFORMAT is determined by the image format and should not be copied
# from legacy_ifd.
# STRIPOFFSETS and STRIPBYTECOUNTS are added by the library
# based on the data in the strip.
# The other tags expect arrays with a certain length (fixed or depending on
# BITSPERSAMPLE, etc), passing arrays with a different length will result in
# segfaults. Block these tags until we add extra validation.
# SUBIFD may also cause a segfault.
blocklist = [
REFERENCEBLACKWHITE,
SAMPLEFORMAT,
STRIPBYTECOUNTS,
STRIPOFFSETS,
TRANSFERFUNCTION,
SUBIFD,
]
atts = {}
# bits per sample is a single short in the tiff directory, not a list.
atts[BITSPERSAMPLE] = bits[0]
# Merge the ones that we have with (optional) more bits from
# the original file, e.g x,y resolution so that we can
# save(load('')) == original file.
legacy_ifd = {}
if hasattr(im, "tag"):
legacy_ifd = im.tag.to_v2()
for tag, value in itertools.chain(
ifd.items(), getattr(im, "tag_v2", {}).items(), legacy_ifd.items()
):
# Libtiff can only process certain core items without adding
# them to the custom dictionary.
# Custom items are supported for int, float, unicode, string and byte
# values. Other types and tuples require a tagtype.
if tag not in TiffTags.LIBTIFF_CORE:
if not Image.core.libtiff_support_custom_tags:
continue
if tag in ifd.tagtype:
types[tag] = ifd.tagtype[tag]
elif not (isinstance(value, (int, float, str, bytes))):
continue
else:
type = TiffTags.lookup(tag).type
if type:
types[tag] = type
if tag not in atts and tag not in blocklist:
if isinstance(value, str):
atts[tag] = value.encode("ascii", "replace") + b"\0"
elif isinstance(value, IFDRational):
atts[tag] = float(value)
else:
atts[tag] = value
logger.debug("Converted items: %s" % sorted(atts.items()))
# libtiff always expects the bytes in native order.
# we're storing image byte order. So, if the rawmode
# contains I;16, we need to convert from native to image
# byte order.
if im.mode in ("I;16B", "I;16"):
rawmode = "I;16N"
# Pass tags as sorted list so that the tags are set in a fixed order.
# This is required by libtiff for some tags. For example, the JPEGQUALITY
# pseudo tag requires that the COMPRESS tag was already set.
tags = list(atts.items())
tags.sort()
a = (rawmode, compression, _fp, filename, tags, types)
e = Image._getencoder(im.mode, "libtiff", a, im.encoderconfig)
e.setimage(im.im, (0, 0) + im.size)
while True:
# undone, change to self.decodermaxblock:
l, s, d = e.encode(16 * 1024)
if not _fp:
fp.write(d)
if s:
break
if s < 0:
raise OSError(f"encoder error {s} when writing image file")
else:
offset = ifd.save(fp)
ImageFile._save(
im, fp, [("raw", (0, 0) + im.size, offset, (rawmode, stride, 1))]
)
# -- helper for multi-page save --
if "_debug_multipage" in im.encoderinfo:
# just to access o32 and o16 (using correct byte order)
im._debug_multipage = ifd
|
def _save(im, fp, filename):
try:
rawmode, prefix, photo, format, bits, extra = SAVE_INFO[im.mode]
except KeyError as e:
raise OSError(f"cannot write mode {im.mode} as TIFF") from e
ifd = ImageFileDirectory_v2(prefix=prefix)
compression = im.encoderinfo.get("compression", im.info.get("compression"))
if compression is None:
compression = "raw"
elif compression == "tiff_jpeg":
# OJPEG is obsolete, so use new-style JPEG compression instead
compression = "jpeg"
elif compression == "tiff_deflate":
compression = "tiff_adobe_deflate"
libtiff = WRITE_LIBTIFF or compression != "raw"
# required for color libtiff images
ifd[PLANAR_CONFIGURATION] = getattr(im, "_planar_configuration", 1)
ifd[IMAGEWIDTH] = im.size[0]
ifd[IMAGELENGTH] = im.size[1]
# write any arbitrary tags passed in as an ImageFileDirectory
info = im.encoderinfo.get("tiffinfo", {})
logger.debug("Tiffinfo Keys: %s" % list(info))
if isinstance(info, ImageFileDirectory_v1):
info = info.to_v2()
for key in info:
ifd[key] = info.get(key)
try:
ifd.tagtype[key] = info.tagtype[key]
except Exception:
pass # might not be an IFD. Might not have populated type
# additions written by Greg Couch, [email protected]
# inspired by image-sig posting from Kevin Cazabon, [email protected]
if hasattr(im, "tag_v2"):
# preserve tags from original TIFF image file
for key in (
RESOLUTION_UNIT,
X_RESOLUTION,
Y_RESOLUTION,
IPTC_NAA_CHUNK,
PHOTOSHOP_CHUNK,
XMP,
):
if key in im.tag_v2:
ifd[key] = im.tag_v2[key]
ifd.tagtype[key] = im.tag_v2.tagtype[key]
# preserve ICC profile (should also work when saving other formats
# which support profiles as TIFF) -- 2008-06-06 Florian Hoech
icc = im.encoderinfo.get("icc_profile", im.info.get("icc_profile"))
if icc:
ifd[ICCPROFILE] = icc
for key, name in [
(IMAGEDESCRIPTION, "description"),
(X_RESOLUTION, "resolution"),
(Y_RESOLUTION, "resolution"),
(X_RESOLUTION, "x_resolution"),
(Y_RESOLUTION, "y_resolution"),
(RESOLUTION_UNIT, "resolution_unit"),
(SOFTWARE, "software"),
(DATE_TIME, "date_time"),
(ARTIST, "artist"),
(COPYRIGHT, "copyright"),
]:
if name in im.encoderinfo:
ifd[key] = im.encoderinfo[name]
dpi = im.encoderinfo.get("dpi")
if dpi:
ifd[RESOLUTION_UNIT] = 2
ifd[X_RESOLUTION] = dpi[0]
ifd[Y_RESOLUTION] = dpi[1]
if bits != (1,):
ifd[BITSPERSAMPLE] = bits
if len(bits) != 1:
ifd[SAMPLESPERPIXEL] = len(bits)
if extra is not None:
ifd[EXTRASAMPLES] = extra
if format != 1:
ifd[SAMPLEFORMAT] = format
ifd[PHOTOMETRIC_INTERPRETATION] = photo
if im.mode in ["P", "PA"]:
lut = im.im.getpalette("RGB", "RGB;L")
ifd[COLORMAP] = tuple(v * 256 for v in lut)
# data orientation
stride = len(bits) * ((im.size[0] * bits[0] + 7) // 8)
# aim for 64 KB strips when using libtiff writer
if libtiff:
rows_per_strip = min((2 ** 16 + stride - 1) // stride, im.size[1])
# JPEG encoder expects multiple of 8 rows
if compression == "jpeg":
rows_per_strip = ((rows_per_strip + 7) // 8) * 8
else:
rows_per_strip = im.size[1]
strip_byte_counts = stride * rows_per_strip
strips_per_image = (im.size[1] + rows_per_strip - 1) // rows_per_strip
ifd[ROWSPERSTRIP] = rows_per_strip
if strip_byte_counts >= 2 ** 16:
ifd.tagtype[STRIPBYTECOUNTS] = TiffTags.LONG
ifd[STRIPBYTECOUNTS] = (strip_byte_counts,) * (strips_per_image - 1) + (
stride * im.size[1] - strip_byte_counts * (strips_per_image - 1),
)
ifd[STRIPOFFSETS] = tuple(
range(0, strip_byte_counts * strips_per_image, strip_byte_counts)
) # this is adjusted by IFD writer
# no compression by default:
ifd[COMPRESSION] = COMPRESSION_INFO_REV.get(compression, 1)
if libtiff:
if "quality" in im.encoderinfo:
quality = im.encoderinfo["quality"]
if not isinstance(quality, int) or quality < 0 or quality > 100:
raise ValueError("Invalid quality setting")
if compression != "jpeg":
raise ValueError(
"quality setting only supported for 'jpeg' compression"
)
ifd[JPEGQUALITY] = quality
logger.debug("Saving using libtiff encoder")
logger.debug("Items: %s" % sorted(ifd.items()))
_fp = 0
if hasattr(fp, "fileno"):
try:
fp.seek(0)
_fp = os.dup(fp.fileno())
except io.UnsupportedOperation:
pass
# optional types for non core tags
types = {}
# SAMPLEFORMAT is determined by the image format and should not be copied
# from legacy_ifd.
# STRIPOFFSETS and STRIPBYTECOUNTS are added by the library
# based on the data in the strip.
# The other tags expect arrays with a certain length (fixed or depending on
# BITSPERSAMPLE, etc), passing arrays with a different length will result in
# segfaults. Block these tags until we add extra validation.
# SUBIFD may also cause a segfault.
blocklist = [
REFERENCEBLACKWHITE,
SAMPLEFORMAT,
STRIPBYTECOUNTS,
STRIPOFFSETS,
TRANSFERFUNCTION,
SUBIFD,
]
atts = {}
# bits per sample is a single short in the tiff directory, not a list.
atts[BITSPERSAMPLE] = bits[0]
# Merge the ones that we have with (optional) more bits from
# the original file, e.g x,y resolution so that we can
# save(load('')) == original file.
legacy_ifd = {}
if hasattr(im, "tag"):
legacy_ifd = im.tag.to_v2()
for tag, value in itertools.chain(
ifd.items(), getattr(im, "tag_v2", {}).items(), legacy_ifd.items()
):
# Libtiff can only process certain core items without adding
# them to the custom dictionary.
# Custom items are supported for int, float, unicode, string and byte
# values. Other types and tuples require a tagtype.
if tag not in TiffTags.LIBTIFF_CORE:
if not Image.core.libtiff_support_custom_tags:
continue
if tag in ifd.tagtype:
types[tag] = ifd.tagtype[tag]
elif not (isinstance(value, (int, float, str, bytes))):
continue
else:
type = TiffTags.lookup(tag).type
if type:
types[tag] = type
if tag not in atts and tag not in blocklist:
if isinstance(value, str):
atts[tag] = value.encode("ascii", "replace") + b"\0"
elif isinstance(value, IFDRational):
atts[tag] = float(value)
else:
atts[tag] = value
logger.debug("Converted items: %s" % sorted(atts.items()))
# libtiff always expects the bytes in native order.
# we're storing image byte order. So, if the rawmode
# contains I;16, we need to convert from native to image
# byte order.
if im.mode in ("I;16B", "I;16"):
rawmode = "I;16N"
# Pass tags as sorted list so that the tags are set in a fixed order.
# This is required by libtiff for some tags. For example, the JPEGQUALITY
# pseudo tag requires that the COMPRESS tag was already set.
tags = list(atts.items())
tags.sort()
a = (rawmode, compression, _fp, filename, tags, types)
e = Image._getencoder(im.mode, "libtiff", a, im.encoderconfig)
e.setimage(im.im, (0, 0) + im.size)
while True:
# undone, change to self.decodermaxblock:
l, s, d = e.encode(16 * 1024)
if not _fp:
fp.write(d)
if s:
break
if s < 0:
raise OSError(f"encoder error {s} when writing image file")
else:
offset = ifd.save(fp)
ImageFile._save(
im, fp, [("raw", (0, 0) + im.size, offset, (rawmode, stride, 1))]
)
# -- helper for multi-page save --
if "_debug_multipage" in im.encoderinfo:
# just to access o32 and o16 (using correct byte order)
im._debug_multipage = ifd
|
46,525 |
def test_finality_rule_2(state):
# get past first two epochs that finality does not run on
next_epoch(state)
next_epoch(state)
pre_state = deepcopy(state)
test_state = deepcopy(state)
blocks = []
for epoch in range(3):
old_previous_justified_epoch = test_state.previous_justified_epoch
old_previous_justified_root = test_state.previous_justified_root
if epoch == 0:
prev_state, new_blocks, test_state = next_epoch_with_attestations(test_state, True, False)
check_finality(test_state, prev_state, True, False, False)
if epoch == 1:
prev_state, new_blocks, test_state = next_epoch_with_attestations(test_state, False, False)
check_finality(test_state, prev_state, False, True, False)
if epoch == 2:
prev_state, new_blocks, test_state = next_epoch_with_attestations(test_state, False, True)
# finalized by rule 2
check_finality(test_state, prev_state, True, False, True)
assert test_state.finalized_epoch == old_previous_justified_epoch
assert test_state.finalized_root == old_previous_justified_root
blocks += new_blocks
return pre_state, blocks, test_state
|
def test_finality_rule_2(state):
# get past first two epochs that finality does not run on
next_epoch(state)
next_epoch(state)
pre_state = deepcopy(state)
test_state = deepcopy(state)
blocks = []
for epoch in range(3):
old_previous_justified_epoch = test_state.previous_justified_epoch
old_previous_justified_root = test_state.previous_justified_root
if epoch == 0:
prev_state, new_blocks, test_state = next_epoch_with_attestations(test_state, True, False)
check_finality(test_state, prev_state, True, False, False)
elif epoch == 1:
prev_state, new_blocks, test_state = next_epoch_with_attestations(test_state, False, False)
check_finality(test_state, prev_state, False, True, False)
if epoch == 2:
prev_state, new_blocks, test_state = next_epoch_with_attestations(test_state, False, True)
# finalized by rule 2
check_finality(test_state, prev_state, True, False, True)
assert test_state.finalized_epoch == old_previous_justified_epoch
assert test_state.finalized_root == old_previous_justified_root
blocks += new_blocks
return pre_state, blocks, test_state
|
35,423 |
def juggle_file(fn, dbc=None):
env = os.environ.copy()
env["BASEDIR"] = BASEDIR
if dbc:
env["DBC_NAME"] = dbc
subprocess.call(f"plotjuggler --plugin_folders {os.path.join(BASEDIR, 'tools/plotjuggler')} -d {fn}", shell=True, env=env, cwd=juggle_dir)
|
def juggle_file(fn, dbc=None):
env = os.environ.copy()
env["BASEDIR"] = BASEDIR
if dbc:
env["DBC_NAME"] = dbc
subprocess.call(f"plotjuggler --plugin_folders {juggle_dir} -d {fn}", shell=True, env=env, cwd=juggle_dir)
|
42,552 |
def _prepend_skipping_whitespaces(prefix: str, text: str) -> str:
lstripped_text = text.lstrip()
leading_whitespaces = text[:len(text) - len(lstripped_text)]
return f'{leading_whitespaces}{prefix}{lstripped_text}'
|
def _prepend_skipping_whitespaces(prefix: str, text: str) -> str:
lstripped_text = text.lstrip()
leading_whitespaces = text[:len(text) - len(lstripped_text)]
return leading_whitespaces + prefix + lstripped_text
|
26,989 |
def test_task_mapping_with_dag():
with DAG("test-dag", start_date=DEFAULT_DATE) as dag:
task1 = BaseOperator(task_id="op1")
literal = ['a', 'b', 'c']
mapped = MockOperator(task_id='task_2').map(arg2=literal)
finish = MockOperator(task_id="finish")
task1 >> mapped >> finish
assert task1.downstream_list == [mapped]
assert mapped in dag.tasks
# At parse time there should only be two tasks!
assert len(dag.tasks) == 3
assert finish.upstream_list == [mapped]
assert mapped.downstream_list == [finish]
|
def test_task_mapping_with_dag():
with DAG("test-dag", start_date=DEFAULT_DATE) as dag:
task1 = BaseOperator(task_id="op1")
literal = ['a', 'b', 'c']
mapped = MockOperator(task_id='task_2').map(arg2=literal)
finish = MockOperator(task_id="finish")
task1 >> mapped >> finish
assert task1.downstream_list == [mapped]
assert mapped in dag.tasks
# At parse time there should only be three tasks!
assert len(dag.tasks) == 3
assert finish.upstream_list == [mapped]
assert mapped.downstream_list == [finish]
|
22,086 |
def _ntlm_authenticate_info(request):
"""
Extract host information in an NTLM_AUTH message
"""
if (len(request) < 52):
LOGGER.warning("NTLM message is too short (%d) but should be at least "
"52 char long", len(request))
return None
value = []
offset, ln = struct.unpack('IH', request[32:36] + request[28:30])
if ln > 0:
value.append("domain:" + \
encode_b64(_extract_substr(request, offset, ln)).decode())
has_version = False
# Flags are not present in a NTLM_AUTH message when the data block starts
# before index 64
if offset >= 64 and len(request) > 64:
flags, = struct.unpack('I', request[60:64])
has_version = flags & flag_version
off, ln = struct.unpack('IH', request[40:44] + request[36:38])
if ln > 0:
value.append("user-name:" + \
encode_b64(_extract_substr(request, off, ln)).decode())
off, ln = struct.unpack('IH', request[48:52] + request[44:46])
if ln > 0:
value.append("workstation:" + \
encode_b64(_extract_substr(request, off, ln)).decode())
# Get OS Version if the `Negotiate Version` is set
# (NTLM_AUTH messages with a data block starting before index 72 do not
# contain information on the version)
if offset >= 72 and len(request) > 72 and has_version:
maj, minor, bld, ntlm_ver = struct.unpack('BBHB', request[64:65] +
request[65:66] +
request[66:68] +
request[71:72])
version = "{}.{}.{}".format(maj, minor, bld).encode()
value.append("ntlm-os:{}".format(encode_b64(version).decode()))
value.append("ntlm-version:{}".format(ntlm_ver))
return 'NTLM ' + ','.join(value)
|
def _ntlm_authenticate_info(request):
"""
Extract host information in an NTLM_AUTH message
"""
if (len(request) < 52):
LOGGER.warning("NTLM message is too short (%d) but should be at least "
"52 char long", len(request))
return None
value = []
offset, ln = struct.unpack('IH', request[32:36] + request[28:30])
if ln > 0:
value.append("domain:" + \
encode_b64(_extract_substr(request, offset, ln)).decode())
has_version = False
# Flags are not present in a NTLM_AUTH message when the data block starts
# before index 64
if offset >= 64 and len(request) > 64:
flags, = struct.unpack('I', request[60:64])
has_version = flags & flag_version
off, ln = struct.unpack('IH', request[40:44] + request[36:38])
if ln > 0:
value.append("user-name:" + \
encode_b64(_extract_substr(request, off, ln)).decode())
off, ln = struct.unpack('IH', request[48:52] + request[44:46])
if ln > 0:
value.append("workstation:" + \
encode_b64(_extract_substr(request, off, ln)).decode())
# Get OS Version if the `Negotiate Version` is set
# (NTLM_AUTH messages with a data block starting before index 72 do not
# contain information on the version)
if has_version and offset >= 72 and request[72:]:
maj, minor, bld, ntlm_ver = struct.unpack('BBHB', request[64:65] +
request[65:66] +
request[66:68] +
request[71:72])
version = "{}.{}.{}".format(maj, minor, bld).encode()
value.append("ntlm-os:{}".format(encode_b64(version).decode()))
value.append("ntlm-version:{}".format(ntlm_ver))
return 'NTLM ' + ','.join(value)
|
27,861 |
def main():
archs = {
'alex': alex.Alex,
'alex_fp16': alex.AlexFp16,
'googlenet': googlenet.GoogLeNet,
'googlenetbn': googlenetbn.GoogLeNetBN,
'googlenetbn_fp16': googlenetbn.GoogLeNetBNFp16,
'nin': nin.NIN,
'resnet50': resnet50.ResNet50,
'resnext50': resnext50.ResNeXt50,
}
parser = argparse.ArgumentParser(
description='Learning convnet from ILSVRC2012 dataset')
parser.add_argument('train', help='Path to training image-label list file')
parser.add_argument('val', help='Path to validation image-label list file')
parser.add_argument('--arch', '-a', choices=archs.keys(), default='nin',
help='Convnet architecture')
parser.add_argument('--batchsize', '-B', type=int, default=32,
help='Learning minibatch size')
parser.add_argument('--epoch', '-E', type=int, default=10,
help='Number of epochs to train')
parser.add_argument('--device', '-d', type=str, default='-1',
help='Device specifier. Either ChainerX device '
'specifier or an integer. If non-negative integer, '
'CuPy arrays with specified device id are used. If '
'negative integer, NumPy arrays are used')
parser.add_argument('--initmodel',
help='Initialize the model from given file')
parser.add_argument('--loaderjob', '-j', type=int,
help='Number of parallel data loading processes')
parser.add_argument('--mean', '-m', default='mean.npy',
help='Mean file (computed by compute_mean.py)')
parser.add_argument('--resume', '-r', default='',
help='Initialize the trainer from given file')
parser.add_argument('--out', '-o', default='result',
help='Output directory')
parser.add_argument('--root', '-R', default='.',
help='Root directory path of image files')
parser.add_argument('--val_batchsize', '-b', type=int, default=250,
help='Validation minibatch size')
parser.add_argument('--test', action='store_true')
parser.set_defaults(test=False)
parser.add_argument('--dali', action='store_true')
parser.set_defaults(dali=False)
group = parser.add_argument_group('deprecated arguments')
group.add_argument('--gpu', '-g', type=int, nargs='?', const=0,
help='GPU ID (negative value indicates CPU)')
args = parser.parse_args()
device = parse_device(args)
print('Device: {}'.format(device))
print('# Minibatch-size: {}'.format(args.batchsize))
print('# epoch: {}'.format(args.epoch))
print('')
# Initialize the model to train
model = archs[args.arch]()
if args.initmodel:
print('Load model from {}'.format(args.initmodel))
chainer.serializers.load_npz(args.initmodel, model)
model.to_device(device)
device.use()
# Load the mean file
mean = np.load(args.mean)
if args.dali:
if not dali_util._dali_available:
raise RuntimeError('DALI seems not available on your system.')
num_threads = args.loaderjob
if num_threads is None or num_threads <= 0:
num_threads = 1
ch_mean = list(np.average(mean, axis=(1, 2)))
ch_std = [255.0, 255.0, 255.0]
# Setup DALI pipelines
train_pipe = dali_util.DaliPipelineTrain(
args.train, args.root, model.insize, args.batchsize,
num_threads, args.gpu, True, mean=ch_mean, std=ch_std)
val_pipe = dali_util.DaliPipelineVal(
args.val, args.root, model.insize, args.val_batchsize,
num_threads, args.gpu, False, mean=ch_mean, std=ch_std)
train_iter = chainer.iterators.DaliIterator(train_pipe)
val_iter = chainer.iterators.DaliIterator(val_pipe, repeat=False)
# converter = dali_converter
converter = dali_util.DaliConverter(mean=mean, crop_size=model.insize)
else:
# Load the dataset files
train = PreprocessedDataset(args.train, args.root, mean, model.insize)
val = PreprocessedDataset(args.val, args.root, mean, model.insize,
False)
# These iterators load the images with subprocesses running in parallel
# to the training/validation.
train_iter = chainer.iterators.MultiprocessIterator(
train, args.batchsize, n_processes=args.loaderjob)
val_iter = chainer.iterators.MultiprocessIterator(
val, args.val_batchsize, repeat=False, n_processes=args.loaderjob)
converter = dataset.concat_examples
# Set up an optimizer
optimizer = chainer.optimizers.MomentumSGD(lr=0.01, momentum=0.9)
optimizer.setup(model)
# Set up a trainer
updater = training.updaters.StandardUpdater(
train_iter, optimizer, converter=converter, device=device)
trainer = training.Trainer(updater, (args.epoch, 'epoch'), args.out)
val_interval = (1 if args.test else 100000), 'iteration'
log_interval = (1 if args.test else 1000), 'iteration'
trainer.extend(extensions.Evaluator(val_iter, model, converter=converter,
device=device), trigger=val_interval)
# TODO(sonots): Temporarily disabled for chainerx. Fix it.
if not (chainerx.is_available() and device.xp is chainerx):
trainer.extend(extensions.DumpGraph('main/loss'))
trainer.extend(extensions.snapshot(), trigger=val_interval)
trainer.extend(extensions.snapshot_object(
model, 'model_iter_{.updater.iteration}'), trigger=val_interval)
# Be careful to pass the interval directly to LogReport
# (it determines when to emit log rather than when to read observations)
trainer.extend(extensions.LogReport(trigger=log_interval))
trainer.extend(extensions.observe_lr(), trigger=log_interval)
trainer.extend(extensions.PrintReport([
'epoch', 'iteration', 'main/loss', 'validation/main/loss',
'main/accuracy', 'validation/main/accuracy', 'lr'
]), trigger=log_interval)
trainer.extend(extensions.ProgressBar(update_interval=10))
if args.resume:
chainer.serializers.load_npz(args.resume, trainer)
trainer.run()
|
def main():
archs = {
'alex': alex.Alex,
'alex_fp16': alex.AlexFp16,
'googlenet': googlenet.GoogLeNet,
'googlenetbn': googlenetbn.GoogLeNetBN,
'googlenetbn_fp16': googlenetbn.GoogLeNetBNFp16,
'nin': nin.NIN,
'resnet50': resnet50.ResNet50,
'resnext50': resnext50.ResNeXt50,
}
parser = argparse.ArgumentParser(
description='Learning convnet from ILSVRC2012 dataset')
parser.add_argument('train', help='Path to training image-label list file')
parser.add_argument('val', help='Path to validation image-label list file')
parser.add_argument('--arch', '-a', choices=archs.keys(), default='nin',
help='Convnet architecture')
parser.add_argument('--batchsize', '-B', type=int, default=32,
help='Learning minibatch size')
parser.add_argument('--epoch', '-E', type=int, default=10,
help='Number of epochs to train')
parser.add_argument('--device', '-d', type=str, default='-1',
help='Device specifier. Either ChainerX device '
'specifier or an integer. If non-negative integer, '
'CuPy arrays with specified device id are used. If '
'negative integer, NumPy arrays are used')
parser.add_argument('--initmodel',
help='Initialize the model from given file')
parser.add_argument('--loaderjob', '-j', type=int,
help='Number of parallel data loading processes')
parser.add_argument('--mean', '-m', default='mean.npy',
help='Mean file (computed by compute_mean.py)')
parser.add_argument('--resume', '-r', default='',
help='Initialize the trainer from given file')
parser.add_argument('--out', '-o', default='result',
help='Output directory')
parser.add_argument('--root', '-R', default='.',
help='Root directory path of image files')
parser.add_argument('--val_batchsize', '-b', type=int, default=250,
help='Validation minibatch size')
parser.add_argument('--test', action='store_true')
parser.set_defaults(test=False)
parser.add_argument('--dali', action='store_true')
parser.set_defaults(dali=False)
group = parser.add_argument_group('deprecated arguments')
group.add_argument('--gpu', '-g', type=int, nargs='?', const=0,
help='GPU ID (negative value indicates CPU)')
args = parser.parse_args()
device = parse_device(args)
print('Device: {}'.format(device))
print('# Minibatch-size: {}'.format(args.batchsize))
print('# epoch: {}'.format(args.epoch))
print('')
# Initialize the model to train
model = archs[args.arch]()
if args.initmodel:
print('Load model from {}'.format(args.initmodel))
chainer.serializers.load_npz(args.initmodel, model)
model.to_device(device)
device.use()
# Load the mean file
mean = np.load(args.mean)
if args.dali:
if not dali_util._dali_available:
raise RuntimeError('DALI seems not available on your system.')
num_threads = args.loaderjob
if num_threads is None or num_threads <= 0:
num_threads = 1
ch_mean = list(np.average(mean, axis=(1, 2)))
ch_std = [255.0, 255.0, 255.0]
# Setup DALI pipelines
train_pipe = dali_util.DaliPipelineTrain(
args.train, args.root, model.insize, args.batchsize,
num_threads, args.gpu, True, mean=ch_mean, std=ch_std)
val_pipe = dali_util.DaliPipelineVal(
args.val, args.root, model.insize, args.val_batchsize,
num_threads, args.gpu, False, mean=ch_mean, std=ch_std)
train_iter = chainer.iterators.DaliIterator(train_pipe)
val_iter = chainer.iterators.DaliIterator(val_pipe, repeat=False)
# converter = dali_converter
converter = dali_util.DaliConverter(mean=mean, crop_size=model.insize)
else:
# Load the dataset files
train = PreprocessedDataset(args.train, args.root, mean, model.insize)
val = PreprocessedDataset(args.val, args.root, mean, model.insize,
False)
# These iterators load the images with subprocesses running in parallel
# to the training/validation.
train_iter = chainer.iterators.MultiprocessIterator(
train, args.batchsize, n_processes=args.loaderjob)
val_iter = chainer.iterators.MultiprocessIterator(
val, args.val_batchsize, repeat=False, n_processes=args.loaderjob)
converter = dataset.concat_examples
# Set up an optimizer
optimizer = chainer.optimizers.MomentumSGD(lr=0.01, momentum=0.9)
optimizer.setup(model)
# Set up a trainer
updater = training.updaters.StandardUpdater(
train_iter, optimizer, converter=converter, device=device)
trainer = training.Trainer(updater, (args.epoch, 'epoch'), args.out)
val_interval = (1 if args.test else 100000), 'iteration'
log_interval = (1 if args.test else 1000), 'iteration'
trainer.extend(extensions.Evaluator(val_iter, model, converter=converter,
device=device), trigger=val_interval)
# TODO(sonots): Temporarily disabled for chainerx. Fix it.
if device.xp is not chainerx:
trainer.extend(extensions.DumpGraph('main/loss'))
trainer.extend(extensions.snapshot(), trigger=val_interval)
trainer.extend(extensions.snapshot_object(
model, 'model_iter_{.updater.iteration}'), trigger=val_interval)
# Be careful to pass the interval directly to LogReport
# (it determines when to emit log rather than when to read observations)
trainer.extend(extensions.LogReport(trigger=log_interval))
trainer.extend(extensions.observe_lr(), trigger=log_interval)
trainer.extend(extensions.PrintReport([
'epoch', 'iteration', 'main/loss', 'validation/main/loss',
'main/accuracy', 'validation/main/accuracy', 'lr'
]), trigger=log_interval)
trainer.extend(extensions.ProgressBar(update_interval=10))
if args.resume:
chainer.serializers.load_npz(args.resume, trainer)
trainer.run()
|
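Both training scripts above feed the trainer through an iterator plus a converter that stacks examples into batched arrays. As a rough, dependency-light sketch of that pattern, the snippet below uses SerialIterator in place of MultiprocessIterator and a random in-memory toy dataset instead of PreprocessedDataset; shapes and contents are placeholders.

import numpy as np
import chainer
from chainer.dataset import concat_examples

# Toy (image, label) tuples standing in for PreprocessedDataset output.
toy = [(np.random.rand(3, 8, 8).astype(np.float32), np.int32(i % 2))
       for i in range(10)]

it = chainer.iterators.SerialIterator(toy, batch_size=4, repeat=False, shuffle=False)
batch = it.next()
x, t = concat_examples(batch)  # the converter used in the non-DALI branch above
print(x.shape, t.shape)        # (4, 3, 8, 8) (4,)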
27,958 |
def ctu_mapping(clang_version_info):
"""Clang version dependent ctu mapping tool path and mapping file name.
The mapping tool is assumed to be located inside the installed
directory of the analyzer. Certain binary distributions can postfix
the tool name with the major version number, the number and the tool
name being separated by a dash. By default the shorter name is looked
up; if it is not found, the postfixed name is tried.
"""
if not clang_version_info:
LOG.debug("No clang version information"
"can not detect ctu mapping tool.")
return None, None
old_mapping_tool_name = 'clang-func-mapping'
old_mapping_file_name = 'externalFnMap.txt'
new_mapping_tool_name = 'clang-extdef-mapping'
new_mapping_file_name = 'externalDefMap.txt'
major_version = clang_version_info.major_version
if major_version > 7:
tool_name = new_mapping_tool_name
mapping_file = new_mapping_file_name
else:
tool_name = old_mapping_tool_name
mapping_file = old_mapping_file_name
installed_dir = clang_version_info.installed_dir
tool_path = os.path.join(installed_dir, tool_name)
if os.path.isfile(tool_path):
return tool_path, mapping_file
LOG.debug(
"Mapping tool '{}' suggested by autodetection is not found in "
"directory reported by Clang '{}'. Trying with version-postfixed "
"filename...".format(tool_path, installed_dir))
postfixed_tool_path = ''.join([tool_path, '-', str(major_version)])
if os.path.isfile(postfixed_tool_path):
return postfixed_tool_path, mapping_file
LOG.debug(
"Postfixed mapping tool '{}' suggested by autodetection is not "
"found in directory reported by Clang '{}'."
.format(postfixed_tool_path, installed_dir))
return None, None
|
def ctu_mapping(clang_version_info):
"""Clang version dependent ctu mapping tool path and mapping file name.
The mapping tool is assumed to be located inside the installed
directory of the analyzer. Certain binary distributions can postfix
the tool name with the major version number, the number and the tool
name being separated by a dash. By default the shorter name is looked
up; if it is not found, the postfixed name is tried.
"""
if not clang_version_info:
LOG.debug("No clang version information. "
"can not detect ctu mapping tool.")
return None, None
old_mapping_tool_name = 'clang-func-mapping'
old_mapping_file_name = 'externalFnMap.txt'
new_mapping_tool_name = 'clang-extdef-mapping'
new_mapping_file_name = 'externalDefMap.txt'
major_version = clang_version_info.major_version
if major_version > 7:
tool_name = new_mapping_tool_name
mapping_file = new_mapping_file_name
else:
tool_name = old_mapping_tool_name
mapping_file = old_mapping_file_name
installed_dir = clang_version_info.installed_dir
tool_path = os.path.join(installed_dir, tool_name)
if os.path.isfile(tool_path):
return tool_path, mapping_file
LOG.debug(
"Mapping tool '{}' suggested by autodetection is not found in "
"directory reported by Clang '{}'. Trying with version-postfixed "
"filename...".format(tool_path, installed_dir))
postfixed_tool_path = ''.join([tool_path, '-', str(major_version)])
if os.path.isfile(postfixed_tool_path):
return postfixed_tool_path, mapping_file
LOG.debug(
"Postfixed mapping tool '{}' suggested by autodetection is not "
"found in directory reported by Clang '{}'."
.format(postfixed_tool_path, installed_dir))
return None, None
|
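The fallback in ctu_mapping reduces to: look for `<installed_dir>/<tool>`, then `<installed_dir>/<tool>-<major>`. A standalone sketch of just that lookup follows; the directory and version number are hypothetical and no Clang installation is assumed.

import os

def find_tool(installed_dir, tool_name, major_version):
    """Return the first existing candidate: plain name, then '<name>-<major>'."""
    for candidate in (tool_name, "{}-{}".format(tool_name, major_version)):
        path = os.path.join(installed_dir, candidate)
        if os.path.isfile(path):
            return path
    return None

# Hypothetical values, for illustration only.
print(find_tool("/usr/lib/llvm-14/bin", "clang-extdef-mapping", 14))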
7,274 |
def threshold_local(image, block_size, method='gaussian', offset=0,
mode='reflect', param=None, cval=0):
"""Compute a threshold mask image based on local pixel neighborhood.
Also known as adaptive or dynamic thresholding. The threshold value is
the weighted mean for the local neighborhood of a pixel subtracted by a
constant. Alternatively the threshold can be determined dynamically by a
given function, using the 'generic' method.
Parameters
----------
image : (N, M) ndarray
Input image.
block_size : int
Odd size of pixel neighborhood which is used to calculate the
threshold value (e.g. 3, 5, 7, ..., 21, ...).
method : {'generic', 'gaussian', 'mean', 'median'}, optional
Method used to determine adaptive threshold for local neighbourhood in
weighted mean image.
* 'generic': use custom function (see ``param`` parameter)
* 'gaussian': apply gaussian filter (see ``param`` parameter for custom\
sigma value)
* 'mean': apply arithmetic mean filter
* 'median': apply median rank filter
By default the 'gaussian' method is used.
offset : float, optional
Constant subtracted from weighted mean of neighborhood to calculate
the local threshold value. Default offset is 0.
mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
The mode parameter determines how the array borders are handled, where
cval is the value when mode is equal to 'constant'.
Default is 'reflect'.
param : {int, function}, optional
Either specify sigma for 'gaussian' method or function object for
'generic' method. This function takes the flat array of local
neighbourhood as a single argument and returns the calculated
threshold for the centre pixel.
cval : float, optional
Value to fill past edges of input if mode is 'constant'.
Returns
-------
threshold : (N, M) ndarray
Threshold image. All pixels in the input image higher than the
corresponding pixel in the threshold image are considered foreground.
References
----------
.. [1] Gonzalez, R. C. and Wood, R. E. "Digital Image Processing
(2nd Edition)." Prentice-Hall Inc, 2002: 600-612.
ISBN: 0-201-18075-8
Examples
--------
>>> from skimage.data import camera
>>> image = camera()[:50, :50]
>>> binary_image1 = image > threshold_local(image, 15, 'mean')
>>> func = lambda arr: arr.mean()
>>> binary_image2 = image > threshold_local(image, 15, 'generic',
... param=func)
"""
if block_size % 2 == 0:
raise ValueError("The kwarg ``block_size`` must be odd! Given "
"``block_size`` {0} is even.".format(block_size))
check_nD(image, 2)
thresh_image = np.zeros(image.shape, 'double')
if method == 'generic':
ndi.generic_filter(image, param, block_size,
output=thresh_image, mode=mode, cval=cval)
elif method == 'gaussian':
if param is None:
# automatically determine sigma which covers > 99% of distribution
sigma = (block_size - 1) / 6.0
else:
sigma = param
ndi.gaussian_filter(image, sigma, output=thresh_image, mode=mode,
cval=cval)
elif method == 'mean':
mask = 1. / block_size * np.ones((block_size,))
# separation of filters to speedup convolution
ndi.convolve1d(image, mask, axis=0, output=thresh_image, mode=mode,
cval=cval)
ndi.convolve1d(thresh_image, mask, axis=1, output=thresh_image,
mode=mode, cval=cval)
elif method == 'median':
ndi.median_filter(image, block_size, output=thresh_image, mode=mode,
cval=cval)
else:
raise ValueError("Invalid method specified. Please use `generic`, "
"`gaussian`, `mean`, or `median`.")
return thresh_image - offset
|
def threshold_local(image, block_size, method='gaussian', offset=0,
mode='reflect', param=None, cval=0):
"""Compute a threshold mask image based on local pixel neighborhood.
Also known as adaptive or dynamic thresholding. The threshold value is
the weighted mean for the local neighborhood of a pixel subtracted by a
constant. Alternatively the threshold can be determined dynamically by a
given function, using the 'generic' method.
Parameters
----------
image : (N, M) ndarray
Input image.
block_size : int
Odd size of pixel neighborhood which is used to calculate the
threshold value (e.g. 3, 5, 7, ..., 21, ...).
method : {'generic', 'gaussian', 'mean', 'median'}, optional
Method used to determine adaptive threshold for local neighbourhood in
weighted mean image.
* 'generic': use custom function (see ``param`` parameter)
* 'gaussian': apply gaussian filter (see ``param`` parameter for custom\
sigma value)
* 'mean': apply arithmetic mean filter
* 'median': apply median rank filter
By default the 'gaussian' method is used.
offset : float, optional
Constant subtracted from weighted mean of neighborhood to calculate
the local threshold value. Default offset is 0.
mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
The mode parameter determines how the array borders are handled, where
cval is the value when mode is equal to 'constant'.
Default is 'reflect'.
param : {int, function}, optional
Either specify sigma for 'gaussian' method or function object for
'generic' method. This function takes the flat array of local
neighbourhood as a single argument and returns the calculated
threshold for the centre pixel.
cval : float, optional
Value to fill past edges of input if mode is 'constant'.
Returns
-------
threshold : (N, M) ndarray
Threshold image. All pixels in the input image higher than the
corresponding pixel in the threshold image are considered foreground.
References
----------
.. [1] Gonzalez, R. C. and Wood, R. E. "Digital Image Processing
(2nd Edition)." Prentice-Hall Inc., 2002: 600--612.
ISBN: 0-201-18075-8
Examples
--------
>>> from skimage.data import camera
>>> image = camera()[:50, :50]
>>> binary_image1 = image > threshold_local(image, 15, 'mean')
>>> func = lambda arr: arr.mean()
>>> binary_image2 = image > threshold_local(image, 15, 'generic',
... param=func)
"""
if block_size % 2 == 0:
raise ValueError("The kwarg ``block_size`` must be odd! Given "
"``block_size`` {0} is even.".format(block_size))
check_nD(image, 2)
thresh_image = np.zeros(image.shape, 'double')
if method == 'generic':
ndi.generic_filter(image, param, block_size,
output=thresh_image, mode=mode, cval=cval)
elif method == 'gaussian':
if param is None:
# automatically determine sigma which covers > 99% of distribution
sigma = (block_size - 1) / 6.0
else:
sigma = param
ndi.gaussian_filter(image, sigma, output=thresh_image, mode=mode,
cval=cval)
elif method == 'mean':
mask = 1. / block_size * np.ones((block_size,))
# separation of filters to speedup convolution
ndi.convolve1d(image, mask, axis=0, output=thresh_image, mode=mode,
cval=cval)
ndi.convolve1d(thresh_image, mask, axis=1, output=thresh_image,
mode=mode, cval=cval)
elif method == 'median':
ndi.median_filter(image, block_size, output=thresh_image, mode=mode,
cval=cval)
else:
raise ValueError("Invalid method specified. Please use `generic`, "
"`gaussian`, `mean`, or `median`.")
return thresh_image - offset
|
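The 'mean' branch above relies on separability: convolving with a one-dimensional box filter along each axis is equivalent to a full two-dimensional block mean, at lower cost. A small numerical check of that equivalence on random data (the block size is chosen arbitrarily):

import numpy as np
from scipy import ndimage as ndi

rng = np.random.default_rng(0)
image = rng.random((32, 32))
block_size = 5

# Separable version, as in the 'mean' branch above.
mask = np.ones(block_size) / block_size
sep = ndi.convolve1d(image, mask, axis=0, mode='reflect')
sep = ndi.convolve1d(sep, mask, axis=1, mode='reflect')

# Direct 2-D block mean for comparison.
full = ndi.uniform_filter(image, size=block_size, mode='reflect')

print(np.allclose(sep, full))  # expected: True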
30,062 |
def test_resolver_future_warning(ens):
with pytest.warns(
FutureWarning,
match="The function signature for resolver\\(\\) will change in v6 to accept 'name' as a "
"param, over 'normalized_name', and the method will normalize the name internally.",
):
ens.resolver('tester.eth')
|
def test_resolver_future_warning(ens):
with pytest.warns(
FutureWarning,
match="The function signature for resolver\\(\\) will change in v6 to accept 'name' as a "
"param, over 'normal_name', and the method will normalize the name internally.",
):
ens.resolver('tester.eth')
|
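Both versions of the test above depend on pytest.warns treating match= as a regular expression, which is why the parentheses in resolver() are escaped. A tiny self-contained illustration with a made-up warning function:

import warnings
import pytest

def legacy_resolver():
    warnings.warn(
        "The function signature for resolver() will change in v6", FutureWarning
    )

# `match` is a regex, so literal parentheses must be escaped;
# an unescaped '(' would open a regex group instead.
with pytest.warns(FutureWarning, match=r"resolver\(\) will change in v6"):
    legacy_resolver()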
57,718 |
def parse_dlp_report(report_json):
"""Parses DLP Report for display"""
data_patterns = parse_data_patterns(report_json)
results = CommandResults(
outputs_prefix='DLP.Reports',
outputs_key_field='DataProfile',
outputs=data_patterns,
readable_output=convert_to_human_readable(data_patterns)
)
return_results(results)
|
def parse_dlp_report(report_json):
"""Parses DLP Report for display"""
data_patterns = parse_data_patterns(report_json)
results = CommandResults(
outputs_prefix='DLP.Reports',
outputs_key_field='DataProfile',
outputs=data_patterns,
readable_output=convert_to_human_readable(data_patterns),
raw_response=report_json
)
return_results(results)
|
36,728 |
def _serialize_xml(write, elem, qnames, namespaces,
short_empty_elements, **kwargs):
tag = elem.tag
text = elem.text
if tag is Comment:
write("<!--%s-->" % text)
elif tag is ProcessingInstruction:
write("<?%s?>" % text)
else:
tag = qnames[tag]
if tag is None:
if text:
write(_escape_cdata(text))
for e in elem:
_serialize_xml(write, e, qnames, None,
short_empty_elements=short_empty_elements)
else:
write("<" + tag)
items = list(elem.items())
if items or namespaces:
if namespaces:
for v, k in sorted(namespaces.items(),
key=lambda x: x[1]): # sort on prefix
if k:
k = ":" + k
write(" xmlns%s=\"%s\"" % (
k,
_escape_attrib(v)
))
for k, v in items:
if isinstance(k, QName):
k = k.text
if isinstance(v, QName):
v = qnames[v.text]
else:
v = _escape_attrib(v)
write(" %s=\"%s\"" % (qnames[k], v))
if text or len(elem) or not bool(short_empty_elements[tag]):
write(">")
if text:
write(_escape_cdata(text))
for e in elem:
_serialize_xml(write, e, qnames, None,
short_empty_elements=short_empty_elements)
write("</" + tag + ">")
else:
write(short_empty_elements[tag].value+"/>")
if elem.tail:
write(_escape_cdata(elem.tail))
|
def _serialize_xml(write, elem, qnames, namespaces,
short_empty_elements, **kwargs):
tag = elem.tag
text = elem.text
if tag is Comment:
write("<!--%s-->" % text)
elif tag is ProcessingInstruction:
write("<?%s?>" % text)
else:
tag = qnames[tag]
if tag is None:
if text:
write(_escape_cdata(text))
for e in elem:
_serialize_xml(write, e, qnames, None,
short_empty_elements=short_empty_elements)
else:
write("<" + tag)
items = list(elem.items())
if items or namespaces:
if namespaces:
for v, k in sorted(namespaces.items(),
key=lambda x: x[1]): # sort on prefix
if k:
k = ":" + k
write(" xmlns%s=\"%s\"" % (
k,
_escape_attrib(v)
))
for k, v in items:
if isinstance(k, QName):
k = k.text
if isinstance(v, QName):
v = qnames[v.text]
else:
v = _escape_attrib(v)
write(" %s=\"%s\"" % (qnames[k], v))
if text or len(elem) or not short_empty_elements[tag]:
write(">")
if text:
write(_escape_cdata(text))
for e in elem:
_serialize_xml(write, e, qnames, None,
short_empty_elements=short_empty_elements)
write("</" + tag + ">")
else:
write(short_empty_elements[tag].value+"/>")
if elem.tail:
write(_escape_cdata(elem.tail))
|
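The final branch in both versions chooses between an explicit closing tag and a self-closing element based on short_empty_elements. The public way to exercise this is the keyword of the same name on xml.etree.ElementTree.tostring:

import xml.etree.ElementTree as ET

elem = ET.Element("item")
print(ET.tostring(elem, short_empty_elements=True))   # b'<item />'
print(ET.tostring(elem, short_empty_elements=False))  # b'<item></item>'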
30,124 |
def test_sig_collect_3_multiple_use_fromfile(runtmp, manifest_db_format):
# collect a manifest from three .zip files
protzip = utils.get_test_data('prot/protein.zip')
hpzip = utils.get_test_data('prot/hp.zip')
dayzip = utils.get_test_data('prot/dayhoff.zip')
ext = 'sqlmf' if manifest_db_format == 'sql' else 'csv'
fromfile = runtmp.output('fromfile.txt')
with open(fromfile, 'wt') as fp:
print(protzip, file=fp)
print(hpzip, file=fp)
print(dayzip, file=fp)
runtmp.sourmash('sig', 'collect', '--from-file', 'fromfile.txt',
'-o', f'mf.{ext}', '-F', manifest_db_format)
manifest_fn = runtmp.output(f'mf.{ext}')
manifest = BaseCollectionManifest.load_from_filename(manifest_fn)
assert len(manifest) == 6
md5_list = [ row['md5'] for row in manifest.rows ]
assert '16869d2c8a1d29d1c8e56f5c561e585e' in md5_list
assert '120d311cc785cc9d0df9dc0646b2b857' in md5_list
assert 'ea2a1ad233c2908529d124a330bcb672' in md5_list
assert 'bb0e6d90df01b7bd5d0956a5f9e3ed12' in md5_list
assert 'fbca5e5211e4d58427997fd5c8343e9a' in md5_list
assert '1cbd888bf910f83ad8f1715509183223' in md5_list
locations = set([ row['internal_location'] for row in manifest.rows ])
assert protzip in locations
assert hpzip in locations
assert dayzip in locations
assert len(locations) == 3, locations
|
def test_sig_collect_3_multiple_use_fromfile(runtmp, manifest_db_format):
# collect a manifest from three .zip files using --from-file
protzip = utils.get_test_data('prot/protein.zip')
hpzip = utils.get_test_data('prot/hp.zip')
dayzip = utils.get_test_data('prot/dayhoff.zip')
ext = 'sqlmf' if manifest_db_format == 'sql' else 'csv'
fromfile = runtmp.output('fromfile.txt')
with open(fromfile, 'wt') as fp:
print(protzip, file=fp)
print(hpzip, file=fp)
print(dayzip, file=fp)
runtmp.sourmash('sig', 'collect', '--from-file', 'fromfile.txt',
'-o', f'mf.{ext}', '-F', manifest_db_format)
manifest_fn = runtmp.output(f'mf.{ext}')
manifest = BaseCollectionManifest.load_from_filename(manifest_fn)
assert len(manifest) == 6
md5_list = [ row['md5'] for row in manifest.rows ]
assert '16869d2c8a1d29d1c8e56f5c561e585e' in md5_list
assert '120d311cc785cc9d0df9dc0646b2b857' in md5_list
assert 'ea2a1ad233c2908529d124a330bcb672' in md5_list
assert 'bb0e6d90df01b7bd5d0956a5f9e3ed12' in md5_list
assert 'fbca5e5211e4d58427997fd5c8343e9a' in md5_list
assert '1cbd888bf910f83ad8f1715509183223' in md5_list
locations = set([ row['internal_location'] for row in manifest.rows ])
assert protzip in locations
assert hpzip in locations
assert dayzip in locations
assert len(locations) == 3, locations
|
21,256 |
def needs_ocr(content):
"""Determines if OCR is needed for a PACER PDF.
Every document in PACER (pretty much) has the case number written on the
top of every page. This is a great practice, but it means that to test if
OCR is needed, we need to remove this text and see if anything is left. The
line usually looks something like:
Case 2:06-cv-00376-SRW Document 1-2 Filed 04/25/2006 Page 1 of 1
Appeal: 15-1504 Doc: 6 Filed: 05/12/2015 Pg: 1 of 4
Appellate Case: 14-3253 Page: 1 Date Filed: 01/14/2015 Entry ID: 4234486
USCA Case #16-1062 Document #1600692 Filed: 02/24/2016 Page 1 of 3
USCA11 Case: 21-12355 Date Filed: 07/13/202 Page: 1 of 2
This function removes these lines so that if no text remains, we can be sure
that the PDF needs OCR.
:param content: The content of a PDF.
:return: boolean indicating if OCR is needed.
"""
for line in content.splitlines():
line = line.strip()
if line.startswith(("Case", "Appellate", "Appeal", "USCA", "USCA11")):
continue
elif line:
# We found a line with good content. No OCR needed.
return False
# We arrive here if no line was found containing good content.
return True
|
def needs_ocr(content):
"""Determines if OCR is needed for a PACER PDF.
Every document in PACER (pretty much) has the case number written on the
top of every page. This is a great practice, but it means that to test if
OCR is needed, we need to remove this text and see if anything is left. The
line usually looks something like:
Case 2:06-cv-00376-SRW Document 1-2 Filed 04/25/2006 Page 1 of 1
Appeal: 15-1504 Doc: 6 Filed: 05/12/2015 Pg: 1 of 4
Appellate Case: 14-3253 Page: 1 Date Filed: 01/14/2015 Entry ID: 4234486
USCA Case #16-1062 Document #1600692 Filed: 02/24/2016 Page 1 of 3
USCA11 Case: 21-12355 Date Filed: 07/13/202 Page: 1 of 2
This function removes these lines so that if no text remains, we can be sure
that the PDF needs OCR.
:param content: The content of a PDF.
:return: boolean indicating if OCR is needed.
"""
for line in content.splitlines():
line = line.strip()
if line.startswith(("Case", "Appellate", "Appeal", "USCA")):
continue
elif line:
# We found a line with good content. No OCR needed.
return False
# We arrive here if no line was found containing good content.
return True
|
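needs_ocr declares a page extractable as soon as a single stripped line survives the header filter. The sketch below mirrors that logic in a compact, self-contained form (the prefix tuple follows the modified version above; the sample strings are invented):

def _needs_ocr(content, prefixes=("Case", "Appellate", "Appeal", "USCA")):
    """Mirror of the logic above: OCR is needed only if no non-header text remains."""
    return not any(
        line.strip() and not line.strip().startswith(prefixes)
        for line in content.splitlines()
    )

header_only = "Case 2:06-cv-00376-SRW Document 1-2 Filed 04/25/2006 Page 1 of 1\n\n"
with_text = header_only + "The parties hereby stipulate as follows..."
print(_needs_ocr(header_only))  # True  -> scanned page, OCR required
print(_needs_ocr(with_text))    # False -> extractable text already present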
35,414 |
def get_eps_factor(lr, plot=False):
all_msgs = sorted(lr, key=lambda msg: msg.logMonoTime)
engaged = False
steering_pressed = False
torque_cmd, eps_torque = None, None
cmds, eps = [], []
for msg in all_msgs:
if msg.which() == 'carState':
steering_pressed = msg.carState.steeringpressed
if msg.which() != 'can':
continue
for m in msg.can:
if m.address == 0x2e4 and m.src == 128:
engaged = bool(m.dat[0] & 1)
torque_cmd = to_signed((m.dat[1] << 8) | m.dat[2], 16)
elif m.address == 0x260 and m.src == 0:
eps_torque = to_signed((m.dat[5] << 8) | m.dat[6], 16)
if engaged and torque_cmd is not None and eps_torque is not None and not steering_pressed:
cmds.append(torque_cmd)
eps.append(eps_torque)
else:
if len(cmds) > MIN_SAMPLES:
break
cmds, eps = [], []
if len(cmds) < MIN_SAMPLES:
raise Exception("too few samples found in route")
lm = linear_model.LinearRegression(fit_intercept=False)
lm.fit(np.array(cmds).reshape(-1, 1), eps)
scale_factor = 1. / lm.coef_[0]
if plot:
plt.plot(np.array(eps) * scale_factor)
plt.plot(cmds)
plt.show()
return scale_factor
|
def get_eps_factor(lr, plot=False):
all_msgs = sorted(lr, key=lambda msg: msg.logMonoTime)
engaged = False
steering_pressed = False
torque_cmd, eps_torque = None, None
cmds, eps = [], []
for msg in all_msgs:
if msg.which() == 'carState':
steering_pressed = msg.carState.steeringPressed
if msg.which() != 'can':
continue
for m in msg.can:
if m.address == 0x2e4 and m.src == 128:
engaged = bool(m.dat[0] & 1)
torque_cmd = to_signed((m.dat[1] << 8) | m.dat[2], 16)
elif m.address == 0x260 and m.src == 0:
eps_torque = to_signed((m.dat[5] << 8) | m.dat[6], 16)
if engaged and torque_cmd is not None and eps_torque is not None and not steering_pressed:
cmds.append(torque_cmd)
eps.append(eps_torque)
else:
if len(cmds) > MIN_SAMPLES:
break
cmds, eps = [], []
if len(cmds) < MIN_SAMPLES:
raise Exception("too few samples found in route")
lm = linear_model.LinearRegression(fit_intercept=False)
lm.fit(np.array(cmds).reshape(-1, 1), eps)
scale_factor = 1. / lm.coef_[0]
if plot:
plt.plot(np.array(eps) * scale_factor)
plt.plot(cmds)
plt.show()
return scale_factor
|
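At its core, both versions fit eps ~= coef * cmd with no intercept and report 1/coef as the scale factor. The snippet below reproduces just that fit on synthetic data standing in for the CAN-derived samples:

import numpy as np
from sklearn import linear_model

rng = np.random.default_rng(0)
cmds = rng.uniform(-1500, 1500, size=500)               # fake torque commands
eps = 0.73 * cmds + rng.normal(0, 10, size=cmds.size)   # fake EPS readings

lm = linear_model.LinearRegression(fit_intercept=False)
lm.fit(cmds.reshape(-1, 1), eps)
scale_factor = 1.0 / lm.coef_[0]
print(round(scale_factor, 3))  # roughly 1 / 0.73, i.e. about 1.37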
35,223 |
def linkcode_resolve(domain, info):
if domain != 'py' or not info['module']:
return None
# Import the object from module path
obj = _import_object_from_name(info['module'], info['fullname'])
# If it's not defined in the internal module, return None.
mod = inspect.getmodule(obj)
if mod is None:
return None
if not mod.__name__.split('.')[0] in _top_modules:
return None
# If it's wrapped (e.g., by `contextlib.contextmanager`), unwrap it
for _ in range(10):
if not hasattr(obj, '__wrapped__'):
break
obj = obj.__wrapped__
else:
raise RuntimeError(f'nested too deep: {info}')
# Get the source file name and line number at which obj is defined.
try:
filename = inspect.getsourcefile(obj)
except TypeError:
# obj is not a module, class, function, ..etc.
return None
# inspect can return None for cython objects
if filename is None:
filename = inspect.getfile(obj)
for ext in importlib.machinery.EXTENSION_SUFFIXES:
if filename.endswith(ext):
filename = filename[:-len(ext)] + '.pyx'
break
else:
return None
linenum = None
else:
# Get the source line number
_, linenum = inspect.getsourcelines(obj)
assert isinstance(linenum, int)
filename = os.path.realpath(filename)
relpath = _get_source_relative_path(filename)
return 'https://github.com/cupy/cupy/blob/{}/{}'.format(
tag, relpath if linenum is None else f'{relpath}#L{linenum}')
|
def linkcode_resolve(domain, info):
if domain != 'py' or not info['module']:
return None
# Import the object from module path
obj = _import_object_from_name(info['module'], info['fullname'])
# If it's not defined in the internal module, return None.
mod = inspect.getmodule(obj)
if mod is None:
return None
if not mod.__name__.split('.')[0] in _top_modules:
return None
# If it's wrapped (e.g., by `contextlib.contextmanager`), unwrap it
obj = inspect.unwrap(obj)
# Get the source file name and line number at which obj is defined.
try:
filename = inspect.getsourcefile(obj)
except TypeError:
# obj is not a module, class, function, ..etc.
return None
# inspect can return None for cython objects
if filename is None:
filename = inspect.getfile(obj)
for ext in importlib.machinery.EXTENSION_SUFFIXES:
if filename.endswith(ext):
filename = filename[:-len(ext)] + '.pyx'
break
else:
return None
linenum = None
else:
# Get the source line number
_, linenum = inspect.getsourcelines(obj)
assert isinstance(linenum, int)
filename = os.path.realpath(filename)
relpath = _get_source_relative_path(filename)
return 'https://github.com/cupy/cupy/blob/{}/{}'.format(
tag, relpath if linenum is None else f'{relpath}#L{linenum}')
|
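The only change between the two versions is swapping the manual __wrapped__ loop for inspect.unwrap, which follows the chain recorded by functools.wraps. A standalone check that the two approaches land on the same object (the decorator here is made up for illustration):

import functools
import inspect

def deco(fn):
    @functools.wraps(fn)
    def wrapper(*args, **kwargs):
        return fn(*args, **kwargs)
    return wrapper

@deco
@deco
def target():
    """Something to locate in the source."""

# Manual loop, as in the original version above.
obj = target
while hasattr(obj, "__wrapped__"):
    obj = obj.__wrapped__

print(obj is inspect.unwrap(target))       # True
print(inspect.getsourcelines(obj)[1] > 0)  # line number of the undecorated function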
37,644 |
def execute(
experiments,
backend,
basis_gates=None,
coupling_map=None, # circuit transpile options
backend_properties=None,
initial_layout=None,
seed_transpiler=None,
optimization_level=None,
pass_manager=None,
qobj_id=None,
qobj_header=None,
shots=None, # common run options
memory=None,
max_credits=None,
seed_simulator=None,
default_qubit_los=None,
default_meas_los=None, # schedule run options
qubit_lo_range=None,
meas_lo_range=None,
schedule_los=None,
meas_level=None,
meas_return=None,
memory_slots=None,
memory_slot_size=None,
rep_time=None,
rep_delay=None,
parameter_binds=None,
schedule_circuit=False,
inst_map=None,
meas_map=None,
scheduling_method=None,
init_qubits=None,
**run_config,
):
"""Execute a list of :class:`qiskit.circuit.QuantumCircuit` or
:class:`qiskit.pulse.Schedule` on a backend.
The execution is asynchronous, and a handle to a job instance is returned.
Args:
experiments (QuantumCircuit or list[QuantumCircuit] or Schedule or list[Schedule]):
Circuit(s) or pulse schedule(s) to execute
backend (Backend):
Backend to execute circuits on.
Transpiler options are automatically grabbed from
backend.configuration() and backend.properties().
If any other option is explicitly set (e.g. coupling_map), it
will override the backend's.
basis_gates (list[str]):
List of basis gate names to unroll to.
e.g: ``['u1', 'u2', 'u3', 'cx']``
If ``None``, do not unroll.
coupling_map (CouplingMap or list): Coupling map (perhaps custom) to
target in mapping. Multiple formats are supported:
#. CouplingMap instance
#. list
Must be given as an adjacency matrix, where each entry
specifies all two-qubit interactions supported by backend
e.g:
``[[0, 1], [0, 3], [1, 2], [1, 5], [2, 5], [4, 1], [5, 3]]``
backend_properties (BackendProperties):
Properties returned by a backend, including information on gate
errors, readout errors, qubit coherence times, etc. Find a backend
that provides this information with:
``backend.properties()``
initial_layout (Layout or dict or list):
Initial position of virtual qubits on physical qubits.
If this layout makes the circuit compatible with the coupling_map
constraints, it will be used.
The final layout is not guaranteed to be the same, as the transpiler
may permute qubits through swaps or other means.
Multiple formats are supported:
#. :class:`qiskit.transpiler.Layout` instance
#. ``dict``:
virtual to physical::
{qr[0]: 0,
qr[1]: 3,
qr[2]: 5}
physical to virtual::
{0: qr[0],
3: qr[1],
5: qr[2]}
#. ``list``
virtual to physical::
[0, 3, 5] # virtual qubits are ordered (in addition to named)
physical to virtual::
[qr[0], None, None, qr[1], None, qr[2]]
seed_transpiler (int): Sets random seed for the stochastic parts of the transpiler
optimization_level (int): How much optimization to perform on the circuits.
Higher levels generate more optimized circuits,
at the expense of longer transpilation time.
#. No optimization
#. Light optimization
#. Heavy optimization
#. Highest optimization
If None, level 1 will be chosen as default.
pass_manager (PassManager): The pass manager to use during transpilation. If this
arg is present, auto-selection of pass manager based on the transpile options
will be turned off and this pass manager will be used directly.
qobj_id (str): DEPRECATED: String identifier to annotate the Qobj. This has no effect
and the :attr:`~.QuantumCircuit.name` attribute of the input circuit(s) should be used
instead.
qobj_header (QobjHeader or dict): DEPRECATED: User input that will be inserted in Qobj
header, and will also be copied to the corresponding :class:`qiskit.result.Result`
header. Headers do not affect the run. This kwarg
has no effect anymore and the :attr:`~.QuantumCircuit.metadata` attribute of the
input circuit(s) should be used instead.
shots (int): Number of repetitions of each circuit, for sampling. Default: 1024
memory (bool): If True, per-shot measurement bitstrings are returned as well
(provided the backend supports it). For OpenPulse jobs, only
measurement level 2 supports this option. Default: False
max_credits (int): DEPRECATED This parameter is deprecated as of Qiskit Terra 0.20.0
and will be removed in a future release. This parameter has no effect on modern
IBM Quantum systems, no alternative is necessary.
seed_simulator (int): Random seed to control sampling, for when backend is a simulator
default_qubit_los (Optional[List[float]]): List of job level qubit drive LO frequencies
in Hz. Overridden by ``schedule_los`` if specified. Must have length ``n_qubits``.
default_meas_los (Optional[List[float]]): List of job level measurement LO frequencies in
Hz. Overridden by ``schedule_los`` if specified. Must have length ``n_qubits``.
qubit_lo_range (Optional[List[List[float]]]): List of job level drive LO ranges each of form
``[range_min, range_max]`` in Hz. Used to validate ``qubit_lo_freq``. Must have length
``n_qubits``.
meas_lo_range (Optional[List[List[float]]]): List of job level measurement LO ranges each of
form ``[range_min, range_max]`` in Hz. Used to validate ``meas_lo_freq``. Must have
length ``n_qubits``.
schedule_los (list):
Experiment level (i.e. circuit or schedule) LO frequency configurations for qubit drive
and measurement channels. These values override the job level values from
``default_qubit_los`` and ``default_meas_los``. Frequencies are in Hz. Settable for qasm
and pulse jobs.
If a single LO config or dict is used, the values are set at job level. If a list is
used, the list must be the size of the number of experiments in the job, except in the
case of a single experiment. In this case, a frequency sweep will be assumed and one
experiment will be created for every list entry.
Not every channel is required to be specified. If not specified, the backend default
value will be used.
meas_level (int or MeasLevel): Set the appropriate level of the
measurement output for pulse experiments.
meas_return (str or MeasReturn): Level of measurement data for the
backend to return. For ``meas_level`` 0 and 1:
``"single"`` returns information from every shot.
``"avg"`` returns average measurement output (averaged over number
of shots).
memory_slots (int): Number of classical memory slots used in this job.
memory_slot_size (int): Size of each memory slot if the output is Level 0.
rep_time (int): Time per program execution in seconds. Must be from the list provided
by the backend (``backend.configuration().rep_times``). Defaults to the first entry.
rep_delay (float): Delay between programs in seconds. Only supported on certain
backends (``backend.configuration().dynamic_reprate_enabled`` ). If supported,
``rep_delay`` will be used instead of ``rep_time`` and must be from the range supplied
by the backend (``backend.configuration().rep_delay_range``). Default is given by
``backend.configuration().default_rep_delay``.
parameter_binds (list[dict]): List of Parameter bindings over which the set of
experiments will be executed. Each list element (bind) should be of the form
``{Parameter1: value1, Parameter2: value2, ...}``. All binds will be
executed across all experiments, e.g. if parameter_binds is a
length-n list, and there are m experiments, a total of :math:`m \times n`
experiments will be run (one for each experiment/bind pair).
schedule_circuit (bool): If ``True``, ``experiments`` will be converted to
:class:`qiskit.pulse.Schedule` objects prior to execution.
inst_map (InstructionScheduleMap):
Mapping of circuit operations to pulse schedules. If None, defaults to the
``instruction_schedule_map`` of ``backend``.
meas_map (list(list(int))):
List of sets of qubits that must be measured together. If None, defaults to
the ``meas_map`` of ``backend``.
scheduling_method (str or list(str)):
Optionally specify a particular scheduling method.
init_qubits (bool): Whether to reset the qubits to the ground state for each shot.
Default: ``True``.
run_config (dict):
Extra arguments used to configure the run (e.g. for Aer configurable backends).
Refer to the backend documentation for details on these arguments.
Note: for now, these keyword arguments will both be copied to the
Qobj config, and passed to backend.run()
Returns:
Job: returns job instance derived from Job
Raises:
QiskitError: if the execution cannot be interpreted as either circuits or schedules
Example:
Construct a 5-qubit GHZ circuit and execute 4321 shots on a backend.
.. jupyter-execute::
from qiskit import QuantumCircuit, execute, BasicAer
backend = BasicAer.get_backend('qasm_simulator')
qc = QuantumCircuit(5, 5)
qc.h(0)
qc.cx(0, range(1, 5))
qc.measure_all()
job = execute(qc, backend, shots=4321)
"""
if isinstance(experiments, (Schedule, ScheduleBlock)) or (
isinstance(experiments, list) and isinstance(experiments[0], (Schedule, ScheduleBlock))
):
# do not transpile a schedule circuit
if schedule_circuit:
raise QiskitError("Must supply QuantumCircuit to schedule circuit.")
elif pass_manager is not None:
# transpiling using pass_manager
_check_conflicting_argument(
optimization_level=optimization_level,
basis_gates=basis_gates,
coupling_map=coupling_map,
seed_transpiler=seed_transpiler,
backend_properties=backend_properties,
initial_layout=initial_layout,
)
experiments = pass_manager.run(experiments)
else:
# transpiling the circuits using given transpile options
experiments = transpile(
experiments,
basis_gates=basis_gates,
coupling_map=coupling_map,
backend_properties=backend_properties,
initial_layout=initial_layout,
seed_transpiler=seed_transpiler,
optimization_level=optimization_level,
backend=backend,
)
if schedule_circuit:
experiments = schedule(
circuits=experiments,
backend=backend,
inst_map=inst_map,
meas_map=meas_map,
method=scheduling_method,
)
if max_credits is not None:
warnings.warn(
"The `max_credits` parameter is deprecated as of Qiskit Terra 0.20.0, "
"and will be removed in a future release. This parameter has no effect on "
"modern IBM Quantum systems, and no alternative is necessary.",
DeprecationWarning,
stacklevel=2,
)
if qobj_id is not None:
warnings.warn(
"The qobj_id argument is deprecated as of the Qiskit Terra 0.21.0, "
"and will be remvoed in a future release. This argument has no effect and "
"is not used by any backends."
)
if qobj_header is not None:
warnings.warn(
"The qobj_header argument is deprecated as of the Qiskit Terra 0.21.0, "
"and will be remvoed in a future release. This argument has no effect and "
"is not used by any backends."
)
if isinstance(backend, Backend):
start_time = time()
run_kwargs = {
"shots": shots,
"memory": memory,
"seed_simulator": seed_simulator,
"qubit_lo_freq": default_qubit_los,
"meas_lo_freq": default_meas_los,
"qubit_lo_range": qubit_lo_range,
"meas_lo_range": meas_lo_range,
"schedule_los": schedule_los,
"meas_level": meas_level,
"meas_return": meas_return,
"memory_slots": memory_slots,
"memory_slot_size": memory_slot_size,
"rep_time": rep_time,
"rep_delay": rep_delay,
"init_qubits": init_qubits,
}
for key in list(run_kwargs.keys()):
if not hasattr(backend.options, key):
if run_kwargs[key] is not None:
logger.info(
"%s backend doesn't support option %s so not passing that kwarg to run()",
backend.name,
key,
)
del run_kwargs[key]
elif run_kwargs[key] is None:
del run_kwargs[key]
if parameter_binds:
run_kwargs["parameter_binds"] = parameter_binds
run_kwargs.update(run_config)
job = backend.run(experiments, **run_kwargs)
end_time = time()
_log_submission_time(start_time, end_time)
else:
raise QiskitError("Invalid backend type %s" % type(backend))
return job
|
def execute(
experiments,
backend,
basis_gates=None,
coupling_map=None, # circuit transpile options
backend_properties=None,
initial_layout=None,
seed_transpiler=None,
optimization_level=None,
pass_manager=None,
qobj_id=None,
qobj_header=None,
shots=None, # common run options
memory=None,
max_credits=None,
seed_simulator=None,
default_qubit_los=None,
default_meas_los=None, # schedule run options
qubit_lo_range=None,
meas_lo_range=None,
schedule_los=None,
meas_level=None,
meas_return=None,
memory_slots=None,
memory_slot_size=None,
rep_time=None,
rep_delay=None,
parameter_binds=None,
schedule_circuit=False,
inst_map=None,
meas_map=None,
scheduling_method=None,
init_qubits=None,
**run_config,
):
"""Execute a list of :class:`qiskit.circuit.QuantumCircuit` or
:class:`qiskit.pulse.Schedule` on a backend.
The execution is asynchronous, and a handle to a job instance is returned.
Args:
experiments (QuantumCircuit or list[QuantumCircuit] or Schedule or list[Schedule]):
Circuit(s) or pulse schedule(s) to execute
backend (Backend):
Backend to execute circuits on.
Transpiler options are automatically grabbed from
backend.configuration() and backend.properties().
If any other option is explicitly set (e.g. coupling_map), it
will override the backend's.
basis_gates (list[str]):
List of basis gate names to unroll to.
e.g: ``['u1', 'u2', 'u3', 'cx']``
If ``None``, do not unroll.
coupling_map (CouplingMap or list): Coupling map (perhaps custom) to
target in mapping. Multiple formats are supported:
#. CouplingMap instance
#. list
Must be given as an adjacency matrix, where each entry
specifies all two-qubit interactions supported by backend
e.g:
``[[0, 1], [0, 3], [1, 2], [1, 5], [2, 5], [4, 1], [5, 3]]``
backend_properties (BackendProperties):
Properties returned by a backend, including information on gate
errors, readout errors, qubit coherence times, etc. Find a backend
that provides this information with:
``backend.properties()``
initial_layout (Layout or dict or list):
Initial position of virtual qubits on physical qubits.
If this layout makes the circuit compatible with the coupling_map
constraints, it will be used.
The final layout is not guaranteed to be the same, as the transpiler
may permute qubits through swaps or other means.
Multiple formats are supported:
#. :class:`qiskit.transpiler.Layout` instance
#. ``dict``:
virtual to physical::
{qr[0]: 0,
qr[1]: 3,
qr[2]: 5}
physical to virtual::
{0: qr[0],
3: qr[1],
5: qr[2]}
#. ``list``
virtual to physical::
[0, 3, 5] # virtual qubits are ordered (in addition to named)
physical to virtual::
[qr[0], None, None, qr[1], None, qr[2]]
seed_transpiler (int): Sets random seed for the stochastic parts of the transpiler
optimization_level (int): How much optimization to perform on the circuits.
Higher levels generate more optimized circuits,
at the expense of longer transpilation time.
#. No optimization
#. Light optimization
#. Heavy optimization
#. Highest optimization
If None, level 1 will be chosen as default.
pass_manager (PassManager): The pass manager to use during transpilation. If this
arg is present, auto-selection of pass manager based on the transpile options
will be turned off and this pass manager will be used directly.
qobj_id (str): DEPRECATED: String identifier to annotate the Qobj. This has no effect
and the :attr:`~.QuantumCircuit.name` attribute of the input circuit(s) should be used
instead.
qobj_header (QobjHeader or dict): DEPRECATED: User input that will be inserted in Qobj
header, and will also be copied to the corresponding :class:`qiskit.result.Result`
header. Headers do not affect the run. This kwarg
has no effect anymore and the :attr:`~.QuantumCircuit.metadata` attribute of the
input circuit(s) should be used instead.
shots (int): Number of repetitions of each circuit, for sampling. Default: 1024
memory (bool): If True, per-shot measurement bitstrings are returned as well
(provided the backend supports it). For OpenPulse jobs, only
measurement level 2 supports this option. Default: False
max_credits (int): DEPRECATED This parameter is deprecated as of Qiskit Terra 0.20.0
and will be removed in a future release. This parameter has no effect on modern
IBM Quantum systems, no alternative is necessary.
seed_simulator (int): Random seed to control sampling, for when backend is a simulator
default_qubit_los (Optional[List[float]]): List of job level qubit drive LO frequencies
in Hz. Overridden by ``schedule_los`` if specified. Must have length ``n_qubits``.
default_meas_los (Optional[List[float]]): List of job level measurement LO frequencies in
Hz. Overridden by ``schedule_los`` if specified. Must have length ``n_qubits``.
qubit_lo_range (Optional[List[List[float]]]): List of job level drive LO ranges each of form
``[range_min, range_max]`` in Hz. Used to validate ``qubit_lo_freq``. Must have length
``n_qubits``.
meas_lo_range (Optional[List[List[float]]]): List of job level measurement LO ranges each of
form ``[range_min, range_max]`` in Hz. Used to validate ``meas_lo_freq``. Must have
length ``n_qubits``.
schedule_los (list):
Experiment level (i.e. circuit or schedule) LO frequency configurations for qubit drive
and measurement channels. These values override the job level values from
``default_qubit_los`` and ``default_meas_los``. Frequencies are in Hz. Settable for qasm
and pulse jobs.
If a single LO config or dict is used, the values are set at job level. If a list is
used, the list must be the size of the number of experiments in the job, except in the
case of a single experiment. In this case, a frequency sweep will be assumed and one
experiment will be created for every list entry.
Not every channel is required to be specified. If not specified, the backend default
value will be used.
meas_level (int or MeasLevel): Set the appropriate level of the
measurement output for pulse experiments.
meas_return (str or MeasReturn): Level of measurement data for the
backend to return. For ``meas_level`` 0 and 1:
``"single"`` returns information from every shot.
``"avg"`` returns average measurement output (averaged over number
of shots).
memory_slots (int): Number of classical memory slots used in this job.
memory_slot_size (int): Size of each memory slot if the output is Level 0.
rep_time (int): Time per program execution in seconds. Must be from the list provided
by the backend (``backend.configuration().rep_times``). Defaults to the first entry.
rep_delay (float): Delay between programs in seconds. Only supported on certain
backends (``backend.configuration().dynamic_reprate_enabled`` ). If supported,
``rep_delay`` will be used instead of ``rep_time`` and must be from the range supplied
by the backend (``backend.configuration().rep_delay_range``). Default is given by
``backend.configuration().default_rep_delay``.
parameter_binds (list[dict]): List of Parameter bindings over which the set of
experiments will be executed. Each list element (bind) should be of the form
``{Parameter1: value1, Parameter2: value2, ...}``. All binds will be
executed across all experiments, e.g. if parameter_binds is a
length-n list, and there are m experiments, a total of :math:`m \times n`
experiments will be run (one for each experiment/bind pair).
schedule_circuit (bool): If ``True``, ``experiments`` will be converted to
:class:`qiskit.pulse.Schedule` objects prior to execution.
inst_map (InstructionScheduleMap):
Mapping of circuit operations to pulse schedules. If None, defaults to the
``instruction_schedule_map`` of ``backend``.
meas_map (list(list(int))):
List of sets of qubits that must be measured together. If None, defaults to
the ``meas_map`` of ``backend``.
scheduling_method (str or list(str)):
Optionally specify a particular scheduling method.
init_qubits (bool): Whether to reset the qubits to the ground state for each shot.
Default: ``True``.
run_config (dict):
Extra arguments used to configure the run (e.g. for Aer configurable backends).
Refer to the backend documentation for details on these arguments.
Note: for now, these keyword arguments will both be copied to the
Qobj config, and passed to backend.run()
Returns:
Job: returns job instance derived from Job
Raises:
QiskitError: if the execution cannot be interpreted as either circuits or schedules
Example:
Construct a 5-qubit GHZ circuit and execute 4321 shots on a backend.
.. jupyter-execute::
from qiskit import QuantumCircuit, execute, BasicAer
backend = BasicAer.get_backend('qasm_simulator')
qc = QuantumCircuit(5, 5)
qc.h(0)
qc.cx(0, range(1, 5))
qc.measure_all()
job = execute(qc, backend, shots=4321)
"""
if isinstance(experiments, (Schedule, ScheduleBlock)) or (
isinstance(experiments, list) and isinstance(experiments[0], (Schedule, ScheduleBlock))
):
# do not transpile a schedule circuit
if schedule_circuit:
raise QiskitError("Must supply QuantumCircuit to schedule circuit.")
elif pass_manager is not None:
# transpiling using pass_manager
_check_conflicting_argument(
optimization_level=optimization_level,
basis_gates=basis_gates,
coupling_map=coupling_map,
seed_transpiler=seed_transpiler,
backend_properties=backend_properties,
initial_layout=initial_layout,
)
experiments = pass_manager.run(experiments)
else:
# transpiling the circuits using given transpile options
experiments = transpile(
experiments,
basis_gates=basis_gates,
coupling_map=coupling_map,
backend_properties=backend_properties,
initial_layout=initial_layout,
seed_transpiler=seed_transpiler,
optimization_level=optimization_level,
backend=backend,
)
if schedule_circuit:
experiments = schedule(
circuits=experiments,
backend=backend,
inst_map=inst_map,
meas_map=meas_map,
method=scheduling_method,
)
if max_credits is not None:
warnings.warn(
"The `max_credits` parameter is deprecated as of Qiskit Terra 0.20.0, "
"and will be removed in a future release. This parameter has no effect on "
"modern IBM Quantum systems, and no alternative is necessary.",
DeprecationWarning,
stacklevel=2,
)
if qobj_id is not None:
warnings.warn(
"The qobj_id argument is deprecated as of the Qiskit Terra 0.21.0, "
"and will be remvoed in a future release. This argument has no effect and "
"is not used by any backends."
)
if qobj_header is not None:
warnings.warn(
"The qobj_header argument is deprecated as of the Qiskit Terra 0.21.0, "
"and will be remvoed in a future release. This argument has no effect and "
"is not used by any backends."
)
if isinstance(backend, Backend):
start_time = time()
run_kwargs = {
"shots": shots,
"memory": memory,
"seed_simulator": seed_simulator,
"qubit_lo_freq": default_qubit_los,
"meas_lo_freq": default_meas_los,
"qubit_lo_range": qubit_lo_range,
"meas_lo_range": meas_lo_range,
"schedule_los": schedule_los,
"meas_level": meas_level,
"meas_return": meas_return,
"memory_slots": memory_slots,
"memory_slot_size": memory_slot_size,
"rep_time": rep_time,
"rep_delay": rep_delay,
"init_qubits": init_qubits,
}
for key in list(run_kwargs.keys()):
if not hasattr(backend.options, key):
if run_kwargs[key] is not None:
logger.info(
"%s backend doesn't support option %s so not passing that kwarg to run()",
backend.name,
key,
)
del run_kwargs[key]
elif run_kwargs[key] is None:
del run_kwargs[key]
if parameter_binds:
run_kwargs["parameter_binds"] = parameter_binds
run_kwargs.update(run_config)
job = backend.run(experiments, **run_kwargs)
end_time = time()
_log_submission_time(start_time, end_time)
else:
raise QiskitError("Invalid backend type %s" % type(backend))
return job
|
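Before calling backend.run, both versions prune run_kwargs down to options the backend actually exposes, logging and dropping anything unsupported. That filtering is plain Python; the sketch below reproduces it with a stand-in options object (all names here are invented for illustration and are not Qiskit API):

import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

class FakeOptions:
    """Stand-in for backend.options: only these attributes are 'supported'."""
    shots = 1024
    memory = False

run_kwargs = {"shots": 4321, "memory": None, "rep_delay": 0.0002}

for key in list(run_kwargs):
    if not hasattr(FakeOptions, key):
        if run_kwargs[key] is not None:
            logger.info("backend doesn't support option %s, dropping it", key)
        del run_kwargs[key]
    elif run_kwargs[key] is None:
        del run_kwargs[key]

print(run_kwargs)  # {'shots': 4321}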
30,157 |
def fetch_production(
zone_key="CA-QC",
session=None,
target_datetime=None,
logger=logging.getLogger(__name__),
) -> dict:
"""Requests the last known production mix (in MW) of a given region.
In this particular case, a translated mapping of JSON keys is also required"""
def if_exists(elem: dict, etype: str):
english = {
"hydraulique": "hydro",
"thermique": "thermal",
"solaire": "solar",
"eolien": "wind",
"autres": "unknown",
"valeurs": "values",
}
english = {v: k for k, v in english.items()}
try:
return elem["valeurs"][english[etype]]
except KeyError:
return 0.0
data = _fetch_quebec_production()
for elem in reversed(data["details"]):
if elem["valeurs"]["total"] != 0:
return {
"zoneKey": zone_key,
"datetime": elem["date"],
"production": {
"biomass": 0.0,
"coal": 0.0,
"gas": 0.0,
"hydro": if_exists(elem, "hydro"),
"nuclear": 0.0,
"oil": 0.0,
"solar": if_exists(elem, "solar"),
"wind": if_exists(elem, "wind"),
"geothermal": if_exists(elem, "geothermal"),
"unknown": if_exists(elem, "unknown"),
},
"source": "hydroquebec.com",
}
|
def fetch_production(
zone_key="CA-QC",
session=None,
target_datetime=None,
logger=logging.getLogger(__name__),
) -> dict:
"""Requests the last known production mix (in MW) of a given region.
In this particular case, a translated mapping of JSON keys is also required"""
def if_exists(elem: dict, etype: str):
english = {
"hydraulique": "hydro",
"thermique": "thermal",
"solaire": "solar",
"eolien": "wind",
"autres": "unknown",
"valeurs": "values",
}
english = {v: k for k, v in english.items()}
try:
return elem["valeurs"][english[etype]]
except KeyError:
return 0.0
data = _fetch_quebec_production()
for elem in reversed(data["details"]):
if elem["valeurs"]["total"] != 0:
return {
"zoneKey": zone_key,
"datetime": arrow.get(elem["date"]).datetime,
"production": {
"biomass": 0.0,
"coal": 0.0,
"gas": 0.0,
"hydro": if_exists(elem, "hydro"),
"nuclear": 0.0,
"oil": 0.0,
"solar": if_exists(elem, "solar"),
"wind": if_exists(elem, "wind"),
"geothermal": if_exists(elem, "geothermal"),
"unknown": if_exists(elem, "unknown"),
},
"source": "hydroquebec.com",
}
|
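A minimal standalone sketch of the French-to-English key translation used by if_exists in the row above; the sample payload and helper name below are illustrative only, not real Hydro-Québec data.
english_to_french = {v: k for k, v in {
    "hydraulique": "hydro",
    "thermique": "thermal",
    "solaire": "solar",
    "eolien": "wind",
    "autres": "unknown",
}.items()}

sample = {"valeurs": {"hydraulique": 35000.0, "eolien": 2500.0, "total": 37500.0}}

def lookup(elem: dict, etype: str) -> float:
    # Missing keys fall back to 0.0, mirroring the KeyError handling above.
    return elem["valeurs"].get(english_to_french.get(etype, ""), 0.0)

print(lookup(sample, "hydro"))  # 35000.0
print(lookup(sample, "solar"))  # 0.0 (key absent in this sample)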
25,211 |
def test_kw_only_decorator() -> None:
"""Test that we update the signature correctly based on the keyword.
Keyword was introduced in PY310.
"""
foodef, bardef, cee, dee = astroid.extract_node(
"""
from dataclasses import dataclass
@dataclass(kw_only=True)
class Foo:
a: int
b: str
@dataclass(kw_only=False)
class Bar(Foo):
c: int
@dataclass(kw_only=False)
class Cee(Bar):
d: int
@dataclass(kw_only=True)
class Dee(Cee):
e: int
Foo.__init__ #@
Bar.__init__ #@
Cee.__init__ #@
Dee.__init__ #@
"""
)
foo_init: bases.UnboundMethod = next(foodef.infer())
if PY310_PLUS:
assert [a.name for a in foo_init.args.args] == ["self"]
assert [a.name for a in foo_init.args.kwonlyargs] == ["a", "b"]
else:
assert [a.name for a in foo_init.args.args] == ["self", "a", "b"]
assert [a.name for a in foo_init.args.kwonlyargs] == []
bar_init: bases.UnboundMethod = next(bardef.infer())
if PY310_PLUS:
assert [a.name for a in bar_init.args.args] == ["self", "c"]
assert [a.name for a in bar_init.args.kwonlyargs] == ["a", "b"]
else:
assert [a.name for a in bar_init.args.args] == ["self", "a", "b", "c"]
assert [a.name for a in bar_init.args.kwonlyargs] == []
cee_init: bases.UnboundMethod = next(cee.infer())
if PY310_PLUS:
assert [a.name for a in cee_init.args.args] == ["self", "c", "d"]
assert [a.name for a in cee_init.args.kwonlyargs] == ["a", "b"]
else:
assert [a.name for a in cee_init.args.args] == ["self", "a", "b", "c", "d"]
assert [a.name for a in cee_init.args.kwonlyargs] == []
dee_init: bases.UnboundMethod = next(dee.infer())
if PY310_PLUS:
assert [a.name for a in dee_init.args.args] == ["self", "c", "d"]
assert [a.name for a in dee_init.args.kwonlyargs] == ["a", "b", "e"]
else:
assert [a.name for a in dee_init.args.args] == ["self", "a", "b", "c", "d", "e"]
assert [a.name for a in dee_init.args.kwonlyargs] == []
|
def test_kw_only_decorator() -> None:
"""Test that we update the signature correctly based on the keyword.
kw_only was introduced in PY310.
"""
foodef, bardef, cee, dee = astroid.extract_node(
"""
from dataclasses import dataclass
@dataclass(kw_only=True)
class Foo:
a: int
b: str
@dataclass(kw_only=False)
class Bar(Foo):
c: int
@dataclass(kw_only=False)
class Cee(Bar):
d: int
@dataclass(kw_only=True)
class Dee(Cee):
e: int
Foo.__init__ #@
Bar.__init__ #@
Cee.__init__ #@
Dee.__init__ #@
"""
)
foo_init: bases.UnboundMethod = next(foodef.infer())
if PY310_PLUS:
assert [a.name for a in foo_init.args.args] == ["self"]
assert [a.name for a in foo_init.args.kwonlyargs] == ["a", "b"]
else:
assert [a.name for a in foo_init.args.args] == ["self", "a", "b"]
assert [a.name for a in foo_init.args.kwonlyargs] == []
bar_init: bases.UnboundMethod = next(bardef.infer())
if PY310_PLUS:
assert [a.name for a in bar_init.args.args] == ["self", "c"]
assert [a.name for a in bar_init.args.kwonlyargs] == ["a", "b"]
else:
assert [a.name for a in bar_init.args.args] == ["self", "a", "b", "c"]
assert [a.name for a in bar_init.args.kwonlyargs] == []
cee_init: bases.UnboundMethod = next(cee.infer())
if PY310_PLUS:
assert [a.name for a in cee_init.args.args] == ["self", "c", "d"]
assert [a.name for a in cee_init.args.kwonlyargs] == ["a", "b"]
else:
assert [a.name for a in cee_init.args.args] == ["self", "a", "b", "c", "d"]
assert [a.name for a in cee_init.args.kwonlyargs] == []
dee_init: bases.UnboundMethod = next(dee.infer())
if PY310_PLUS:
assert [a.name for a in dee_init.args.args] == ["self", "c", "d"]
assert [a.name for a in dee_init.args.kwonlyargs] == ["a", "b", "e"]
else:
assert [a.name for a in dee_init.args.args] == ["self", "a", "b", "c", "d", "e"]
assert [a.name for a in dee_init.args.kwonlyargs] == []
|
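For reference, a small standalone sketch (Python 3.10+ only) of the dataclass kw_only behaviour the test above asserts: kw_only fields become keyword-only in the generated __init__, while fields added by a kw_only=False subclass stay positional.
from dataclasses import dataclass
import inspect

@dataclass(kw_only=True)
class Foo:
    a: int
    b: str

@dataclass(kw_only=False)
class Bar(Foo):
    c: int

print(inspect.signature(Bar.__init__))  # (self, c: int, *, a: int, b: str) -> None
print(Bar(1, a=2, b="x"))               # Bar(a=2, b='x', c=1)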
13,883 |
def test_build(
compiled,
format,
available_targets,
generate_reference,
update_reference,
archive_differences,
):
name = compiled
scrub = SCRUBBERS[format]
output_pattern = OUTPUT_PATTERN[format]
assert_equals = ASSERT_EQUALS.get(format, None)
encoding = "utf8"
if format == "html" and name.startswith("html-encoding-"):
encoding = re.match("^html-encoding-(.*)$", name).group(1)
os.chdir(os.path.join(basedir, name))
assert run(["make", format])
reference_dir = REFERENCE_DIR
if (platform.system() == "Windows") and os.path.isdir(reference_dir + "-Windows"):
reference_dir += "-Windows"
if generate_reference: # pragma: no cover
for pattern in output_pattern:
for generated_file in glob.glob(pattern):
reference_file = os.path.join(reference_dir, generated_file)
if os.path.isfile(reference_file):
continue
else:
try:
os.makedirs(reference_dir)
except FileExistsError:
# directory already exists
pass
print("copying %s to %s" % (generated_file, reference_file))
shutil.copyfile(generated_file, reference_file)
whole_diff_output = []
for coverage_file, reference_file in find_reference_files(
reference_dir, output_pattern
):
with io.open(coverage_file, encoding=encoding) as f:
coverage = scrub(f.read())
with io.open(reference_file, encoding=encoding) as f:
reference = scrub(f.read())
try:
if assert_equals is not None:
assert_equals(coverage, reference)
else:
diff_out = list(
difflib.unified_diff(
reference.splitlines(keepends=True),
coverage.splitlines(keepends=True),
fromfile=reference_file,
tofile=coverage_file,
)
)
diff_is_empty = len(diff_out) == 0
assert diff_is_empty, "".join(diff_out)
except Exception as e: # pragma: no cover
whole_diff_output += " " + str(e) + "\n"
if update_reference:
shutil.copyfile(coverage_file, reference_file)
if archive_differences:
diffs_zip = os.path.join("..", "diff.zip")
with zipfile.ZipFile(diffs_zip, mode="a") as f:
f.write(
coverage_file,
os.path.join(name, reference_dir, coverage_file).replace(
os.path.sep, "/"
),
)
diff_is_empty = len(whole_diff_output) == 0
assert diff_is_empty, "Diff output:\n" + "".join(whole_diff_output)
# some tests require additional cleanup after each test
if "clean-each" in available_targets:
assert run(["make", "clean-each"])
os.chdir(basedir)
|
def test_build(
compiled,
format,
available_targets,
generate_reference,
update_reference,
archive_differences,
):
name = compiled
scrub = SCRUBBERS[format]
output_pattern = OUTPUT_PATTERN[format]
assert_equals = ASSERT_EQUALS.get(format, None)
encoding = "utf8"
if format == "html" and name.startswith("html-encoding-"):
encoding = re.match("^html-encoding-(.*)$", name).group(1)
os.chdir(os.path.join(basedir, name))
assert run(["make", format])
reference_dir = REFERENCE_DIR
if (platform.system() == "Windows") and os.path.isdir(reference_dir + "-Windows"):
reference_dir += "-Windows"
if generate_reference: # pragma: no cover
for pattern in output_pattern:
for generated_file in glob.glob(pattern):
reference_file = os.path.join(reference_dir, generated_file)
if os.path.isfile(reference_file):
continue
else:
try:
os.makedirs(reference_dir)
except FileExistsError:
# directory already exists
pass
print("copying %s to %s" % (generated_file, reference_file))
shutil.copyfile(generated_file, reference_file)
whole_diff_output = []
for coverage_file, reference_file in find_reference_files(
reference_dir, output_pattern
):
with io.open(coverage_file, encoding=encoding) as f:
coverage = scrub(f.read())
with io.open(reference_file, encoding=encoding) as f:
reference = scrub(f.read())
try:
if assert_equals is not None:
assert_equals(coverage, reference)
else:
diff_out = list(
difflib.unified_diff(
reference.splitlines(keepends=True),
coverage.splitlines(keepends=True),
fromfile=reference_file,
tofile=coverage_file,
)
)
diff_is_empty = len(diff_out) == 0
assert diff_is_empty, "".join(diff_out)
except Exception as e: # pragma: no cover
whole_diff_output += " " + str(e) + "\n"
if update_reference:
shutil.copyfile(coverage_file, reference_file)
if archive_differences:
diffs_zip = os.path.join("..", "diff.zip")
with zipfile.ZipFile(diffs_zip, mode="a") as f:
f.write(
coverage_file,
os.path.join(name, reference_dir, coverage_file).replace(
os.path.sep, "/"
),
)
diff_is_empty = len(whole_diff_output) == 0
assert diff_is_empty, "Diff output:\n" + "".join(whole_diff_output)
# some tests require additional cleanup after each test
if "clean-each" in available_targets: # pragma: no cover
assert run(["make", "clean-each"])
os.chdir(basedir)
|
54,004 |
def _run_analyzer_modules(analyzer_report, configdir, imageId, unpackdir, outputdir):
for f in list_modules():
cmdstr = " ".join([f, configdir, imageId, unpackdir, outputdir, unpackdir])
with anchore_engine.utils.timer(
"Executing analyzer %s" % str(f), log_level="info"
):
try:
rc, sout, serr = anchore_engine.utils.run_command(cmdstr)
sout = anchore_engine.utils.ensure_str(sout)
serr = anchore_engine.utils.ensure_str(serr)
if rc != 0:
logger.error(
"command failed: cmd=%s exitcode=%s stdout=%s stderr=%s"
% (
repr(cmdstr),
repr(rc),
repr(sout.strip()),
repr(serr.strip()),
)
)
else:
logger.debug(
"command succeeded: cmd=%s stdout=%s stderr=%s"
% (repr(cmdstr), repr(sout.strip()), repr(serr.strip()))
)
except Exception:
logger.exception(
"Unexpected exception while running analyzer module (%s)", repr(f)
)
analyzer_output_dir = os.path.join(outputdir, "analyzer_output")
for analyzer_output in os.listdir(analyzer_output_dir):
element_dir = os.path.join(analyzer_output_dir, analyzer_output)
for element in os.listdir(element_dir):
data_path = os.path.join(element_dir, element)
data = utils.read_kvfile_todict(data_path)
if data:
analyzer_report[analyzer_output][element] = {"base": data}
|
def _run_analyzer_modules(analyzer_report, configdir, imageId, unpackdir, outputdir):
for f in list_modules():
cmdstr = " ".join([f, configdir, imageId, unpackdir, outputdir, unpackdir])
with anchore_engine.utils.timer(
"Executing analyzer %s" % str(f), log_level="info"
):
try:
rc, sout, serr = anchore_engine.utils.run_command(cmdstr)
sout = anchore_engine.utils.ensure_str(sout)
serr = anchore_engine.utils.ensure_str(serr)
if rc != 0:
logger.error(
"command failed: cmd=%s exitcode=%s stdout=%s stderr=%s"
% (
repr(cmdstr),
repr(rc),
repr(sout.strip()),
repr(serr.strip()),
)
)
else:
logger.debug(
"command succeeded: cmd=%s stdout=%s stderr=%s",
repr(cmdstr), repr(sout.strip()), repr(serr.strip())
)
except Exception:
logger.exception(
"Unexpected exception while running analyzer module (%s)", repr(f)
)
analyzer_output_dir = os.path.join(outputdir, "analyzer_output")
for analyzer_output in os.listdir(analyzer_output_dir):
element_dir = os.path.join(analyzer_output_dir, analyzer_output)
for element in os.listdir(element_dir):
data_path = os.path.join(element_dir, element)
data = utils.read_kvfile_todict(data_path)
if data:
analyzer_report[analyzer_output][element] = {"base": data}
|
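The modified column above passes the logger.debug arguments lazily instead of %-formatting the message eagerly, so interpolation only happens when the record is actually emitted; a minimal standalone illustration (logger name and messages are made up):
import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("demo")

# Arguments are only interpolated if DEBUG records are handled, which they are not here.
logger.debug("command succeeded: cmd=%s stdout=%s", repr("echo hi"), repr("hi"))
logger.info("command failed: cmd=%s exitcode=%s", repr("false"), repr(1))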
5,701 |
def _validate_lower_upper(lower, upper):
if np.shape(lower) != np.shape(upper):
raise ValueError('lower and upper must have the same shape.')
if np.isnan(lower).any() or np.isnan(upper).any():
raise ValueError('lower and upper must not contain nan.')
if (np.isinf(lower) & np.isinf(upper)).any():
raise ValueError('lower and upper must not both be infinity.')
if (lower > upper).any():
raise ValueError('Elements of lower must not be greater than the '
'corresponding elements of upper.')
|
def _validate_lower_upper(lower, upper):
if np.shape(lower) != np.shape(upper):
raise ValueError('lower and upper must have the same shape.')
if np.isnan(lower).any() or np.isnan(upper).any():
raise ValueError('lower and upper must not contain nan.')
if (np.isinf(lower) & np.isinf(upper)).any():
raise ValueError('lower and upper must not both be infinite.')
if (lower > upper).any():
raise ValueError('Elements of lower must not be greater than the '
'corresponding elements of upper.')
|
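A quick illustration, with made-up arrays, of the final check above firing:
import numpy as np

lower = np.array([0.0, -np.inf, 1.0])
upper = np.array([1.0, 2.0, 0.5])
# Shapes match, no NaNs, no position is infinite in both, but lower[2] > upper[2]:
print((lower > upper).any())  # True -> _validate_lower_upper would raise ValueError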
31,415 |
def limit_attributes_count(event):
if 'Attribute' in event and len(event['Attribute']) > MAX_ATTRIBUTES:
attributes = event['Attribute']
attributes_num = len(attributes)
event_id = event.get('id', '')
event_uuid = event.get('uuid')
demisto.info(f'Limiting amount of attributes in event to {MAX_ATTRIBUTES} '
f'to keep context from being overwhelmed, '
f'this limit can be changed in the integration configuration. '
f'Event ID:{event_id}, event UUID:{event_uuid}, Attributes in event:{attributes_num}')
sorted_attributes = sorted(attributes, key=lambda at: int(at.get('timestamp', 0)))
#dropping oldest attributes
event['Attribute'] = sorted_attributes[attributes_num - MAX_ATTRIBUTES:]
return event
|
def limit_attributes_count(event):
if 'Attribute' in event and len(event['Attribute']) > MAX_ATTRIBUTES:
attributes = event['Attribute']
attributes_num = len(attributes)
event_id = event.get('id', '')
event_uuid = event.get('uuid')
demisto.info(f'Limiting amount of attributes in event to {MAX_ATTRIBUTES} '
f'to keep context from being overwhelmed, '
f'this limit can be changed in the integration configuration. '
f'Event ID:{event_id}, event UUID:{event_uuid}, Attributes in event:{attributes_num}')
sorted_attributes = sorted(attributes, key=lambda at: int(at.get('timestamp', 0)))
#dropping oldest attributes
event['Attribute'] = sorted_attributes[-MAX_ATTRIBUTES:]
return event
|
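Worked example of the slicing in the two columns above: once the list is sorted by ascending timestamp and its length exceeds MAX_ATTRIBUTES, slicing from attributes_num - MAX_ATTRIBUTES and slicing with -MAX_ATTRIBUTES both keep the newest MAX_ATTRIBUTES entries (values below are illustrative).
MAX_ATTRIBUTES = 3
attributes = [{"timestamp": str(t)} for t in (5, 1, 4, 2, 3)]
sorted_attributes = sorted(attributes, key=lambda at: int(at.get('timestamp', 0)))
attributes_num = len(sorted_attributes)
assert sorted_attributes[attributes_num - MAX_ATTRIBUTES:] == sorted_attributes[-MAX_ATTRIBUTES:]
print([a["timestamp"] for a in sorted_attributes[-MAX_ATTRIBUTES:]])  # ['3', '4', '5']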
26,398 |
def generate_config(context):
# Using some global values from an external config file.
# Hardcoded for this example.
global_prefic = "acc "
# Manipulate context.properties #
for folder in context.properties["folders"]:
folder["displayName"] = global_prefic + folder["displayName"]
# Passing values forward to CFT template
return {
'resources': [{
'type': "cft-folder.py",
'name': context.env['name'],
'properties': context.properties}]
}
|
def generate_config(context):
# Using some global values from an external config file.
# Hardcoded for this example.
global_prefic = "acc "
# Manipulate context.properties #
for folder in context.properties["folders"]:
folder["displayName"] = global_prefix + folder["displayName"]
# Passing values forward to CFT template
return {
'resources': [{
'type': "cft-folder.py",
'name': context.env['name'],
'properties': context.properties}]
}
|
37,803 |
def pep_518_cp35_workaround(package_dir: Path, env: Dict[str, str]) -> None:
"""
Python 3.5 PEP 518 hack (see https://github.com/pypa/pip/issues/8392#issuecomment-639563494)
Basically, nuget's Python is an embedded Python distribution, which is not supported by pip.
Before version 3.6, there was no way to disable the "embedded" behavior, including the ignoring
of environment variables, including the ones pip uses to setup PEP 518 builds.
    The fix here is as suggested in that issue; we manually set up the PEP 518 requirements. Since we
    are in a fresh environment (except for pinned cibuildwheel dependencies), the build is already
mostly "isolated".
"""
pyproject_path = package_dir / 'pyproject.toml'
if pyproject_path.exists():
data = toml.load(pyproject_path)
requirements = (
data['build-system'].get('requires', [])
if 'build-system' in data
else []
)
if requirements:
with tempfile.TemporaryDirectory() as d:
reqfile = Path(d) / "requirements.txt"
with reqfile.open("w") as f:
for r in requirements:
print(r, file=f)
call(['pip', 'install', '-r', str(reqfile)], env=env)
|
def pep_518_cp35_workaround(package_dir: Path, env: Dict[str, str]) -> None:
"""
Python 3.5 PEP 518 hack (see https://github.com/pypa/pip/issues/8392#issuecomment-639563494)
Basically, nuget's Python is an embedded Python distribution, which is not supported by pip.
Before version 3.6, there was no way to disable the "embedded" behavior, including the ignoring
of environment variables, including the ones pip uses to setup PEP 518 builds.
    The fix here is as suggested in that issue; we manually set up the PEP 518 requirements. Since we
    are in a fresh environment (except for pinned cibuildwheel dependencies), the build is already
mostly "isolated".
"""
pyproject_path = package_dir / 'pyproject.toml'
if pyproject_path.exists():
data = toml.load(pyproject_path)
requirements = (
data['build-system'].get('requires', [])
if 'build-system' in data
else []
)
if requirements:
with tempfile.TemporaryDirectory() as d:
reqfile = Path(d) / "requirements.txt"
with reqfile.open("w") as f:
for r in requirements:
print(r, file=f)
call(['pip', 'install', '-r', reqfile], env=env)
|
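A hedged sketch of the build-requirements extraction performed above; the pyproject.toml content is inlined here purely for illustration.
import toml

data = toml.loads('[build-system]\nrequires = ["setuptools>=40.8.0", "wheel"]\n')
requirements = data['build-system'].get('requires', []) if 'build-system' in data else []
print(requirements)  # ['setuptools>=40.8.0', 'wheel']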
28,092 |
def journal_entry(cmdr, is_beta, system, station, entry, state):
# Always update, even if we're not the *current* system or station provider.
this.system_address = entry.get('SystemAddress') or this.system_address
this.system = entry.get('StarSystem') or this.system
# We need pop == 0 to set the value so as to clear 'x' in systems with
# no stations.
pop = entry.get('Population')
if pop is not None:
this.system_population = pop
this.station = entry.get('StationName') or this.station
this.station_marketid = entry.get('MarketID') or this.station_marketid
# We might pick up StationName in DockingRequested, make sure we clear it if leaving
if entry['event'] in ('Undocked', 'FSDJump', 'SupercruiseEntry'):
this.station = None
this.station_marketid = None
if config.get('station_provider') == 'EDSM':
this.station_link['text'] = this.station or (this.system_population and this.system_population > 0 and STATION_UNDOCKED or '')
this.station_link['url'] = station_url(this.system, this.station)
this.station_link.update_idletasks()
# Update display of 'EDSM Status' image
if this.system_link['text'] != system:
this.system_link['text'] = system or ''
this.system_link['image'] = ''
this.system_link.update_idletasks()
this.multicrew = bool(state['Role'])
if 'StarPos' in entry:
this.coordinates = entry['StarPos']
elif entry['event'] == 'LoadGame':
this.coordinates = None
if entry['event'] in ['LoadGame', 'Commander', 'NewCommander']:
this.newgame = True
this.newgame_docked = False
this.navbeaconscan = 0
elif entry['event'] == 'StartUp':
this.newgame = False
this.newgame_docked = False
this.navbeaconscan = 0
elif entry['event'] == 'Location':
this.newgame = True
this.newgame_docked = entry.get('Docked', False)
this.navbeaconscan = 0
elif entry['event'] == 'NavBeaconScan':
this.navbeaconscan = entry['NumBodies']
# Send interesting events to EDSM
if config.getint('edsm_out') and not is_beta and not this.multicrew and credentials(cmdr) and entry['event'] not in this.discardedEvents:
# Introduce transient states into the event
transient = {
'_systemName': system,
'_systemCoordinates': this.coordinates,
'_stationName': station,
'_shipId': state['ShipID'],
}
entry.update(transient)
if entry['event'] == 'LoadGame':
# Synthesise Materials events on LoadGame since we will have missed it
materials = {
'timestamp': entry['timestamp'],
'event': 'Materials',
'Raw': [ { 'Name': k, 'Count': v } for k,v in state['Raw'].items() ],
'Manufactured': [ { 'Name': k, 'Count': v } for k,v in state['Manufactured'].items() ],
'Encoded': [ { 'Name': k, 'Count': v } for k,v in state['Encoded'].items() ],
}
materials.update(transient)
this.queue.put((cmdr, materials))
this.queue.put((cmdr, entry))
|
def journal_entry(cmdr, is_beta, system, station, entry, state):
# Always update, even if we're not the *current* system or station provider.
this.system_address = entry.get('SystemAddress') or this.system_address
this.system = entry.get('StarSystem') or this.system
this.system = entry.get('StarSystem', this.system)
# no stations.
pop = entry.get('Population')
if pop is not None:
this.system_population = pop
this.station = entry.get('StationName') or this.station
this.station_marketid = entry.get('MarketID') or this.station_marketid
# We might pick up StationName in DockingRequested, make sure we clear it if leaving
if entry['event'] in ('Undocked', 'FSDJump', 'SupercruiseEntry'):
this.station = None
this.station_marketid = None
if config.get('station_provider') == 'EDSM':
this.station_link['text'] = this.station or (this.system_population and this.system_population > 0 and STATION_UNDOCKED or '')
this.station_link['url'] = station_url(this.system, this.station)
this.station_link.update_idletasks()
# Update display of 'EDSM Status' image
if this.system_link['text'] != system:
this.system_link['text'] = system or ''
this.system_link['image'] = ''
this.system_link.update_idletasks()
this.multicrew = bool(state['Role'])
if 'StarPos' in entry:
this.coordinates = entry['StarPos']
elif entry['event'] == 'LoadGame':
this.coordinates = None
if entry['event'] in ['LoadGame', 'Commander', 'NewCommander']:
this.newgame = True
this.newgame_docked = False
this.navbeaconscan = 0
elif entry['event'] == 'StartUp':
this.newgame = False
this.newgame_docked = False
this.navbeaconscan = 0
elif entry['event'] == 'Location':
this.newgame = True
this.newgame_docked = entry.get('Docked', False)
this.navbeaconscan = 0
elif entry['event'] == 'NavBeaconScan':
this.navbeaconscan = entry['NumBodies']
# Send interesting events to EDSM
if config.getint('edsm_out') and not is_beta and not this.multicrew and credentials(cmdr) and entry['event'] not in this.discardedEvents:
# Introduce transient states into the event
transient = {
'_systemName': system,
'_systemCoordinates': this.coordinates,
'_stationName': station,
'_shipId': state['ShipID'],
}
entry.update(transient)
if entry['event'] == 'LoadGame':
# Synthesise Materials events on LoadGame since we will have missed it
materials = {
'timestamp': entry['timestamp'],
'event': 'Materials',
'Raw': [ { 'Name': k, 'Count': v } for k,v in state['Raw'].items() ],
'Manufactured': [ { 'Name': k, 'Count': v } for k,v in state['Manufactured'].items() ],
'Encoded': [ { 'Name': k, 'Count': v } for k,v in state['Encoded'].items() ],
}
materials.update(transient)
this.queue.put((cmdr, materials))
this.queue.put((cmdr, entry))
|
49,073 |
def test_issue_10426():
x=Dummy('x')
a=Symbol('a')
n=Symbol('n')
assert solveset(sin(x + a) - sin(x), a) == \
Union(ImageSet(Lambda(n, 2*pi*n), S.Integers),Intersection(S.Complexes,ImageSet(Lambda(n, -I*(I*(2*n*pi + arg(-exp(-2*I*x))) + 2*im(x))), S.Integers)))
|
def test_issue_10426():
x = Dummy('x')
a = Symbol('a')
n = Symbol('n')
assert solveset(sin(x + a) - sin(x), a) == Union(
ImageSet(Lambda(n, 2*pi*n), S.Integers),
Intersection(S.Complexes, ImageSet(Lambda(n, -I*(I*(2*n*pi + arg(-exp(-2*I*x))) + 2*im(x))), S.Integers)))
|
32,100 |
def test_get_issue_attachment_url(mocker, requests_mock):
"""
Given:
- The issue ID.
When
- Running the get issue command.
Then
- Ensure the outputs as expected
"""
from JiraV2 import get_issue
from test_data.raw_response import GET_ISSUE_WITH_ATTACHMENT_RESPONSE, MD_AND_CONTEXT_OUTPUT
mocker.patch.object(demisto, "params", return_value=integration_params)
mocker.patch('JiraV2.generate_md_context_get_issue', return_value=MD_AND_CONTEXT_OUTPUT)
requests_mock.get('https://localhost/rest/api/latest/issue/VIK-267', json=GET_ISSUE_WITH_ATTACHMENT_RESPONSE)
requests_mock.get('https://localhost/rest/api/2/attachment/content/16188', json={})
get_issue("VIK-267", get_attachments="true") # the command will fail if not doing a request with the proper url
|
def test_get_issue_attachment_url(mocker, requests_mock):
"""
Given:
- The issue ID.
When
- Running the get issue command.
Then
- Ensure the outputs as expected
"""
from JiraV2 import get_issue
from test_data.raw_response import GET_ISSUE_WITH_ATTACHMENT_RESPONSE, MD_AND_CONTEXT_OUTPUT
mocker.patch.object(demisto, "params", return_value=integration_params)
mocker.patch('JiraV2.generate_md_context_get_issue', return_value=MD_AND_CONTEXT_OUTPUT)
requests_mock.get('https://localhost/rest/api/latest/issue/VIK-267', json=GET_ISSUE_WITH_ATTACHMENT_RESPONSE)
requests_mock.get('https://localhost/rest/api/2/attachment/content/16188', json={})
assert get_issue("VIK-267", get_attachments="true"), 'There was a request to the wrong url'
|
43,629 |
def decompose_hamiltonian(H, hide_identity=False):
"""Decomposes a Hermitian matrix into a linear combination of Pauli operators.
Args:
H (array[complex]): an Hermitian matrix of dimension :math:`2^n\times 2^n`
Keyword Args:
hide_identity (bool): always show ~.Identity observables in the results
Returns:
tuple[list[float], list[~.Observable]]: Returns a list of tensor products of PennyLane Pauli observables, as
well as the corresponding coefficients for each tensor product.
**Example:**
We can use this function to compute the Pauli operator decomposition of an arbitrary Hermitian
matrix:
>>> A = np.array([[-2, -2+1j, -2, -2], [-2-1j, 0, 0, -1], [-2, 0, -2, -1], [-2, -1, -1, 0]])
>>> coeffs, obs_list = decompose_hamiltonian(A)
>>> coeffs
[-1.0, -1.5, -0.5, -1.0, -1.5, -1.0, -0.5, 1.0, -0.5, -0.5]
We can use the output coefficients and tensor Pauli terms to construct a :class:`~.Hamiltonian`:
>>> H = qml.Hamiltonian(coeffs, obs_list)
>>> print(H)
(-1.0) [I0 I1]
+ (-1.5) [X1]
+ (-0.5) [Y1]
+ (-1.0) [Z1]
+ (-1.5) [X0]
+ (-1.0) [X0 X1]
+ (-0.5) [X0 Z1]
+ (1.0) [Y0 Y1]
+ (-0.5) [Z0 X1]
+ (-0.5) [Z0 Y1]
This Hamiltonian can then be used in defining VQE problems using :class:`~VQECost`.
"""
n = int(np.log2(len(H)))
N = 2 ** n
if len(H) - N != 0:
raise ValueError("Hamiltonian should be in the form (n^2 x n^2), for any n>=1")
if not np.allclose(H, H.conj().T):
raise ValueError("The Hamiltonian is not Hermitian")
paulis = [qml.Identity, qml.PauliX, qml.PauliY, qml.PauliZ]
obs = []
coeffs = []
for term in itertools.product(paulis, repeat=n):
matrices = [i._matrix() for i in term]
coeff = np.trace(functools.reduce(np.kron, matrices) @ H) / N
coeff = np.real_if_close(coeff).item()
if not np.allclose(coeff, 0):
coeffs.append(coeff)
if not all(t is qml.Identity for t in term):
obs.append(
functools.reduce(
matmul,
[
t(i)
for i, t in enumerate(term)
if t is not qml.Identity or not hide_identity
],
)
)
else:
obs.append(functools.reduce(matmul, [t(i) for i, t in enumerate(term)]))
return coeffs, obs
|
def decompose_hamiltonian(H, hide_identity=False):
"""Decomposes a Hermitian matrix into a linear combination of Pauli operators.
Args:
H (array[complex]): an Hermitian matrix of dimension :math:`2^n\times 2^n`
Keyword Args:
hide_identity (bool): always show ~.Identity observables in the results
Returns:
tuple[list[float], list[~.Observable]]: Returns a list of tensor products of PennyLane Pauli observables, as
well as the corresponding coefficients for each tensor product.
**Example:**
We can use this function to compute the Pauli operator decomposition of an arbitrary Hermitian
matrix:
>>> A = np.array([[-2, -2+1j, -2, -2], [-2-1j, 0, 0, -1], [-2, 0, -2, -1], [-2, -1, -1, 0]])
>>> coeffs, obs_list = decompose_hamiltonian(A)
>>> coeffs
[-1.0, -1.5, -0.5, -1.0, -1.5, -1.0, -0.5, 1.0, -0.5, -0.5]
We can use the output coefficients and tensor Pauli terms to construct a :class:`~.Hamiltonian`:
>>> H = qml.Hamiltonian(coeffs, obs_list)
>>> print(H)
(-1.0) [I0 I1]
+ (-1.5) [X1]
+ (-0.5) [Y1]
+ (-1.0) [Z1]
+ (-1.5) [X0]
+ (-1.0) [X0 X1]
+ (-0.5) [X0 Z1]
+ (1.0) [Y0 Y1]
+ (-0.5) [Z0 X1]
+ (-0.5) [Z0 Y1]
This Hamiltonian can then be used in defining VQE problems using :class:`~VQECost`.
"""
n = int(np.log2(len(H)))
N = 2 ** n
if len(H) - N != 0:
raise ValueError("Hamiltonian should have shape (2^n, 2^n), for any qubit number n>=1")
if not np.allclose(H, H.conj().T):
raise ValueError("The Hamiltonian is not Hermitian")
paulis = [qml.Identity, qml.PauliX, qml.PauliY, qml.PauliZ]
obs = []
coeffs = []
for term in itertools.product(paulis, repeat=n):
matrices = [i._matrix() for i in term]
coeff = np.trace(functools.reduce(np.kron, matrices) @ H) / N
coeff = np.real_if_close(coeff).item()
if not np.allclose(coeff, 0):
coeffs.append(coeff)
if not all(t is qml.Identity for t in term):
obs.append(
functools.reduce(
matmul,
[
t(i)
for i, t in enumerate(term)
if t is not qml.Identity or not hide_identity
],
)
)
else:
obs.append(functools.reduce(matmul, [t(i) for i, t in enumerate(term)]))
return coeffs, obs
|
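Numeric sanity check of the coefficient formula used above, coeff = Tr(kron(paulis) @ H) / 2**n, on a made-up 1-qubit Hamiltonian H = 2*I + 3*Z:
import numpy as np

I = np.eye(2)
Z = np.diag([1.0, -1.0])
H = 2 * I + 3 * Z
for name, P in [("I", I), ("Z", Z)]:
    # Each Pauli coefficient is recovered by the normalized trace inner product.
    print(name, np.trace(P @ H).real / 2)  # I -> 2.0, Z -> 3.0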
50,170 |
def validate_result(result):
def validate_list(data):
return not any(map(invalid_float, data))
def validate_dict(data, keys):
for k in keys:
if k not in data or invalid_float(data[k]):
return False
return True
def invalid_float(value):
return value is None or value == float("inf") or value == float("-inf")
if "dimensions" not in result or not validate_dict(result["dimensions"], dimensions):
return False
if "travel_dimensions" not in result or not validate_dict(
result["travel_dimensions"], travel_dimensions
):
return False
if "extrusion_length" not in result or not validate_list(result["extrusion_length"]):
return False
if "extrusion_volume" not in result or not validate_list(result["extrusion_volume"]):
return False
if "printing_area" not in result or not validate_dict(
result["printing_area"], printing_area
):
return False
if "travel_area" not in result or not validate_dict(
result["travel_area"], travel_area
):
return False
if "total_time" not in result or invalid_float(result["total_time"]):
return False
return True
|
def validate_result(result):
def validate_list(data):
return not any(map(invalid_float, data))
def validate_dict(data, keys):
for k in keys:
if k not in data or invalid_float(data[k]):
return False
return True
def invalid_float(value):
return value is None or value == float("inf") or value == float("-inf")
if "dimensions" not in result or not validate_dict(result["dimensions"], dimensions):
return False
if "travel_dimensions" not in result or not validate_dict(
result["travel_dimensions"], dimensions
):
return False
if "extrusion_length" not in result or not validate_list(result["extrusion_length"]):
return False
if "extrusion_volume" not in result or not validate_list(result["extrusion_volume"]):
return False
if "printing_area" not in result or not validate_dict(
result["printing_area"], printing_area
):
return False
if "travel_area" not in result or not validate_dict(
result["travel_area"], travel_area
):
return False
if "total_time" not in result or invalid_float(result["total_time"]):
return False
return True
|
9,131 |
def WriteXmlIfChanged(content, path, encoding="utf-8", pretty=False, win32=False):
""" Writes the XML content to disk, touching the file only if it has changed.
Args:
content: The structured content to be written.
path: Location of the file.
encoding: The encoding to report on the first line of the XML file.
pretty: True if we want pretty printing with indents and new lines.
"""
xml_string = XmlToString(content, encoding, pretty)
if win32 and os.linesep != "\r\n":
xml_string = xml_string.replace("\n", "\r\n")
default_encoding = locale.getdefaultlocale()[1]
if default_encoding and default_encoding.upper() != encoding.upper():
if (sys.version_info[0] >= 3) and (sys.version_info[1] >= 7):
xml_string = xml_string.encode(encoding)
else:
xml_string = xml_string.decode("cp1251").encode(encoding)
# Get the old content
try:
with open(path, "r") as file:
existing = file.read()
except IOError:
existing = None
# It has changed, write it
if existing != xml_string:
with open(path, "wb") as file:
file.write(xml_string)
|
def WriteXmlIfChanged(content, path, encoding="utf-8", pretty=False, win32=False):
""" Writes the XML content to disk, touching the file only if it has changed.
Args:
content: The structured content to be written.
path: Location of the file.
encoding: The encoding to report on the first line of the XML file.
pretty: True if we want pretty printing with indents and new lines.
"""
xml_string = XmlToString(content, encoding, pretty)
if win32 and os.linesep != "\r\n":
xml_string = xml_string.replace("\n", "\r\n")
default_encoding = locale.getdefaultlocale()[1]
if default_encoding and default_encoding.upper() != encoding.upper():
if sys.version_info >= (3, 7):
xml_string = xml_string.encode(encoding)
else:
xml_string = xml_string.decode("cp1251").encode(encoding)
# Get the old content
try:
with open(path, "r") as file:
existing = file.read()
except IOError:
existing = None
# It has changed, write it
if existing != xml_string:
with open(path, "wb") as file:
file.write(xml_string)
|
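The modified column above replaces the component-wise version test with a tuple comparison; a tiny worked example of why (the 4.0 version tuple is hypothetical):
fake_version = (4, 0)
print(fake_version[0] >= 3 and fake_version[1] >= 7)  # False, although 4.0 is newer than 3.7
print(fake_version >= (3, 7))                         # True, tuple comparison handles it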
57,733 |
def main() -> None:
try:
entity = demisto.args().get('indicator').get('CustomFields', {}).get('chronicleassethostname', '')
is_isolated = demisto.args().get('indicator').get('CustomFields', {})\
.get('chronicleisolatedhostname', 'No')
html = get_html_representation(entity, is_isolated)
demisto.results({
"Type": 1,
"ContentsFormat": formats["html"],
"Contents": html
})
except Exception as e:
demisto.error(traceback.format_exc())
return_error(f'Could not load widget:\n{e}')
|
def main() -> None:
try:
indicator = demisto.args().get('indicator')
entity = indicator.get('CustomFields', {}).get('chronicleassethostname', '')
is_isolated = indicator.get('CustomFields', {}).get('chronicleisolatedhostname', 'No')
html = get_html_representation(entity, is_isolated)
demisto.results({
"Type": 1,
"ContentsFormat": formats["html"],
"Contents": html
})
except Exception as e:
demisto.error(traceback.format_exc())
return_error(f'Could not load widget:\n{e}')
|
28,344 |
def many(curr: sqlite3.Cursor, *columns: str) -> tuple[Any, ...]:
"""Get the values of many columns from one row
Args:
curr: cursor to operate on
columns: names of the columns
Returns:
list of values
"""
res = curr.fetchall()
if len(res) > 1:
raise RuntimeError("Expected only one row")
elif _need_to_select(curr, *columns):
raise RuntimeError("Expected consistent selection")
else:
return res[0]
|
def many(curr: sqlite3.Cursor, *columns: str) -> tuple[Any, ...]:
"""Get the values of many columns from one row
Args:
curr: cursor to operate on
columns: names of the columns
Returns:
list of values
"""
res = curr.fetchall()
if len(res) > 1:
raise RuntimeError("Expected only one row")
elif _need_to_select(curr, *columns):
raise RuntimeError("Expected consistent selection: cursor has columns {tuple(c[0] for c in curr.description)} but expected {columns}")
else:
return res[0]
|
52,464 |
def _get_labels_from_spancat(
nlp: Language, factory_name: str
) -> Dict[str, Set[str]]:
pipe_names = [
pipe_name
for pipe_name in nlp.pipe_names
if nlp.get_pipe_meta(pipe_name).factory == factory_name
]
labels: Dict[str, Set[str]] = {}
for pipe_name in pipe_names:
pipe = cast(SpanCategorizer, nlp.get_pipe(pipe_name))
if pipe.key not in labels:
labels[pipe.key] = set()
labels[pipe.key].update(pipe.labels)
return labels
|
def _get_labels_from_spancat(
nlp: Language, factory_name: str
) -> Dict[str, Set[str]]:
pipe_names = [
pipe_name
for pipe_name in nlp.pipe_names
if nlp.get_pipe_meta(pipe_name).factory == factory_name
]
labels: Dict[str, Set[str]] = {}
for pipe_name in pipe_names:
pipe = nlp.get_pipe(pipe_name)
assert isinstance(pipe, SpanCategorizer)
if pipe.key not in labels:
labels[pipe.key] = set()
labels[pipe.key].update(pipe.labels)
return labels
|
49,246 |
def get_child_arguments():
"""
Return the executable. This contains a workaround for Windows if the
executable is reported to not have the .exe extension which can cause bugs
on reloading.
"""
import __main__
py_script = Path(sys.argv[0])
args = [sys.executable] + ['-W%s' % o for o in sys.warnoptions]
if hasattr(sys, "implementation") and sys.implementation.name == "cpython":
xoptions_args = [
"-X%s" % key if isinstance(value, bool) else "-X%s=%s" % (key, value)
for key, value in sys._xoptions.items()
]
args = args + xoptions_args
# __spec__ is set when the server was started with the `-m` option,
# see https://docs.python.org/3/reference/import.html#main-spec
# __spec__ may not exist, e.g. when running in a Conda env.
if getattr(__main__, '__spec__', None) is not None:
spec = __main__.__spec__
if (spec.name == '__main__' or spec.name.endswith('.__main__')) and spec.parent:
name = spec.parent
else:
name = spec.name
args += ['-m', name]
args += sys.argv[1:]
elif not py_script.exists():
# sys.argv[0] may not exist for several reasons on Windows.
# It may exist with a .exe extension or have a -script.py suffix.
exe_entrypoint = py_script.with_suffix('.exe')
if exe_entrypoint.exists():
# Should be executed directly, ignoring sys.executable.
return [exe_entrypoint, *sys.argv[1:]]
script_entrypoint = py_script.with_name('%s-script.py' % py_script.name)
if script_entrypoint.exists():
# Should be executed as usual.
return [*args, script_entrypoint, *sys.argv[1:]]
raise RuntimeError('Script %s does not exist.' % py_script)
else:
args += sys.argv
return args
|
def get_child_arguments():
"""
Return the executable. This contains a workaround for Windows if the
executable is reported to not have the .exe extension which can cause bugs
on reloading.
"""
import __main__
py_script = Path(sys.argv[0])
args = [sys.executable] + ['-W%s' % o for o in sys.warnoptions]
if sys.implementation.name == 'cpython':
args.extend(
f'-X{key}' if isinstance(value, bool) else f'-X{key}={value}'
for key, value in sys._xoptions.items()
)
# __spec__ is set when the server was started with the `-m` option,
# see https://docs.python.org/3/reference/import.html#main-spec
# __spec__ may not exist, e.g. when running in a Conda env.
if getattr(__main__, '__spec__', None) is not None:
spec = __main__.__spec__
if (spec.name == '__main__' or spec.name.endswith('.__main__')) and spec.parent:
name = spec.parent
else:
name = spec.name
args += ['-m', name]
args += sys.argv[1:]
elif not py_script.exists():
# sys.argv[0] may not exist for several reasons on Windows.
# It may exist with a .exe extension or have a -script.py suffix.
exe_entrypoint = py_script.with_suffix('.exe')
if exe_entrypoint.exists():
# Should be executed directly, ignoring sys.executable.
return [exe_entrypoint, *sys.argv[1:]]
script_entrypoint = py_script.with_name('%s-script.py' % py_script.name)
if script_entrypoint.exists():
# Should be executed as usual.
return [*args, script_entrypoint, *sys.argv[1:]]
raise RuntimeError('Script %s does not exist.' % py_script)
else:
args += sys.argv
return args
|
44,291 |
def expval(op):
r"""Expectation value of the supplied observable.
**Example:**
.. code-block:: python3
dev = qml.device("default.qubit", wires=2)
@qml.qnode(dev)
def circuit(x):
qml.RX(x, wires=0)
qml.Hadamard(wires=1)
qml.CNOT(wires=[0, 1])
return qml.expval(qml.PauliY(0))
Executing this QNode:
>>> circuit(0.5)
-0.4794255386042029
Args:
op (Observable): a quantum observable object
Raises:
QuantumFunctionError: `op` is not an instance of :class:`~.Observable`
"""
if not op.is_hermitian:
warnings.warn(f"{op.name} might not be an observable.")
return MeasurementProcess(Expectation, obs=op)
|
def expval(op):
r"""Expectation value of the supplied observable.
**Example:**
.. code-block:: python3
dev = qml.device("default.qubit", wires=2)
@qml.qnode(dev)
def circuit(x):
qml.RX(x, wires=0)
qml.Hadamard(wires=1)
qml.CNOT(wires=[0, 1])
return qml.expval(qml.PauliY(0))
Executing this QNode:
>>> circuit(0.5)
-0.4794255386042029
Args:
op (Observable): a quantum observable object
Raises:
QuantumFunctionError: `op` is not an instance of :class:`~.Observable`
"""
if not op.is_hermitian:
warnings.warn(f"{op.name} might not be an Hermitian.")
return MeasurementProcess(Expectation, obs=op)
|
12,778 |
def main(path=None):
verbose = 0
if path is None:
import sys
import getopt
opts, args = getopt.getopt(sys.argv[1:], "v")
for k, v in opts:
if k == "-v":
verbose += 1
path, = args
fs = FileStorage(path, read_only=1)
# Set of oids in the index that failed to load due to POSKeyError.
# This is what happens if undo is applied to the transaction creating
# the object (the oid is still in the index, but its current data
# record has a backpointer of 0, and POSKeyError is raised then
# because of that backpointer).
undone = {}
# Set of oids that were present in the index but failed to load.
# This does not include oids in undone.
noload = {}
#print("# building references graph ...")
graph = {} # oid -> refs
posoidv = list((pos, oid) for (oid, pos) in fs._index.items()) # [] of (pos, oid)
posoidv.sort() # access objects in order of pos↑ (optimize disk IO)
for _,oid in posoidv:
try:
data, serial = load_current(fs, oid)
except (KeyboardInterrupt, SystemExit):
raise
except POSKeyError:
undone[oid] = 1
continue
except:
if verbose:
traceback.print_exc()
noload[oid] = 1
continue
refs = get_refs(data)
graph[oid] = refs
#print("# verifying reachability ...")
oidv = list(graph.keys())
oidv.sort() # verify objects in order of oid↑ (useful for human perception; stable output)
for oid in oidv:
refs = graph[oid]
missing = [] # contains 3-tuples of oid, klass-metadata, reason
for ref, klass in refs:
if klass is None:
klass = '<unknown>'
if ref not in graph:
missing.append((ref, klass, "missing"))
if ref in noload:
missing.append((ref, klass, "failed to load"))
if ref in undone:
missing.append((ref, klass, "object creation was undone"))
if missing:
report(oid, data, serial, missing)
|
def main(path=None):
verbose = 0
if path is None:
import sys
import getopt
opts, args = getopt.getopt(sys.argv[1:], "v")
for k, v in opts:
if k == "-v":
verbose += 1
path, = args
fs = FileStorage(path, read_only=1)
# Set of oids in the index that failed to load due to POSKeyError.
# This is what happens if undo is applied to the transaction creating
# the object (the oid is still in the index, but its current data
# record has a backpointer of 0, and POSKeyError is raised then
# because of that backpointer).
undone = {}
# Set of oids that were present in the index but failed to load.
# This does not include oids in undone.
noload = {}
#print("# building references graph ...")
graph = {} # oid -> refs
posoidv = list((pos, oid) for (oid, pos) in fs._index.items()) # [] of (pos, oid)
posoidv.sort() # access objects in order of pos↑ (optimize disk IO)
for _,oid in posoidv:
try:
data, serial = load_current(fs, oid)
except (KeyboardInterrupt, SystemExit):
raise
except POSKeyError:
undone[oid] = 1
continue
except:
if verbose:
traceback.print_exc()
noload[oid] = 1
continue
refs = get_refs(data)
graph[oid] = refs
#print("# verifying reachability ...")
oidv = list(graph.keys())
oidv.sort() # verify objects in order of ascending oid (useful for human perception; stable output)
for oid in oidv:
refs = graph[oid]
missing = [] # contains 3-tuples of oid, klass-metadata, reason
for ref, klass in refs:
if klass is None:
klass = '<unknown>'
if ref not in graph:
missing.append((ref, klass, "missing"))
if ref in noload:
missing.append((ref, klass, "failed to load"))
if ref in undone:
missing.append((ref, klass, "object creation was undone"))
if missing:
report(oid, data, serial, missing)
|
10,489 |
def install(m, pkgspec, cache, upgrade=False, default_release=None,
install_recommends=None, force=False,
dpkg_options=expand_dpkg_options(DPKG_OPTIONS),
build_dep=False, fixed=False, autoremove=False, no_remove=False, only_upgrade=False,
allow_unauthenticated=False):
pkg_list = []
packages = ""
pkgspec = expand_pkgspec_from_fnmatches(m, pkgspec, cache)
package_names = []
for package in pkgspec:
if build_dep:
# Let apt decide what to install
pkg_list.append("'%s'" % package)
continue
name, version = package_split(package)
package_names.append(name)
installed, installed_version, upgradable, has_files = package_status(m, name, version, cache, state='install')
if (not installed and not only_upgrade) or (installed and not installed_version) or (upgrade and upgradable):
pkg_list.append("'%s'" % package)
if installed_version and upgradable and version:
# This happens when the package is installed, a newer version is
# available, and the version is a wildcard that matches both
#
# We do not apply the upgrade flag because we cannot specify both
# a version and state=latest. (This behaviour mirrors how apt
# treats a version with wildcard in the package)
pkg_list.append("'%s'" % package)
packages = ' '.join(pkg_list)
if packages:
if force:
force_yes = '--force-yes'
else:
force_yes = ''
if m.check_mode:
check_arg = '--simulate'
else:
check_arg = ''
if autoremove:
autoremove = '--auto-remove'
else:
autoremove = ''
if no_remove:
no_remove = '--no-remove'
else:
no_remove = ''
if only_upgrade:
only_upgrade = '--only-upgrade'
else:
only_upgrade = ''
if fixed:
fixed = '--fix-broken'
else:
fixed = ''
if build_dep:
cmd = "%s -y %s %s %s %s %s %s build-dep %s" % (APT_GET_CMD, dpkg_options, only_upgrade, fixed, force_yes, no_remove, check_arg, packages)
else:
cmd = "%s -y %s %s %s %s %s %s %s install %s" % \
(APT_GET_CMD, dpkg_options, only_upgrade, fixed, force_yes, autoremove, no_remove, check_arg, packages)
if default_release:
cmd += " -t '%s'" % (default_release,)
if install_recommends is False:
cmd += " -o APT::Install-Recommends=no"
elif install_recommends is True:
cmd += " -o APT::Install-Recommends=yes"
# install_recommends is None uses the OS default
if allow_unauthenticated:
cmd += " --allow-unauthenticated"
with PolicyRcD(m):
rc, out, err = m.run_command(cmd)
if m._diff:
diff = parse_diff(out)
else:
diff = {}
status = True
changed = True
if build_dep:
changed = APT_GET_ZERO not in out
data = dict(changed=changed, stdout=out, stderr=err, diff=diff)
if rc:
status = False
data = dict(msg="'%s' failed: %s" % (cmd, err), stdout=out, stderr=err, rc=rc)
else:
status = True
data = dict(changed=False)
if not build_dep:
mark_installed_manually(m, package_names)
return (status, data)
|
def install(m, pkgspec, cache, upgrade=False, default_release=None,
install_recommends=None, force=False,
dpkg_options=expand_dpkg_options(DPKG_OPTIONS),
build_dep=False, fixed=False, autoremove=False, no_remove=False, only_upgrade=False,
allow_unauthenticated=False):
pkg_list = []
packages = ""
pkgspec = expand_pkgspec_from_fnmatches(m, pkgspec, cache)
package_names = []
for package in pkgspec:
if build_dep:
# Let apt decide what to install
pkg_list.append("'%s'" % package)
continue
name, version = package_split(package)
package_names.append(name)
installed, installed_version, upgradable, has_files = package_status(m, name, version, cache, state='install')
if (not installed and not only_upgrade) or (installed and not installed_version) or (upgrade and upgradable):
pkg_list.append("'%s'" % package)
if installed_version and upgradable and version:
# This happens when the package is installed, a newer version is
# available, and the version is a wildcard that matches both
#
# We do not apply the upgrade flag because we cannot specify both
# a version and state=latest. (This behaviour mirrors how apt
# treats a version with wildcard in the package)
pkg_list.append("'%s'" % package)
packages = ' '.join(pkg_list)
if packages:
if force:
force_yes = '--force-yes'
else:
force_yes = ''
if m.check_mode:
check_arg = '--simulate'
else:
check_arg = ''
if autoremove:
autoremove = '--auto-remove'
else:
autoremove = ''
if no_remove:
no_remove = '--no-remove'
else:
no_remove = ''
if only_upgrade:
only_upgrade = '--only-upgrade'
else:
only_upgrade = ''
if fixed:
fixed = '--fix-broken'
else:
fixed = ''
if build_dep:
cmd = "%s -y %s %s %s %s %s %s build-dep %s" % (APT_GET_CMD, dpkg_options, only_upgrade, fixed, force_yes, no_remove, check_arg, packages)
else:
cmd = "%s -y %s %s %s %s %s %s %s install %s" % \
(APT_GET_CMD, dpkg_options, only_upgrade, fixed, force_yes, autoremove, fail_on_autoremove, check_arg, packages)
if default_release:
cmd += " -t '%s'" % (default_release,)
if install_recommends is False:
cmd += " -o APT::Install-Recommends=no"
elif install_recommends is True:
cmd += " -o APT::Install-Recommends=yes"
# install_recommends is None uses the OS default
if allow_unauthenticated:
cmd += " --allow-unauthenticated"
with PolicyRcD(m):
rc, out, err = m.run_command(cmd)
if m._diff:
diff = parse_diff(out)
else:
diff = {}
status = True
changed = True
if build_dep:
changed = APT_GET_ZERO not in out
data = dict(changed=changed, stdout=out, stderr=err, diff=diff)
if rc:
status = False
data = dict(msg="'%s' failed: %s" % (cmd, err), stdout=out, stderr=err, rc=rc)
else:
status = True
data = dict(changed=False)
if not build_dep:
mark_installed_manually(m, package_names)
return (status, data)
|
34,982 |
def batch_matmul(x, y, oshape=None, auto_scheduler_rewritten_layout=""):
"""Computes batch matrix multiplication of `x` and `y` when `x` and `y` are
data in batch. Supports broadcasting for batch dimension.
Parameters
----------
x : tvm.te.Tensor
3-D with shape [batch, M, K]
y : tvm.te.Tensor
3-D with shape [batch, N, K]
oshape : List[Optional]
Explicit intended output shape of the computation. Can be useful in cases
with dynamic input shapes.
auto_scheduler_rewritten_layout: str = ""
The layout after auto-scheduler's layout rewrite pass.
Returns
-------
output : tvm.te.Tensor
3-D with shape [batch, M, N]
"""
x_shape = get_const_tuple(x.shape)
if auto_scheduler_rewritten_layout:
# Infer shape for the rewritten layout
y_shape = auto_scheduler.get_shape_from_rewritten_layout(
auto_scheduler_rewritten_layout, ["b", "j", "k"]
)
auto_scheduler.remove_index_check(y)
else:
y_shape = get_const_tuple(y.shape)
assert len(x_shape) == 3 and len(y_shape) == 3, "only support 3-dim batch_matmul"
XB = x_shape[0]
YB = y_shape[0]
_, M, K = x.shape
k = te.reduce_axis((0, K), name="k")
if oshape is None:
if isinstance(XB, int) and isinstance(YB, int):
assert XB == YB or XB == 1 or YB == 1, "batch dimension doesn't match"
batch = max(XB, YB)
elif isinstance(XB, tir.expr.Var):
batch = XB
else:
batch = YB
if isinstance(x_shape[2], int) and isinstance(y_shape[2], int):
            assert x_shape[2] == y_shape[2], "shapes of x and y is inconsistent"
N = y.shape[1]
oshape = (batch, M, N)
output = te.compute(
oshape,
lambda b, i, j: te.sum(x[b if XB != 1 else 0, i, k] * y[b if YB != 1 else 0, j, k], axis=k),
tag="batch_matmul",
attrs={"layout_free_placeholders": [y]},
)
if auto_scheduler_rewritten_layout:
output = auto_scheduler.rewrite_compute_body(output, auto_scheduler_rewritten_layout)
return output
|
def batch_matmul(x, y, oshape=None, auto_scheduler_rewritten_layout=""):
"""Computes batch matrix multiplication of `x` and `y` when `x` and `y` are
data in batch. Supports broadcasting for batch dimension.
Parameters
----------
x : tvm.te.Tensor
3-D with shape [batch, M, K]
y : tvm.te.Tensor
3-D with shape [batch, N, K]
oshape : List[Optional]
Explicit intended output shape of the computation. Can be useful in cases
with dynamic input shapes.
auto_scheduler_rewritten_layout: str = ""
The layout after auto-scheduler's layout rewrite pass.
Returns
-------
output : tvm.te.Tensor
3-D with shape [batch, M, N]
"""
x_shape = get_const_tuple(x.shape)
if auto_scheduler_rewritten_layout:
# Infer shape for the rewritten layout
y_shape = auto_scheduler.get_shape_from_rewritten_layout(
auto_scheduler_rewritten_layout, ["b", "j", "k"]
)
auto_scheduler.remove_index_check(y)
else:
y_shape = get_const_tuple(y.shape)
assert len(x_shape) == 3 and len(y_shape) == 3, "only support 3-dim batch_matmul"
XB = x_shape[0]
YB = y_shape[0]
_, M, K = x.shape
k = te.reduce_axis((0, K), name="k")
if oshape is None:
if isinstance(XB, int) and isinstance(YB, int):
assert XB == YB or XB == 1 or YB == 1, "batch dimension doesn't match"
batch = max(XB, YB)
elif isinstance(XB, tir.expr.Var):
batch = XB
else:
batch = YB
if isinstance(x_shape[2], int) and isinstance(y_shape[2], int):
            assert x_shape[2] == y_shape[2], "shapes of x and y are inconsistent"
N = y.shape[1]
oshape = (batch, M, N)
output = te.compute(
oshape,
lambda b, i, j: te.sum(x[b if XB != 1 else 0, i, k] * y[b if YB != 1 else 0, j, k], axis=k),
tag="batch_matmul",
attrs={"layout_free_placeholders": [y]},
)
if auto_scheduler_rewritten_layout:
output = auto_scheduler.rewrite_compute_body(output, auto_scheduler_rewritten_layout)
return output
|
35,827 |
def make_arg_clamper(datapos, mempos, typ, is_init=False):
"""
Clamps argument to type limits.
"""
if not is_init:
data_decl = ['calldataload', ['add', 4, datapos]]
copier = functools.partial(_mk_calldatacopy_copier, mempos=mempos)
else:
data_decl = ['codeload', ['add', '~codelen', datapos]]
copier = functools.partial(_mk_codecopy_copier, mempos=mempos)
# Numbers: make sure they're in range
if is_base_type(typ, 'int128'):
return LLLnode.from_list([
'clamp',
['mload', MemoryPositions.MINNUM],
data_decl,
['mload', MemoryPositions.MAXNUM]
], typ=typ, annotation='checking int128 input')
# Booleans: make sure they're zero or one
elif is_base_type(typ, 'bool'):
return LLLnode.from_list(
['uclamplt', data_decl, 2],
typ=typ,
annotation='checking bool input',
)
# Addresses: make sure they're in range
elif is_base_type(typ, 'address'):
return LLLnode.from_list(
['uclamplt', data_decl, ['mload', MemoryPositions.ADDRSIZE]],
typ=typ,
annotation='checking address input',
)
# Bytes: make sure they have the right size
elif isinstance(typ, ByteArrayLike):
return LLLnode.from_list([
'seq',
copier(data_decl, 32 + typ.maxlen),
['assert', ['le', ['calldataload', ['add', 4, data_decl]], typ.maxlen]]
], typ=None, annotation='checking bytearray input')
# Lists: recurse
elif isinstance(typ, ListType):
if typ.count > 5 or (type(datapos) is list and type(mempos) is list):
subtype_size = get_size_of_type(typ.subtype)
i_incr = get_size_of_type(typ.subtype) * 32
# for i in range(typ.count):
mem_to = subtype_size * 32 * (typ.count - 1)
loop_label = "_check_list_loop_%s" % str(uuid.uuid4())
# use LOOP_FREE_INDEX to store i
offset = 288
o = [['mstore', offset, 0], # init loop
['label', loop_label],
make_arg_clamper(['add', datapos, ['mload', offset]],
['add', mempos, ['mload', offset]], typ.subtype, is_init),
['mstore', offset, ['add', ['mload', offset], i_incr]],
['if', ['lt', ['mload', offset], mem_to],
['goto', loop_label]]]
else:
o = []
for i in range(typ.count):
offset = get_size_of_type(typ.subtype) * 32 * i
o.append(make_arg_clamper(datapos + offset, mempos + offset, typ.subtype, is_init))
return LLLnode.from_list(['seq'] + o, typ=None, annotation='checking list input')
# Otherwise don't make any checks
else:
return LLLnode.from_list('pass')
|
def make_arg_clamper(datapos, mempos, typ, is_init=False):
"""
Clamps argument to type limits.
"""
if not is_init:
data_decl = ['calldataload', ['add', 4, datapos]]
copier = functools.partial(_mk_calldatacopy_copier, mempos=mempos)
else:
data_decl = ['codeload', ['add', '~codelen', datapos]]
copier = functools.partial(_mk_codecopy_copier, mempos=mempos)
# Numbers: make sure they're in range
if is_base_type(typ, 'int128'):
return LLLnode.from_list([
'clamp',
['mload', MemoryPositions.MINNUM],
data_decl,
['mload', MemoryPositions.MAXNUM]
], typ=typ, annotation='checking int128 input')
# Booleans: make sure they're zero or one
elif is_base_type(typ, 'bool'):
return LLLnode.from_list(
['uclamplt', data_decl, 2],
typ=typ,
annotation='checking bool input',
)
# Addresses: make sure they're in range
elif is_base_type(typ, 'address'):
return LLLnode.from_list(
['uclamplt', data_decl, ['mload', MemoryPositions.ADDRSIZE]],
typ=typ,
annotation='checking address input',
)
# Bytes: make sure they have the right size
elif isinstance(typ, ByteArrayLike):
return LLLnode.from_list([
'seq',
copier(data_decl, 32 + typ.maxlen),
['assert', ['le', ['calldataload', ['add', 4, data_decl]], typ.maxlen]]
], typ=None, annotation='checking bytearray input')
# Lists: recurse
elif isinstance(typ, ListType):
if typ.count > 5 or (type(datapos) is list and type(mempos) is list):
subtype_size = get_size_of_type(typ.subtype)
i_incr = subtype_size * 32
# for i in range(typ.count):
mem_to = subtype_size * 32 * (typ.count - 1)
loop_label = "_check_list_loop_%s" % str(uuid.uuid4())
# use LOOP_FREE_INDEX to store i
offset = 288
o = [['mstore', offset, 0], # init loop
['label', loop_label],
make_arg_clamper(['add', datapos, ['mload', offset]],
['add', mempos, ['mload', offset]], typ.subtype, is_init),
['mstore', offset, ['add', ['mload', offset], i_incr]],
['if', ['lt', ['mload', offset], mem_to],
['goto', loop_label]]]
else:
o = []
for i in range(typ.count):
offset = get_size_of_type(typ.subtype) * 32 * i
o.append(make_arg_clamper(datapos + offset, mempos + offset, typ.subtype, is_init))
return LLLnode.from_list(['seq'] + o, typ=None, annotation='checking list input')
# Otherwise don't make any checks
else:
return LLLnode.from_list('pass')
|
25,597 |
def update_monitoring_service_from_balance_proof(
raiden: "RaidenService",
chain_state: ChainState,
new_balance_proof: BalanceProofSignedState,
non_closing_participant: Address,
) -> None:
if raiden.config.services.monitoring_enabled is False:
return
msg = "Monitoring is enabled but the default monitoring service address is None."
assert raiden.default_msc_address is not None, msg
channel_state = views.get_channelstate_by_canonical_identifier(
chain_state=chain_state, canonical_identifier=new_balance_proof.canonical_identifier
)
msg = (
f"Failed to update monitoring service due to inability to find "
f"channel: {new_balance_proof.channel_identifier} "
f"token_network_address: {to_checksum_address(new_balance_proof.token_network_address)}."
)
assert channel_state, msg
msg = "Monitoring is enabled but the `UserDeposit` contract is None."
assert raiden.default_user_deposit is not None, msg
rei_balance = raiden.default_user_deposit.effective_balance(raiden.address, BLOCK_ID_LATEST)
if rei_balance < MONITORING_REWARD:
rdn_balance = to_rdn(rei_balance)
rdn_reward = to_rdn(MONITORING_REWARD)
log.warning(
f"Skipping update to Monitoring service. "
f"Your deposit balance {rdn_balance} is less than "
f"the required monitoring service reward of {rdn_reward}"
)
return
# In production there should be no MonitoringRequest if
# channel balance is below a certain threshold. This is
# a naive approach that needs to be worked on in the future
if raiden.config.environment_type == Environment.PRODUCTION:
message = (
"Skipping update to Monitoring service. "
"Your channel balance {channel_balance} is less than "
"the required minimum balance of {min_balance} "
"that you have set before sending the MonitorRequest,"
" token address {token_address}"
)
dai_token_network_address = views.get_token_network_address_by_token_address(
chain_state=chain_state,
token_network_registry_address=raiden.default_registry.address,
token_address=DAI_TOKEN_ADDRESS,
)
weth_token_network_address = views.get_token_network_address_by_token_address(
chain_state=chain_state,
token_network_registry_address=raiden.default_registry.address,
token_address=WETH_TOKEN_ADDRESS,
)
channel_balance = get_balance(
sender=channel_state.our_state, receiver=channel_state.partner_state,
)
if channel_state.canonical_identifier.token_network_address == dai_token_network_address:
if channel_balance < MIN_MONITORING_AMOUNT_DAI:
log.warning(
message.format(
channel_balance=channel_balance,
min_balance=MIN_MONITORING_AMOUNT_DAI,
token_address=to_checksum_address(DAI_TOKEN_ADDRESS)
)
)
return
if channel_state.canonical_identifier.token_network_address == weth_token_network_address:
if channel_balance < MIN_MONITORING_AMOUNT_WETH:
log.warning(
message.format(
channel_balance=channel_balance,
min_balance=MIN_MONITORING_AMOUNT_WETH,
token_address=to_checksum_address(WETH_TOKEN_ADDRESS)
)
)
return
log.info(
"Received new balance proof, creating message for Monitoring Service.",
node=to_checksum_address(raiden.address),
balance_proof=new_balance_proof,
)
monitoring_message = RequestMonitoring.from_balance_proof_signed_state(
balance_proof=new_balance_proof,
non_closing_participant=non_closing_participant,
reward_amount=MONITORING_REWARD,
monitoring_service_contract_address=raiden.default_msc_address,
)
monitoring_message.sign(raiden.signer)
raiden.transport.broadcast(constants.MONITORING_BROADCASTING_ROOM, monitoring_message)
|
def update_monitoring_service_from_balance_proof(
raiden: "RaidenService",
chain_state: ChainState,
new_balance_proof: BalanceProofSignedState,
non_closing_participant: Address,
) -> None:
if raiden.config.services.monitoring_enabled is False:
return
msg = "Monitoring is enabled but the default monitoring service address is None."
assert raiden.default_msc_address is not None, msg
channel_state = views.get_channelstate_by_canonical_identifier(
chain_state=chain_state, canonical_identifier=new_balance_proof.canonical_identifier
)
msg = (
f"Failed to update monitoring service due to inability to find "
f"channel: {new_balance_proof.channel_identifier} "
f"token_network_address: {to_checksum_address(new_balance_proof.token_network_address)}."
)
assert channel_state, msg
msg = "Monitoring is enabled but the `UserDeposit` contract is None."
assert raiden.default_user_deposit is not None, msg
rei_balance = raiden.default_user_deposit.effective_balance(raiden.address, BLOCK_ID_LATEST)
if rei_balance < MONITORING_REWARD:
rdn_balance = to_rdn(rei_balance)
rdn_reward = to_rdn(MONITORING_REWARD)
log.warning(
f"Skipping update to Monitoring service. "
f"Your deposit balance {rdn_balance} is less than "
f"the required monitoring service reward of {rdn_reward}"
)
return
# In production there should be no MonitoringRequest if
# channel balance is below a certain threshold. This is
# a naive approach that needs to be worked on in the future
if raiden.config.environment_type == Environment.PRODUCTION:
message = (
"Skipping update to Monitoring service. "
"Your channel balance {channel_balance} is less than "
"the required minimum balance of {min_balance} "
"that you have set before sending the MonitorRequest,"
" token address {token_address}"
)
dai_token_network_address = views.get_token_network_address_by_token_address(
chain_state=chain_state,
token_network_registry_address=raiden.default_registry.address,
token_address=DAI_TOKEN_ADDRESS,
)
weth_token_network_address = views.get_token_network_address_by_token_address(
chain_state=chain_state,
token_network_registry_address=raiden.default_registry.address,
token_address=WETH_TOKEN_ADDRESS,
)
channel_balance = get_balance(
sender=channel_state.our_state, receiver=channel_state.partner_state,
)
if channel_state.canonical_identifier.token_network_address == dai_token_network_address:
if channel_balance < MIN_MONITORING_AMOUNT_DAI:
log.warning(
message.format(
channel_balance=channel_balance,
min_balance=MIN_MONITORING_AMOUNT_DAI,
token_address=to_checksum_address(DAI_TOKEN_ADDRESS)
)
)
return
if channel_state.canonical_identifier.token_network_address == weth_token_network_address:
if channel_balance < MIN_MONITORING_AMOUNT_WETH:
log.warning(
message.format(
channel_balance=channel_balance,
min_balance=MIN_MONITORING_AMOUNT_WETH,
token_address=to_checksum_address(WETH_TOKEN_ADDRESS),
)
)
return
log.info(
"Received new balance proof, creating message for Monitoring Service.",
node=to_checksum_address(raiden.address),
balance_proof=new_balance_proof,
)
monitoring_message = RequestMonitoring.from_balance_proof_signed_state(
balance_proof=new_balance_proof,
non_closing_participant=non_closing_participant,
reward_amount=MONITORING_REWARD,
monitoring_service_contract_address=raiden.default_msc_address,
)
monitoring_message.sign(raiden.signer)
raiden.transport.broadcast(constants.MONITORING_BROADCASTING_ROOM, monitoring_message)
|
5,499 |
def react_document(request, document_slug, document_locale):
"""
View a wiki document.
"""
# If any query parameter is used that is only supported by the wiki view,
# redirect to the wiki domain.
if frozenset(request.GET) & WIKI_ONLY_DOCUMENT_QUERY_PARAMS:
return redirect_to_wiki(request)
slug_dict = split_slug(document_slug)
# Is there a document at this slug, in this locale?
doc, fallback_reason = _get_doc_and_fallback_reason(document_locale, document_slug)
if doc is None:
# We can throw a 404 immediately if the request type is HEAD.
# TODO: take a shortcut if the document was found?
if request.method == "HEAD":
raise Http404
# Check if we should fall back to default locale.
fallback_doc, fallback_reason, redirect_url = _default_locale_fallback(
request, document_slug, document_locale
)
if fallback_doc is not None:
doc = fallback_doc
if redirect_url is not None:
return redirect(redirect_url)
else:
# It could be that the document you're trying to view was deleted and
# the reason we're not finding a fallback is because the slug
# doesn't match.
# E.g. you're trying to view `/sv-SE/docs/Foö/Bår` but that document
# was deleted and as a soft-delete its parent was `/en-US/docs/Foo/Bar`
if document_locale != settings.LANGUAGE_CODE: # Not in English!
redirect_url = _get_deleted_parent_redirect_url(
document_locale, document_slug
)
if redirect_url:
return redirect(redirect_url)
raise Http404
# We found a Document. Now we need to figure out how we're going
# to display it.
# If we're a redirect, and redirecting hasn't been disabled, redirect.
# Obey explicit redirect pages:
# Don't redirect on redirect=no (like Wikipedia), so we can link from a
# redirected-to-page back to a "Redirected from..." link, so you can edit
# the redirect.
redirect_url = (
None if request.GET.get("redirect") == "no" else doc.get_redirect_url()
)
if redirect_url and redirect_url != doc.get_absolute_url():
url = urlparams(redirect_url, query_dict=request.GET)
# TODO: Re-enable the link in this message after Django >1.5 upgrade
# Redirected from <a href="%(url)s?redirect=no">%(url)s</a>
messages.add_message(
request,
messages.WARNING,
mark_safe(
gettext("Redirected from %(url)s")
% {"url": request.build_absolute_uri(doc.get_absolute_url())}
),
extra_tags="wiki_redirect",
)
return HttpResponsePermanentRedirect(url)
# Get the SEO summary
seo_summary = doc.get_summary_text()
# Get the additional title information, if necessary.
seo_parent_title = _get_seo_parent_title(doc, slug_dict, document_locale)
# Get the JSON data for this document
doc_api_data = document_api_data(doc)
document_data = doc_api_data["documentData"]
def robots_index():
if fallback_reason:
return False
if not doc.html:
return False
if doc.is_experiment:
return False
if doc.has_legacy_namespace:
return False
if doc.has_noindex_slug:
return False
if request.get_host() not in settings.ALLOW_ROBOTS_WEB_DOMAINS:
return False
return True
robots_meta_content = "index, follow" if robots_index() else "noindex, nofollow"
# Bundle it all up and, finally, return.
context = {
"document_data": document_data,
# TODO: anything we're actually using in the template ought
# to be bundled up into the json object above instead.
"seo_summary": seo_summary,
"seo_parent_title": seo_parent_title,
"robots_meta_content": robots_meta_content,
}
response = render(request, "wiki/react_document.html", context)
return _add_kuma_revision_header(doc, response)
|
def react_document(request, document_slug, document_locale):
"""
View a wiki document.
"""
# If any query parameter is used that is only supported by the wiki view,
# redirect to the wiki domain.
if frozenset(request.GET) & WIKI_ONLY_DOCUMENT_QUERY_PARAMS:
return redirect_to_wiki(request)
slug_dict = split_slug(document_slug)
# Is there a document at this slug, in this locale?
doc, fallback_reason = _get_doc_and_fallback_reason(document_locale, document_slug)
if doc is None:
# We can throw a 404 immediately if the request type is HEAD.
# TODO: take a shortcut if the document was found?
if request.method == "HEAD":
raise Http404
# Check if we should fall back to default locale.
fallback_doc, fallback_reason, redirect_url = _default_locale_fallback(
request, document_slug, document_locale
)
if fallback_doc is not None:
doc = fallback_doc
if redirect_url is not None:
return redirect(redirect_url)
else:
# It could be that the document you're trying to view was deleted and
# the reason we're not finding a fallback is because the slug
# doesn't match.
# E.g. you're trying to view `/sv-SE/docs/Foö/Bår` but that document
# was soft-deleted and its parent was `/en-US/docs/Foo/Bar`
if document_locale != settings.LANGUAGE_CODE: # Not in English!
redirect_url = _get_deleted_parent_redirect_url(
document_locale, document_slug
)
if redirect_url:
return redirect(redirect_url)
raise Http404
# We found a Document. Now we need to figure out how we're going
# to display it.
# If we're a redirect, and redirecting hasn't been disabled, redirect.
# Obey explicit redirect pages:
# Don't redirect on redirect=no (like Wikipedia), so we can link from a
# redirected-to-page back to a "Redirected from..." link, so you can edit
# the redirect.
redirect_url = (
None if request.GET.get("redirect") == "no" else doc.get_redirect_url()
)
if redirect_url and redirect_url != doc.get_absolute_url():
url = urlparams(redirect_url, query_dict=request.GET)
# TODO: Re-enable the link in this message after Django >1.5 upgrade
# Redirected from <a href="%(url)s?redirect=no">%(url)s</a>
messages.add_message(
request,
messages.WARNING,
mark_safe(
gettext("Redirected from %(url)s")
% {"url": request.build_absolute_uri(doc.get_absolute_url())}
),
extra_tags="wiki_redirect",
)
return HttpResponsePermanentRedirect(url)
# Get the SEO summary
seo_summary = doc.get_summary_text()
# Get the additional title information, if necessary.
seo_parent_title = _get_seo_parent_title(doc, slug_dict, document_locale)
# Get the JSON data for this document
doc_api_data = document_api_data(doc)
document_data = doc_api_data["documentData"]
def robots_index():
if fallback_reason:
return False
if not doc.html:
return False
if doc.is_experiment:
return False
if doc.has_legacy_namespace:
return False
if doc.has_noindex_slug:
return False
if request.get_host() not in settings.ALLOW_ROBOTS_WEB_DOMAINS:
return False
return True
robots_meta_content = "index, follow" if robots_index() else "noindex, nofollow"
# Bundle it all up and, finally, return.
context = {
"document_data": document_data,
# TODO: anything we're actually using in the template ought
# to be bundled up into the json object above instead.
"seo_summary": seo_summary,
"seo_parent_title": seo_parent_title,
"robots_meta_content": robots_meta_content,
}
response = render(request, "wiki/react_document.html", context)
return _add_kuma_revision_header(doc, response)
|
32,244 |
def sort_list_of_dicts_by_key(args: Dict[str, Any]) -> CommandResults:
_list = args.get('value', None)
key = args.get('key', None)
reverse_flag = args.get('reverse', False)
if reverse_flag == 'True':
reverse = True
else:
reverse = False
if not _list:
raise ValueError('List not provided')
result = _list.sort(key=lambda x: x[key], reverse=reverse)
return CommandResults(
outputs_prefix='Sorted List',
outputs_key_field='SortedList',
outputs=_list,
)
|
def sort_list_of_dicts_by_key(args: Dict[str, Any]) -> CommandResults:
_list = args.get('value', None)
key = args.get('key', None)
reverse_flag = argToBoolean(args.get('reverse', False))
if not _list:
raise ValueError('List not provided')
result = _list.sort(key=lambda x: x[key], reverse=reverse)
return CommandResults(
outputs_prefix='Sorted List',
outputs_key_field='SortedList',
outputs=_list,
)
|
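Note on the pair above: the original treated only the exact string 'True' as truthy, so inputs such as 'true' or a real boolean fell through to False; the rewrite delegates to the platform's argToBoolean helper. A minimal sketch of that kind of parsing (an assumption about the helper's intent, not its actual implementation):
def to_boolean(value):
    # Accept real booleans and the usual string spellings, case-insensitively.
    if isinstance(value, bool):
        return value
    if isinstance(value, str):
        lowered = value.strip().lower()
        if lowered in ("true", "yes", "1"):
            return True
        if lowered in ("false", "no", "0", ""):
            return False
    raise ValueError(f"Cannot interpret {value!r} as a boolean")

print(to_boolean("True"))    # True
print(to_boolean("false"))   # False
print(to_boolean(False))     # False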
3,516 |
def _serve_docs(request, final_project, path):
"""
Serve documentation in the way specified by settings.
Serve from the filesystem if using PYTHON_MEDIA. We definitely shouldn't do
this in production, but I don't want to force a check for DEBUG.
"""
if not path.startswith('/proxito/'):
if path[0] == '/':
path = path[1:]
path = f'/proxito/{path}'
if settings.PYTHON_MEDIA:
return _serve_docs_nginx(
request, final_project=final_project, path=path
)
return _serve_docs_nginx(request, final_project=final_project, path=path)
|
def _serve_docs(request, final_project, path):
"""
Serve documentation in the way specified by settings.
Serve from the filesystem if using PYTHON_MEDIA. We definitely shouldn't do
this in production, but I don't want to force a check for DEBUG.
"""
if not path.startswith('/proxito/'):
if path[0] == '/':
path = path[1:]
path = f'/proxito/{path}'
if settings.PYTHON_MEDIA:
return _serve_docs_python(
request, final_project=final_project, path=path
)
return _serve_docs_nginx(request, final_project=final_project, path=path)
|
31,139 |
def is_file_exist(public_storage_bucket, storage_client):
blob = public_storage_bucket.blob(STORAGE_ID_SET_PATH)
return blob.exists(storage_client)
|
def file_exists_in_bucket(public_storage_bucket, storage_client):
blob = public_storage_bucket.blob(STORAGE_ID_SET_PATH)
return blob.exists(storage_client)
|
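Note on the pair above: only the name changes; the helper still asks Google Cloud Storage whether an object exists. A usage sketch assuming the google-cloud-storage client library, with placeholder bucket and object names (real credentials are needed to run it):
from google.cloud import storage

def blob_exists(bucket, client, object_path):
    # exists() issues a metadata request; the object itself is not downloaded.
    return bucket.blob(object_path).exists(client)

client = storage.Client()                          # requires GCP credentials
bucket = client.bucket("example-public-bucket")    # placeholder bucket name
print(blob_exists(bucket, client, "content/id_set.json"))   # placeholder path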
12,033 |
def recombine_regions(
full_mesh_cube: Cube,
region_cubes: Iterable[Cube],
index_coord_name: AnyStr = "i_mesh_index",
) -> Cube:
"""
Put data from regional sub-meshes back onto the original full mesh.
The result is a cube identical to 'full_mesh_cube', but with its data
replaced by a combination of data from the provided 'region_cubes'.
The result metadata, including name and units, are also replaced by those
of the 'region_cubes' (which must all be the same).
Args:
* full_mesh_cube
Describes the full mesh and mesh-location to which the region data
refers, and acts as a template for the result.
Must have a :class:`~iris.experimental.ugrid.mesh.Mesh`.
* region_cubes
Contain data on a subset of the 'full_mesh_cube' mesh locations.
The region cubes do not need to have a mesh. There must be at least
1 of them, to determine the result phenomenon.
Their shapes and dimension-coords must all match those of
'full_mesh_cube', except in the mesh dimension, which can have
different sizes between the regions, and from the 'full_mesh_cube'.
The mesh dimension of each region cube must have a 1-D coord named by
'index_coord_name'. Although these region index coords can vary in
length, they must all have matching metadata (names, units and
attributes), and must also match the coord of that name in the
'full_mesh_cube', if there is one.
The ".points" values of the region index coords specify, for each
datapoint, its location in the original mesh -- i.e. they are indices
into the relevant mesh-location dimension.
* index_coord_name
Coord name of the index coord in each region cube, containing the
mesh location indices.
Result:
* result_cube
An unstructured cube identical to 'full_mesh_cube', and with the
same mesh and location, but with its data and ".metadata" replaced by
that from the 'region_cubes'.
Where regions overlap, the result data comes from the last-listed of the
original region cubes which contain that location.
Where no region contains a datapoint, it will be masked in the result.
HINT: alternatively, values covered by no region can be taken from the
original 'full_mesh_cube' data, if 'full_mesh_cube' is *also* passed
as the first of the 'region_cubes'.
"""
if not region_cubes:
raise ValueError("'region_cubes' must be non-empty.")
mesh_dim = full_mesh_cube.mesh_dim()
if mesh_dim is None:
raise ValueError("'full_mesh_cube' has no \".mesh\".")
# Check the basic required properties of the input.
mesh_dim_coords = full_mesh_cube.coords(
dim_coords=True, dimensions=(mesh_dim,)
)
if not mesh_dim_coords:
err = (
"'full_mesh_cube' has no dim-coord on the mesh dimension, "
f"(dimension {mesh_dim})."
)
raise ValueError(err)
#
# Perform consistency checks on all the region-cubes.
#
def metadata_no_varname(cube_or_coord):
# Get a metadata object but omit any var_name.
metadata = cube_or_coord.metadata
fields = metadata._asdict()
fields["var_name"] = None
result = metadata.__class__(**fields)
return result
n_regions = len(region_cubes)
n_dims = full_mesh_cube.ndim
regioncube_metadata = None
indexcoord_metadata = None
for i_region, region_cube in enumerate(region_cubes):
reg_cube_str = (
f'Region cube #{i_region}/{n_regions}, "{region_cube.name()}"'
)
reg_ndims = region_cube.ndim
# Check dimensionality.
if reg_ndims != n_dims:
err = (
f"{reg_cube_str} has {reg_ndims} dimensions, but "
f"'full_mesh_cube' has {n_dims}."
)
raise ValueError(err)
# Get region_cube metadata, which will apply to the result..
region_cube_metadata = metadata_no_varname(region_cube)
if regioncube_metadata is None:
# Store the first region-cube metadata as a reference
regioncube_metadata = region_cube_metadata
elif region_cube_metadata != regioncube_metadata:
# Check subsequent region-cubes metadata against the first.
err = (
f"{reg_cube_str} has metadata {region_cube_metadata}, "
"which does not match that of the first region region_cube, "
f'"{region_cubes[0].name()}", '
f"which is {regioncube_metadata}."
)
raise ValueError(err)
# For each dim, check that coords match other regions, and full-cube.
for i_dim in range(full_mesh_cube.ndim):
if i_dim == mesh_dim:
# mesh dim : look for index coords (by name).
fulldim = full_mesh_cube.coords(
name_or_coord=index_coord_name, dimensions=(i_dim,)
)
regdim = region_cube.coords(
name_or_coord=index_coord_name, dimensions=(i_dim,)
)
else:
# non-mesh dims : look for dim-coords (only)
fulldim = full_mesh_cube.coords(
dim_coords=True, dimensions=(i_dim,)
)
regdim = region_cube.coords(
dim_coords=True, dimensions=(i_dim,)
)
if fulldim:
(fulldim,) = fulldim
full_dimname = fulldim.name()
fulldim_metadata = metadata_no_varname(fulldim)
if regdim:
(regdim,) = regdim
reg_dimname = regdim.name()
regdim_metadata = metadata_no_varname(regdim)
err = None
# N.B. checks for mesh- and non-mesh-dims are different.
if i_dim != mesh_dim:
# i_dim != mesh_dim : checks for non-mesh dims.
if fulldim and not regdim:
err = (
f"{reg_cube_str} has no dim-coord for dimension "
"{i_dim}, to match the 'full_mesh_cube' dimension "
f'"{full_dimname}".'
)
elif regdim and not fulldim:
err = (
f'{reg_cube_str} has a dim-coord "{reg_dimname}" for '
f"dimension {i_dim}, but 'full_mesh_cube' has none."
)
elif regdim != fulldim:
err = (
f'{reg_cube_str} has a dim-coord "{reg_dimname}" for '
f"dimension {i_dim}, which does not match that "
f"of 'full_mesh_cube', \"{full_dimname}\"."
)
else:
# i_dim == mesh_dim : different rules for this one
if not regdim:
# Must have an index coord on the mesh dimension
err = (
f'{reg_cube_str} has no "{index_coord_name}" coord on '
f"the mesh dimension (dimension {mesh_dim})."
)
elif fulldim and regdim_metadata != fulldim_metadata:
# May *not* have full-cube index, but if so it must match
err = (
f"{reg_cube_str} has an index coord "
f'"{index_coord_name}" whose ".metadata" does not '
"match that on 'full_mesh_cube' : "
f"{regdim_metadata} != {fulldim_metadata}."
)
# At this point, we know we *have* an index coord, and it does not
# conflict with the one on 'full_mesh_cube' (if any).
# Now check for matches between the region cubes.
if indexcoord_metadata is None:
# Store first occurrence (from first region-cube)
indexcoord_metadata = regdim_metadata
elif regdim_metadata != indexcoord_metadata:
# Compare subsequent occurrences (from other region-cubes)
err = (
f"{reg_cube_str} has an index coord "
f'"{index_coord_name}" whose ".metadata" does not '
f"match that of the first region-cube : "
f"{regdim_metadata} != {indexcoord_metadata}."
)
if err:
raise ValueError(err)
# Use the mesh_dim to transpose inputs + outputs, if required, as it is
# simpler for all the array operations to always have the mesh dim *last*.
if mesh_dim == full_mesh_cube.ndim - 1:
# Mesh dim is already the last one : no tranposes required
untranspose_dims = None
else:
dim_range = np.arange(full_mesh_cube.ndim, dtype=int)
# Transpose all inputs to mesh-last order.
tranpose_dims = [i_dim for i_dim in dim_range if i_dim != mesh_dim] + [
mesh_dim
] # chop out mesh_dim + put it at the end.
def transposed_copy(cube, dim_order):
cube = cube.copy()
cube.transpose(dim_order)
return cube
full_mesh_cube = transposed_copy(full_mesh_cube, tranpose_dims)
region_cubes = [
transposed_copy(region_cube, tranpose_dims)
for region_cube in region_cubes
]
# Also prepare for transforming the output back to the original order.
untranspose_dims = dim_range.copy()
# Neat trick to produce the reverse operation.
untranspose_dims[tranpose_dims] = dim_range
#
# Here's the core operation..
#
def fill_region(target, regiondata, regioninds):
if not target.flags.writeable:
# The initial input can be a section of a da.zeros(), which has no
# real array "behind" it. This means that real arrays created in
# memory are only chunk-sized, but it also means that 'target' may
# not be writeable. So take a copy to fix that, where needed.
target = target.copy()
# N.B. Indices are basically 1D, but may have leading *1 dims for
# alignment, to satisfy da.map_blocks
assert all(size == 1 for size in regioninds.shape[:-1])
inds = regioninds.flatten()
# Assign blocks with indexing on the last dim only.
target[..., inds] = regiondata
return target
# Create an initially 'empty' (all-masked) dask array matching the input.
# N.B. this does not use the full_mesh_cube.lazy_data() array, but only its
# shape and dtype, since the data itself is not used in the calculation.
# N.B. chunking matches the input cube, allowing performance control.
input_data = full_mesh_cube.lazy_data()
result_array = da.ma.masked_array(
da.zeros(
input_data.shape,
dtype=input_data.dtype,
chunks=input_data.chunksize,
),
True,
)
# Wrap this repeatedly with a lazy operation to assign each region.
# It is done this way because we couldn't get map_blocks to correctly wrap
# a function which does all regions in a single operation.
# TODO: replace with a single-stage solution: Probably better, if possible.
# Notes on resultant calculation properties:
# 1. map_blocks is chunk-mapped, so it is parallelisable and space-saving
# 2. However, fetching less than a whole chunk is not efficient
for region_cube in region_cubes:
# Lazy data array from the region cube
datarr = region_cube.lazy_data()
# Lazy indices from the mesh-dim coord.
mesh_dimcoord = region_cube.coord(
name_or_coord=index_coord_name, dimensions=region_cube.ndim - 1
)
indarr = mesh_dimcoord.lazy_points()
# Extend indarr dimensions to align it with the 'target' array dims.
assert indarr.ndim == 1
shape = (1,) * (region_cube.ndim - 1) + indarr.shape
indarr = indarr.reshape(shape)
# Apply the operation to paste from one region into the target.
# N.B. replacing 'result_array' each time around the loop.
result_array = da.map_blocks(
fill_region,
result_array,
datarr,
indarr,
dtype=result_array.dtype,
meta=np.ndarray,
)
# Construct the result cube.
result_cube = full_mesh_cube.copy()
result_cube.data = result_array
# Copy names, units + attributes from region data (N.B. but not var_name)
result_cube.metadata = regioncube_metadata
if untranspose_dims:
# Re-order dims as in the original input.
result_cube.transpose(untranspose_dims)
return result_cube
|
def recombine_regions(
full_mesh_cube: Cube,
region_cubes: Iterable[Cube],
index_coord_name: AnyStr = "i_mesh_index",
) -> Cube:
"""
Put data from regional sub-meshes back onto the original full mesh.
The result is a cube identical to 'full_mesh_cube', but with its data
replaced by a combination of data from the provided 'region_cubes'.
The result metadata, including name and units, are also replaced by those
of the 'region_cubes' (which must all be the same).
Args:
* full_mesh_cube
Describes the full mesh and mesh-location to which the region data
refers, and acts as a template for the result.
Must have a :class:`~iris.experimental.ugrid.mesh.Mesh`.
* region_cubes
Contain data on a subset of the 'full_mesh_cube' mesh locations.
The region cubes do not need to have a mesh. There must be at least
1 of them, to determine the result phenomenon.
Their shapes and dimension-coords must all match those of
'full_mesh_cube', except in the mesh dimension, which can have
different sizes between the regions, and from the 'full_mesh_cube'.
The mesh dimension of each region cube must have a 1-D coord named by
'index_coord_name'. Although these region index coords can vary in
length, they must all have matching metadata (names, units and
attributes), and must also match the coord of that name in the
'full_mesh_cube', if there is one.
The ".points" values of the region index coords specify, for each
datapoint, its location in the original mesh -- i.e. they are indices
into the relevant mesh-location dimension.
* index_coord_name
Coord name of the index coord in each region cube, containing the
mesh location indices.
Result:
* result_cube
An unstructured cube identical to 'full_mesh_cube', and with the
same mesh and location, but with its data and ".metadata" replaced by
that from the 'region_cubes'.
Where regions overlap, the result data comes from the last-listed of the
original region cubes which contain that location.
Where no region contains a datapoint, it will be masked in the result.
HINT: alternatively, values covered by no region can be taken from the
original 'full_mesh_cube' data, if 'full_mesh_cube' is *also* passed
as the first of the 'region_cubes'.
"""
if not region_cubes:
raise ValueError("'region_cubes' must be non-empty.")
mesh_dim = full_mesh_cube.mesh_dim()
if mesh_dim is None:
raise ValueError("'full_mesh_cube' has no \".mesh\".")
# Check the basic required properties of the input.
mesh_dim_coords = full_mesh_cube.coords(
dim_coords=True, dimensions=(mesh_dim,)
)
if not mesh_dim_coords:
err = (
"'full_mesh_cube' has no dim-coord on the mesh dimension, "
f"(dimension {mesh_dim})."
)
raise ValueError(err)
#
# Perform consistency checks on all the region-cubes.
#
def metadata_no_varname(cube_or_coord):
# Get a metadata object but omit any var_name.
metadata = cube_or_coord.metadata
fields = metadata._asdict()
fields["var_name"] = None
result = metadata.__class__(**fields)
return result
n_regions = len(region_cubes)
n_dims = full_mesh_cube.ndim
regioncube_metadata = None
indexcoord_metadata = None
for i_region, region_cube in enumerate(region_cubes):
reg_cube_str = (
f'Region cube #{i_region}/{n_regions}, "{region_cube.name()}"'
)
reg_ndims = region_cube.ndim
# Check dimensionality.
if reg_ndims != n_dims:
err = (
f"{reg_cube_str} has {reg_ndims} dimensions, but "
f"'full_mesh_cube' has {n_dims}."
)
raise ValueError(err)
# Get region_cube metadata, which will apply to the result..
region_cube_metadata = metadata_no_varname(region_cube)
if regioncube_metadata is None:
# Store the first region-cube metadata as a reference
regioncube_metadata = region_cube_metadata
elif region_cube_metadata != regioncube_metadata:
# Check subsequent region-cubes metadata against the first.
err = (
f"{reg_cube_str} has metadata {region_cube_metadata}, "
"which does not match that of the first region region_cube, "
f'"{region_cubes[0].name()}", '
f"which is {regioncube_metadata}."
)
raise ValueError(err)
# For each dim, check that coords match other regions, and full-cube.
for i_dim in range(full_mesh_cube.ndim):
if i_dim == mesh_dim:
# mesh dim : look for index coords (by name).
fulldim = full_mesh_cube.coords(
name_or_coord=index_coord_name, dimensions=(i_dim,)
)
regdim = region_cube.coords(
name_or_coord=index_coord_name, dimensions=(i_dim,)
)
else:
# non-mesh dims : look for dim-coords (only)
fulldim = full_mesh_cube.coords(
dim_coords=True, dimensions=(i_dim,)
)
regdim = region_cube.coords(
dim_coords=True, dimensions=(i_dim,)
)
if fulldim:
(fulldim,) = fulldim
full_dimname = fulldim.name()
fulldim_metadata = metadata_no_varname(fulldim)
if regdim:
(regdim,) = regdim
reg_dimname = regdim.name()
regdim_metadata = metadata_no_varname(regdim)
err = None
# N.B. checks for mesh- and non-mesh-dims are different.
if i_dim != mesh_dim:
# i_dim != mesh_dim : checks for non-mesh dims.
if fulldim and not regdim:
err = (
f"{reg_cube_str} has no dim-coord for dimension "
"{i_dim}, to match the 'full_mesh_cube' dimension "
f'"{full_dimname}".'
)
elif regdim and not fulldim:
err = (
f'{reg_cube_str} has a dim-coord "{reg_dimname}" for '
f"dimension {i_dim}, but 'full_mesh_cube' has none."
)
elif regdim != fulldim:
err = (
f'{reg_cube_str} has a dim-coord "{reg_dimname}" for '
f"dimension {i_dim}, which does not match that "
f"of 'full_mesh_cube', \"{full_dimname}\"."
)
else:
# i_dim == mesh_dim : different rules for this one
if not regdim:
# Must have an index coord on the mesh dimension
err = (
f'{reg_cube_str} has no "{index_coord_name}" coord on '
f"the mesh dimension (dimension {mesh_dim})."
)
elif fulldim and regdim_metadata != fulldim_metadata:
# May *not* have full-cube index, but if so it must match
err = (
f"{reg_cube_str} has an index coord "
f'"{index_coord_name}" whose ".metadata" does not '
"match that on 'full_mesh_cube' : "
f"{regdim_metadata} != {fulldim_metadata}."
)
# At this point, we know we *have* an index coord, and it does not
# conflict with the one on 'full_mesh_cube' (if any).
# Now check for matches between the region cubes.
if indexcoord_metadata is None:
# Store first occurrence (from first region-cube)
indexcoord_metadata = regdim_metadata
elif regdim_metadata != indexcoord_metadata:
# Compare subsequent occurrences (from other region-cubes)
err = (
f"{reg_cube_str} has an index coord "
f'"{index_coord_name}" whose ".metadata" does not '
f"match that of the first region-cube : "
f"{regdim_metadata} != {indexcoord_metadata}."
)
if err:
raise ValueError(err)
# Use the mesh_dim to transpose inputs + outputs, if required, as it is
# simpler for all the array operations to always have the mesh dim *last*.
if mesh_dim == full_mesh_cube.ndim - 1:
# Mesh dim is already the last one : no transpose required
untranspose_dims = None
else:
dim_range = np.arange(full_mesh_cube.ndim, dtype=int)
# Transpose all inputs to mesh-last order.
tranpose_dims = [i_dim for i_dim in dim_range if i_dim != mesh_dim] + [
mesh_dim
] # chop out mesh_dim + put it at the end.
def transposed_copy(cube, dim_order):
cube = cube.copy()
cube.transpose(dim_order)
return cube
full_mesh_cube = transposed_copy(full_mesh_cube, tranpose_dims)
region_cubes = [
transposed_copy(region_cube, tranpose_dims)
for region_cube in region_cubes
]
# Also prepare for transforming the output back to the original order.
untranspose_dims = dim_range.copy()
# Neat trick to produce the reverse operation.
untranspose_dims[tranpose_dims] = dim_range
#
# Here's the core operation..
#
def fill_region(target, regiondata, regioninds):
if not target.flags.writeable:
# The initial input can be a section of a da.zeros(), which has no
# real array "behind" it. This means that real arrays created in
# memory are only chunk-sized, but it also means that 'target' may
# not be writeable. So take a copy to fix that, where needed.
target = target.copy()
# N.B. Indices are basically 1D, but may have leading *1 dims for
# alignment, to satisfy da.map_blocks
assert all(size == 1 for size in regioninds.shape[:-1])
inds = regioninds.flatten()
# Assign blocks with indexing on the last dim only.
target[..., inds] = regiondata
return target
# Create an initially 'empty' (all-masked) dask array matching the input.
# N.B. this does not use the full_mesh_cube.lazy_data() array, but only its
# shape and dtype, since the data itself is not used in the calculation.
# N.B. chunking matches the input cube, allowing performance control.
input_data = full_mesh_cube.lazy_data()
result_array = da.ma.masked_array(
da.zeros(
input_data.shape,
dtype=input_data.dtype,
chunks=input_data.chunksize,
),
True,
)
# Wrap this repeatedly with a lazy operation to assign each region.
# It is done this way because we couldn't get map_blocks to correctly wrap
# a function which does all regions in a single operation.
# TODO: replace with a single-stage solution: Probably better, if possible.
# Notes on resultant calculation properties:
# 1. map_blocks is chunk-mapped, so it is parallelisable and space-saving
# 2. However, fetching less than a whole chunk is not efficient
for region_cube in region_cubes:
# Lazy data array from the region cube
datarr = region_cube.lazy_data()
# Lazy indices from the mesh-dim coord.
mesh_dimcoord = region_cube.coord(
name_or_coord=index_coord_name, dimensions=region_cube.ndim - 1
)
indarr = mesh_dimcoord.lazy_points()
# Extend indarr dimensions to align it with the 'target' array dims.
assert indarr.ndim == 1
shape = (1,) * (region_cube.ndim - 1) + indarr.shape
indarr = indarr.reshape(shape)
# Apply the operation to paste from one region into the target.
# N.B. replacing 'result_array' each time around the loop.
result_array = da.map_blocks(
fill_region,
result_array,
datarr,
indarr,
dtype=result_array.dtype,
meta=np.ndarray,
)
# Construct the result cube.
result_cube = full_mesh_cube.copy()
result_cube.data = result_array
# Copy names, units + attributes from region data (N.B. but not var_name)
result_cube.metadata = regioncube_metadata
if untranspose_dims:
# Re-order dims as in the original input.
result_cube.transpose(untranspose_dims)
return result_cube
|
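Note on the pair above: besides the comment fix, the interesting step is the "untranspose" computation, where assigning the identity through the forward permutation yields its inverse. A standalone NumPy check of that trick (toy shapes only):
import numpy as np

dim_range = np.arange(4)
transpose_dims = np.array([0, 2, 3, 1])   # move axis 1 to the end (mesh-last)

untranspose_dims = dim_range.copy()
untranspose_dims[transpose_dims] = dim_range
print(untranspose_dims)                   # [0 3 1 2], the inverse permutation

a = np.random.rand(2, 3, 4, 5)
b = a.transpose(transpose_dims)
c = b.transpose(untranspose_dims)
print(c.shape == a.shape, np.array_equal(a, c))   # True True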
6,382 |
def _get_agents_sorted_by_asc_workload():
appointments = frappe.db.get_list('Appointment', fields='*')
agent_list = _get_agent_list_as_strings()
if not appointments:
return agent_list
appointment_counter = Counter(agent_list)
for appointment in appointments:
assigned_to = frappe.parse_json(appointment._assign)
print(assigned_to)
if appointment._assign == '[]' or not appointment._assign:
continue
if assigned_to[0] in agent_list:
appointment_counter[assigned_to[0]] += 1
sorted_agent_list = appointment_counter.most_common()
sorted_agent_list.reverse()
return sorted_agent_list
|
def _get_agents_sorted_by_asc_workload():
appointments = frappe.db.get_list('Appointment', fields='*')
agent_list = _get_agent_list_as_strings()
if not appointments:
return agent_list
appointment_counter = Counter(agent_list)
for appointment in appointments:
assigned_to = frappe.parse_json(appointment._assign)
print(assigned_to)
if not assigned_to:
continue
if assigned_to[0] in agent_list:
appointment_counter[assigned_to[0]] += 1
sorted_agent_list = appointment_counter.most_common()
sorted_agent_list.reverse()
return sorted_agent_list
|
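Note on the pair above: the rewrite replaces the string comparison against '[]' with a plain truthiness check on the parsed value. The ascending-workload ordering itself relies on reversing Counter.most_common(); a minimal sketch with placeholder agent names (seeded here with zero counts so idle agents still appear):
from collections import Counter

agents = ["a@example.com", "b@example.com", "c@example.com"]   # placeholders
assignments = ["b@example.com", "b@example.com", "c@example.com"]

workload = Counter({agent: 0 for agent in agents})
for assignee in assignments:
    if assignee in workload:
        workload[assignee] += 1

# most_common() sorts by descending count; reversing yields ascending workload.
ascending = list(reversed(workload.most_common()))
print(ascending)   # [('a@example.com', 0), ('c@example.com', 1), ('b@example.com', 2)]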
53,782 |
def busmap_by_hac(network, n_clusters, buses_i=None, branch_components=["Line", "Link"], feature=None):
"""
Create a busmap according to Hierarchical Agglomerative Clustering.
Parameters
----------
network : pypsa.Network
n_clusters : int
Final number of clusters desired.
buses_i: None|pandas.Index
Subset of buses to cluster. If None, all buses are considered.
branch_components: List
Subset of all branch_components in the network. Defaults to ["Line", "Link"].
feature: None | pandas.DataFrame
Feature to be considered for the clustering.
The DataFrame must be indexed with buses_i.
If None, all buses have the same similarity.
Returns
-------
busmap : pandas.Series
Mapping of network.buses to clusters (indexed by
non-negative integers).
"""
if find_spec('sklearn') is None:
raise ModuleNotFoundError("Optional dependency 'sklearn' not found."
"Install via 'conda install -c conda-forge scikit-learn' "
"or 'pip install scikit-learn'")
from sklearn.cluster import AgglomerativeClustering as HAC
if buses_i is None:
buses_i = network.buses.index
if feature is None:
logger.warning(
"No feature is specified for Hierarchical Clustering. "
"Falling back to default, where all buses have equal similarity. "
"You can specify a feature as pandas.DataFrame indexed with buses_i."
)
feature = pd.DataFrame(index=buses_i, columns=[""], data=0)
buses_x = network.buses.index.get_indexer(buses_i)
A = network.adjacency_matrix(branch_components=branch_components).todense()
A = sp.sparse.coo_matrix(A[buses_x].T[buses_x].T)
labels = HAC(n_clusters=n_clusters,
connectivity=A,
affinity='euclidean',
linkage='ward').fit_predict(feature)
busmap = pd.Series(data=labels, index=buses_i, dtype='str')
return busmap
|
def busmap_by_hac(network, n_clusters, buses_i=None, branch_components=["Line", "Link"], feature=None):
"""
Create a busmap according to Hierarchical Agglomerative Clustering.
Parameters
----------
network : pypsa.Network
n_clusters : int
Final number of clusters desired.
buses_i: None|pandas.Index
Subset of buses to cluster. If None, all buses are considered.
branch_components: List
Subset of all branch_components in the network. Defaults to ["Line", "Link"].
feature: None | pandas.DataFrame
Feature to be considered for the clustering.
The DataFrame must be indexed with buses_i.
If None, all buses have the same similarity.
Returns
-------
busmap : pandas.Series
Mapping of network.buses to clusters (indexed by
non-negative integers).
"""
if find_spec('sklearn') is None:
raise ModuleNotFoundError("Optional dependency 'sklearn' not found."
"Install via 'conda install -c conda-forge scikit-learn' "
"or 'pip install scikit-learn'")
from sklearn.cluster import AgglomerativeClustering as HAC
if buses_i is None:
buses_i = network.buses.index
if feature is None:
logger.warning(
"No feature is specified for Hierarchical Clustering. "
"Falling back to default, where all buses have equal similarity. "
"You can specify a feature as pandas.Series indexed with buses_i."
)
feature = pd.Series(0, index=buses_i)
buses_x = network.buses.index.get_indexer(buses_i)
A = network.adjacency_matrix(branch_components=branch_components).todense()
A = sp.sparse.coo_matrix(A[buses_x].T[buses_x].T)
labels = HAC(n_clusters=n_clusters,
connectivity=A,
affinity='euclidean',
linkage='ward').fit_predict(feature)
busmap = pd.Series(data=labels, index=buses_i, dtype='str')
return busmap
|
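Note on the pair above: when no feature is given, every bus looks identical and the adjacency-based connectivity constraint alone shapes the clusters. A toy sketch of connectivity-constrained agglomerative clustering on a six-node chain (assumes scikit-learn and scipy are installed; with all-zero features the exact tie-break is implementation-dependent, but the groups stay contiguous):
import numpy as np
import scipy.sparse as sp
from sklearn.cluster import AgglomerativeClustering

n = 6
# Chain adjacency: node i is connected to node i + 1.
rows = np.arange(n - 1)
cols = np.arange(1, n)
A = sp.coo_matrix((np.ones(n - 1), (rows, cols)), shape=(n, n))
A = A + A.T   # symmetrise

# A constant feature makes every node equally similar, so connectivity alone
# drives the merges (the fallback case in the function above).
feature = np.zeros((n, 1))

labels = AgglomerativeClustering(
    n_clusters=2, connectivity=A, linkage="ward"
).fit_predict(feature)
print(labels)   # two contiguous groups along the chain, e.g. [0 0 0 1 1 1]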
45,588 |
def ReminderCancelled(
name: Optional[Text] = None,
intent_name: Optional[Text] = None,
entities: Union[List[Dict[Text, Any]], Dict[Text, Text]] = None,
timestamp: Optional[float] = None,
) -> EventType:
return {
"event": "cancel_reminder",
"timestamp": timestamp,
"intent": intent_name,
"entities": entities,
"name": name,
}
|
def ReminderCancelled(
name: Optional[Text] = None,
intent_name: Optional[Text] = None,
entities: Optional[Union[List[Dict[Text, Any]], Dict[Text, Text]]] = None,
timestamp: Optional[float] = None,
) -> EventType:
return {
"event": "cancel_reminder",
"timestamp": timestamp,
"intent": intent_name,
"entities": entities,
"name": name,
}
|
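Note on the pair above: the only change is making the None default explicit in the annotation. Strict type checkers generally flag "implicit Optional", so a parameter defaulting to None should be typed Optional[...]. A tiny self-contained illustration (simplified to str in place of Text; the function name is illustrative):
from typing import Any, Dict, List, Optional, Union

EntityList = Union[List[Dict[str, Any]], Dict[str, str]]

def cancel(entities: Optional[EntityList] = None) -> Dict[str, Any]:
    # With Optional[...] the None default matches the declared type.
    return {"event": "cancel_reminder", "entities": entities}

print(cancel())                          # entities defaults to None
print(cancel([{"name": "wake_up"}]))     # explicit entity list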
48,963 |
def test_waits_for_visibility_minimum_needed_time(session_browser):
page = GivenPage(session_browser.driver)
page.opened_with_body(
'''
<a href="#second" style="display:none">go to Heading 2</a>
<h2 id="second">Heading 2</h2>'''
).execute_script_with_timeout(
'document.getElementsByTagName("a")[0].style = "display:block";', 0.5
)
element = session_browser.element('a')
# stamp_before = time.time_ns()
element.click()
# stamp_after = time.time_ns()
# deviation_sec = 0.2
# assert stamp_after - stamp_before < (0.5 + deviation_sec) * 1000000
assert "second" in session_browser.driver.current_url
|
def test_waits_for_visibility_minimum_needed_time(session_browser):
page = GivenPage(session_browser.driver)
page.opened_with_body(
'''
<a href="#second" style="display:none">go to Heading 2</a>
<h2 id="second">Heading 2</h2>'''
).execute_script_with_timeout(
'document.getElementsByTagName("a")[0].style = "display:block";', 0.5
)
element = session_browser.element('a')
# stamp_before = time.time_ns()
element.click()
# stamp_after = time.time_ns()
# deviation_sec = 0.2
# assert stamp_after - stamp_before < (0.5 + deviation_sec) * 1000000000
# deviation_sec = 0.2
# assert stamp_after - stamp_before < (0.5 + deviation_sec) * pow(10, 9)
assert "second" in session_browser.driver.current_url
|
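Note on the pair above: the commented-out assertion was corrected from a 10**6 factor to nanosecond units; time.time_ns() measures nanoseconds, so a 0.5 s budget corresponds to 0.5 * 10**9 ns. A quick standalone check of that arithmetic:
import time

budget_sec = 0.5
deviation_sec = 0.2

start = time.time_ns()
time.sleep(0.1)               # stand-in for the click-and-wait being timed
elapsed_ns = time.time_ns() - start

print(elapsed_ns < (budget_sec + deviation_sec) * pow(10, 9))   # True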
10,115 |
def deprecation_warning(module):
deprecated_aliases = ['login_token']
for aliase in deprecated_aliases:
if aliase in module.params:
module.deprecate("Aliases \'{aliase}\' is deprecated".format(aliase=aliase), "2.10")
|
def deprecation_warning(module):
deprecated_aliases = ['login_token']
for aliase in deprecated_aliases:
if aliase in module.params:
module.deprecate("Alias \'{aliase}\' is deprecated".format(aliase=aliase), "2.10")
|
30,846 |
def map_scim(clientData):
try:
clientData = json.loads(clientData)
except Exception:
pass
if type(clientData) != dict:
raise Exception('Provided client data is not JSON compatible')
scim_extension = INPUT_SCIM_EXTENSION_KEY.replace('.', '\.')
mapping = {
"active": "active",
"addressCountry": "addresses(val.primary && val.primary==true).[0].country",
"addressFormatted": "addresses(val.primary && val.primary==true).[0].formatted",
"addressLocailty": "addresses(val.primary && val.primary==true).[0].locality",
"addressPostalCode": "addresses(val.primary && val.primary==true).[0].postalCode",
"addressRegion": "addresses(val.primary && val.primary==true).[0].region",
"addressStreeetAddress": "addresses(val.primary && val.primary==true).[0].streetAddress",
"addressType": "addresses(val.primary && val.primary==true).[0].type",
"costCenter": scim_extension + ".costCenter",
"department": scim_extension + ".department",
"displayName": "displayName",
"division": scim_extension + ".division",
"email": "emails(val.primary && val.primary==true).[0].value",
"emailType": "emails(val.primary && val.primary==true).[0].type",
"employeeNumber": scim_extension + ".employeeNumber",
"groups": "groups(val.display).display",
"id": "id",
"externalId": "externalId",
"locale": "locale",
"manager": scim_extension + ".manager.value",
"nameFormatted": "name.formatted",
"nameFamilyName": "name.familyName",
"nameGivenName": "name.givenName",
"nameHonorificPrefix": "name.honorificPrefix",
"nameHonorificSuffix": "name.honorificSuffix",
"nameMiddleName": "name.middleName",
"nickName": "nickName",
"organization": scim_extension + ".organization",
"password": "password",
"photo": "photos(val.type && val.type=='photo').[0].value",
"preferredLanguage": "preferredLanguage",
"profileUrl": "profileUrl",
"thumbnnail": "photos(val.type && val.type=='thumbnail').[0].value",
"timezone": "timezone",
"title": "title",
"userName": "userName",
"userType": "userType",
}
ret = dict()
for k, v in mapping.items():
try:
ret[k] = demisto.dt(clientData, v)
except Exception:
ret[k] = None
return ret
|
def map_scim(clientData):
try:
clientData = json.loads(clientData)
except Exception:
pass
if type(clientData) != dict:
raise Exception('Provided client data is not JSON compatible')
scim_extension = INPUT_SCIM_EXTENSION_KEY.replace('.', '\.')
mapping = {
"active": "active",
"addressCountry": "addresses(val.primary && val.primary==true).[0].country",
"addressFormatted": "addresses(val.primary && val.primary==true).[0].formatted",
"addressLocailty": "addresses(val.primary && val.primary==true).[0].locality",
"addressPostalCode": "addresses(val.primary && val.primary==true).[0].postalCode",
"addressRegion": "addresses(val.primary && val.primary==true).[0].region",
"addressStreeetAddress": "addresses(val.primary && val.primary==true).[0].streetAddress",
"addressType": "addresses(val.primary && val.primary==true).[0].type",
"costCenter": scim_extension + ".costCenter",
"department": scim_extension + ".department",
"displayName": "displayName",
"division": scim_extension + ".division",
"email": "emails(val.primary && val.primary==true).[0].value",
"emailType": "emails(val.primary && val.primary==true).[0].type",
"employeeNumber": scim_extension + ".employeeNumber",
"groups": "groups(val.display).display",
"id": "id",
"externalId": "externalId",
"locale": "locale",
"manager": scim_extension + ".manager.value",
"nameFormatted": "name.formatted",
"nameFamilyName": "name.familyName",
"nameGivenName": "name.givenName",
"nameHonorificPrefix": "name.honorificPrefix",
"nameHonorificSuffix": "name.honorificSuffix",
"nameMiddleName": "name.middleName",
"nickName": "nickName",
"organization": scim_extension + ".organization",
"password": "password",
"photo": "photos(val.type && val.type=='photo').[0].value",
"preferredLanguage": "preferredLanguage",
"profileUrl": "profileUrl",
"thumbnnail": "photos(val.type && val.type=='thumbnail').[0].value",
"timezone": "timezone",
"title": "title",
"userName": "userName",
"userType": "userType",
}
ret = dict()
for k, v in mapping.items():
try:
ret[k] = demisto.dt(client_data, v)
except Exception:
ret[k] = None
return ret
|
38,064 |
def list_sample_data():
"""
Report datasets available for tests and documentation examples.
Returns
-------
dict
Names and short descriptions of available sample datasets.
See Also
--------
load_sample_data : Load an example dataset from the GMT server.
"""
names = {
"bathymetry": "Table of ship bathymetric observations off Baja California",
"fractures": "Table of hypothetical fracture lengths and azimuths",
"hotspots": "Table of locations, names, and symbol sizes of hotpots from "
" Mueller et al., 1993",
"japan_quakes": "Table of earthquakes around Japan from NOAA NGDC database",
"mars_shape": "Table of topographic signature of the hemispheric dichotomy of "
" Mars from Smith and Zuber (1996)",
"ocean_ridge_points": "Table of ocean ridge points for the entire world",
"static_earth_relief": "Sample grid used for testing",
"usgs_quakes": "Table of global earthquakes from the USGS",
}
return names
|
def list_sample_data():
"""
Report datasets available for tests and documentation examples.
Returns
-------
dict
Names and short descriptions of available sample datasets.
See Also
--------
load_sample_data : Load an example dataset from the GMT server.
"""
names = {
"bathymetry": "Table of ship bathymetric observations off Baja California",
"fractures": "Table of hypothetical fracture lengths and azimuths",
"hotspots": "Table of locations, names, and symbol sizes of hotpots from "
" Mueller et al., 1993",
"japan_quakes": "Table of earthquakes around Japan from NOAA NGDC database",
"mars_shape": "Table of topographic signature of the hemispheric dichotomy of "
" Mars from Smith and Zuber (1996)",
"ocean_ridge_points": "Table of ocean ridge points for the entire world",
"static_earth_relief": "Sample grid used for testing based on Sandwell et al., 2022",
"usgs_quakes": "Table of global earthquakes from the USGS",
}
return names
|
20,052 |
def check_datasets_active(
dataset_ids: List[int],
raise_error_if_not_exist: bool = True,
) -> Dict[int, bool]:
"""
Check if the dataset ids provided are active.
Raises an error if a dataset_id in the given list
of dataset_ids does not exist on the server.
Parameters
----------
dataset_ids : List[int]
A list of integers representing dataset ids.
raise_error_if_not_exist : bool, optional (default=True)
Flag that if activated can raise an error, if one or more of the
given dataset ids do not exist on the server.
Returns
-------
dict
A dictionary with items {did: bool}
"""
dataset_list = list_datasets(
dataset_ids=dataset_ids,
status="all",
)
active = {}
for did in dataset_ids:
dataset = dataset_list.get(did, None)
if dataset is None:
if raise_error_if_not_exist:
raise ValueError(f'Could not find dataset {did} in OpenML dataset list.')
else:
active[did] = dataset["status"] == "active"
return active
|
def check_datasets_active(
dataset_ids: List[int],
raise_error_if_not_exist: bool = True,
) -> Dict[int, bool]:
"""
Check if the dataset ids provided are active.
Raises an error if a dataset_id in the given list
of dataset_ids does not exist on the server.
Parameters
----------
dataset_ids : List[int]
A list of integers representing dataset ids.
raise_error_if_not_exist : bool (default=True)
Flag that if activated can raise an error, if one or more of the
given dataset ids do not exist on the server.
Returns
-------
dict
A dictionary with items {did: bool}
"""
dataset_list = list_datasets(
dataset_ids=dataset_ids,
status="all",
)
active = {}
for did in dataset_ids:
dataset = dataset_list.get(did, None)
if dataset is None:
if raise_error_if_not_exist:
raise ValueError(f'Could not find dataset {did} in OpenML dataset list.')
else:
active[did] = dataset["status"] == "active"
return active
|
23,132 |
def test_roll_bug():
# This bug was exposed in GitHub Issue #8723
x = da.arange(2, 3)
y = da.roll(x, 1)
y[0] = 0
assert x[0].compute() == 2
|
def test_roll_always_results_in_a_new_array():
# This bug was exposed in GitHub Issue #8723
x = da.arange(2, 3)
y = da.roll(x, 1)
y[0] = 0
assert x[0].compute() == 2
|
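Note on the pair above: only the test name changes, to state the behaviour under test, namely that roll() hands back a new array rather than a view of its input. The same guarantee can be checked with plain NumPy:
import numpy as np

x = np.arange(2, 3)     # array([2])
y = np.roll(x, 1)       # roll returns a new array, not a view
y[0] = 0
print(x[0])             # 2; the input is unchanged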
1,374 |
def type_of_target_is_consistent_for_array_like_entities():
y_list = [[1, 1], [0, 1]]
y_np_ndarray = np.asarray(y_list)
expected_type_of_target = "multilable_indicator"
type_of_target_list = type_of_target(y_list)
type_of_target_nd_array = type_of_target(y_np_ndarray)
assert type_of_target_list == expected_type_of_target
assert type_of_target_list == type_of_target_nd_array
|
def type_of_target_multilabel_2d_lists():
y_list = [[1, 1], [0, 1]]
y_np_ndarray = np.asarray(y_list)
expected_type_of_target = "multilable_indicator"
type_of_target_list = type_of_target(y_list)
type_of_target_nd_array = type_of_target(y_np_ndarray)
assert type_of_target_list == expected_type_of_target
assert type_of_target_list == type_of_target_nd_array
|
7,285 |
def inverse(data, impulse_response=None, filter_params={}, max_gain=2,
predefined_filter=None):
"""Apply the filter in reverse to the given data.
Parameters
----------
data : (M,N) ndarray
Input data.
impulse_response : callable `f(r, c, **filter_params)`
Impulse response of the filter. See LPIFilter2D.__init__.
filter_params : dict
Additional keyword parameters to the impulse_response function.
max_gain : float
Limit the filter gain. Often, the filter contains zeros, which would
cause the inverse filter to have infinite gain. High gain causes
amplification of artefacts, so a conservative limit is recommended.
Other Parameters
----------------
predefined_filter : LPIFilter2D
If you need to apply the same filter multiple times over different
images, construct the LPIFilter2D and specify it here.
"""
check_nD(data, 2, 'data')
if predefined_filter is None:
filt = LPIFilter2D(impulse_response, **filter_params)
else:
filt = predefined_filter
F, G = filt._prepare(data)
_min_limit(F, val=np.finfo(float).eps)
F = 1 / F
mask = np.abs(F) > max_gain
F[mask] = np.sign(F[mask]) * max_gain
return _centre(np.abs(fft.ifftshift(fft.ifftn(G * F))), data.shape)
|
def inverse(data, impulse_response=None, filter_params={}, max_gain=2,
predefined_filter=None):
"""Apply the filter in reverse to the given data.
Parameters
----------
data : (M,N) ndarray
Input data.
impulse_response : callable `f(r, c, **filter_params)`
Impulse response of the filter. See LPIFilter2D.__init__.
filter_params : dict
Additional keyword parameters to the impulse_response function.
max_gain : float
Limit the filter gain. Often, the filter contains zeros, which would
cause the inverse filter to have infinite gain. High gain causes
amplification of artefacts, so a conservative limit is recommended.
Other Parameters
----------------
predefined_filter : LPIFilter2D
If you need to apply the same filter multiple times over different
images, construct the LPIFilter2D and specify it here.
"""
check_nD(data, 2, 'data')
if predefined_filter is None:
filt = LPIFilter2D(impulse_response, **filter_params)
else:
filt = predefined_filter
F, G = filt._prepare(data)
_min_limit(F, val=eps)
F = 1 / F
mask = np.abs(F) > max_gain
F[mask] = np.sign(F[mask]) * max_gain
return _centre(np.abs(fft.ifftshift(fft.ifftn(G * F))), data.shape)
|
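Note on the pair above: the change swaps np.finfo(float).eps for a module-level eps; the surrounding logic floors near-zero coefficients and then clips the inverted response so it never exceeds max_gain. A small self-contained sketch of that flooring-and-clipping step (illustrative values only, applied to the inverse rather than in place):
import numpy as np

F = np.array([1.0, 0.25, 0.0, -0.5])
max_gain = 2.0
eps = np.finfo(float).eps

F = np.where(np.abs(F) < eps, eps, F)   # floor near-zero coefficients
G = 1 / F
mask = np.abs(G) > max_gain
G[mask] = np.sign(G[mask]) * max_gain   # limit the gain, keeping the sign
print(G)   # [ 1.  2.  2. -2.]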
3,078 |
def _handle_shared_axes(axarr, nplots, naxes, nrows, ncols, sharex, sharey):
if nplots > 1:
if compat._mpl_ge_3_2_0():
row_num = lambda x: x.get_subplotspec().rowspan.start
col_num = lambda x: x.get_subplotspec().rowspan.start
else:
row_num = lambda x: x.rowNum
col_num = lambda x: x.colNum
if nrows > 1:
try:
# first find out the ax layout,
# so that we can correctly handle 'gaps'
layout = np.zeros((nrows + 1, ncols + 1), dtype=np.bool)
for ax in axarr:
layout[row_num(ax), col_num(ax)] = ax.get_visible()
for ax in axarr:
# only the last row of subplots should get x labels -> all
# other off layout handles the case that the subplot is
# the last in the column, because below is no subplot/gap.
if not layout[row_num(ax) + 1, col_num(ax)]:
continue
if sharex or len(ax.get_shared_x_axes().get_siblings(ax)) > 1:
_remove_labels_from_axis(ax.xaxis)
except IndexError:
# if gridspec is used, ax.rowNum and ax.colNum may differ
# from layout shape. in this case, use last_row logic
for ax in axarr:
if ax.is_last_row():
continue
if sharex or len(ax.get_shared_x_axes().get_siblings(ax)) > 1:
_remove_labels_from_axis(ax.xaxis)
if ncols > 1:
for ax in axarr:
# only the first column should get y labels -> set all other to
# off as we only have labels in the first column and we always
# have a subplot there, we can skip the layout test
if ax.is_first_col():
continue
if sharey or len(ax.get_shared_y_axes().get_siblings(ax)) > 1:
_remove_labels_from_axis(ax.yaxis)
|
def _handle_shared_axes(axarr, nplots, naxes, nrows, ncols, sharex, sharey):
if nplots > 1:
if compat._mpl_ge_3_2_0():
row_num = lambda x: x.get_subplotspec().rowspan.start
col_num = lambda x: x.get_subplotspec().colspan.start
else:
row_num = lambda x: x.rowNum
col_num = lambda x: x.colNum
if nrows > 1:
try:
# first find out the ax layout,
# so that we can correctly handle 'gaps'
layout = np.zeros((nrows + 1, ncols + 1), dtype=np.bool)
for ax in axarr:
layout[row_num(ax), col_num(ax)] = ax.get_visible()
for ax in axarr:
# only the last row of subplots should get x labels -> all
# other off layout handles the case that the subplot is
# the last in the column, because below is no subplot/gap.
if not layout[row_num(ax) + 1, col_num(ax)]:
continue
if sharex or len(ax.get_shared_x_axes().get_siblings(ax)) > 1:
_remove_labels_from_axis(ax.xaxis)
except IndexError:
# if gridspec is used, ax.rowNum and ax.colNum may differ
# from layout shape. in this case, use last_row logic
for ax in axarr:
if ax.is_last_row():
continue
if sharex or len(ax.get_shared_x_axes().get_siblings(ax)) > 1:
_remove_labels_from_axis(ax.xaxis)
if ncols > 1:
for ax in axarr:
# only the first column should get y labels -> set all other to
# off as we only have labels in the first column and we always
# have a subplot there, we can skip the layout test
if ax.is_first_col():
continue
if sharey or len(ax.get_shared_y_axes().get_siblings(ax)) > 1:
_remove_labels_from_axis(ax.yaxis)
|
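Note on the pair above: the fix makes col_num read colspan.start instead of rowspan.start. In recent Matplotlib versions (3.2+, per the compat check above) the SubplotSpec exposes rowspan and colspan as range objects whose .start is the grid position; a minimal headless check:
import matplotlib
matplotlib.use("Agg")          # headless backend for the example
import matplotlib.pyplot as plt

fig, axes = plt.subplots(2, 3)
ax = axes[1, 2]                # bottom-right axes
spec = ax.get_subplotspec()
print(spec.rowspan.start, spec.colspan.start)   # 1 2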
31,647 |
def main():
LOG(f'{demisto.command()} is called')
try:
if demisto.command() == 'test-module':
if get_health():
return_results('ok')
else:
return_results('nok')
elif demisto.command() == 'fetch-incidents':
next_run, incidents = fetch_incidents(
last_run=demisto.getLastRun(),
first_run_time_range=FIRST_RUN_TIME_RANGE)
demisto.setLastRun(next_run)
demisto.incidents(incidents)
elif demisto.command() == 'cov-secpr-list-alerts':
r = list_alerts()
results = CommandResults(
outputs_prefix='Covalence.Alert',
outputs_key_field='id',
outputs=r
)
return_results(results)
elif demisto.command() == 'cov-secpr-list-sensors':
r = list_sensors()
results = CommandResults(
outputs_prefix='Covalence.Sensors',
outputs_key_field='id',
outputs=r
)
return_results(results)
elif demisto.command() == 'cov-secpr-get-sensor':
r = get_sensor()
results = CommandResults(
outputs_prefix='Covalence.Sensor',
outputs_key_field='id',
outputs=r
)
return_results(results)
elif demisto.command() == 'cov-secpr-connections-summary-ip':
r = connections_summary_by_ip()
results = CommandResults(
outputs_prefix='Covalence.Connections',
outputs_key_field='id',
outputs=r
)
return_results(results)
elif demisto.command() == 'cov-secpr-connections-summary-port':
r = connections_summary_by_port()
results = CommandResults(
outputs_prefix='Covalence.Connections',
outputs_key_field='id',
outputs=r
)
return_results(results)
elif demisto.command() == 'cov-secpr-list-dns-resolutions':
r = list_dns_resolutions()
results = CommandResults(
outputs_prefix='Covalence.DNSResolutions',
outputs_key_field='id',
outputs=r
)
return_results(results)
elif demisto.command() == 'cov-secpr-list-internal-networks':
r = list_internal_networks()
results = CommandResults(
outputs_prefix='Covalence.InternalNetworks',
outputs_key_field='cidr',
outputs=r
)
return_results(results)
elif demisto.command() == 'cov-secpr-set-internal-networks':
r = set_internal_networks()
if r is True:
r = list_internal_networks()
results = CommandResults(
outputs_prefix='Covalence.InternalNetworks',
outputs_key_field='cidr',
outputs=r
)
return_results(results)
else:
msg = f'Failed to set internal networks'
LOG(msg)
LOG.print_log()
elif demisto.command() == 'cov-secpr-list-endpoint-agents':
r = list_endpoint_agents()
results = CommandResults(
outputs_prefix='Covalence.EndpointAgents',
outputs_key_field='agentUuid',
outputs=r
)
return_results(results)
elif demisto.command() == 'cov-secpr-find-endpoint-agents-by-user':
r = find_endpoint_by_user()
results = CommandResults(
outputs_prefix='Covalence.EndpointAgents',
outputs_key_field='agentUuid',
outputs=r
)
return_results(results)
elif demisto.command() == 'cov-secpr-find-endpoint-agents-by-uuid':
r = find_endpoint_by_uuid()
results = CommandResults(
outputs_prefix='Covalence.EndpointAgents',
outputs_key_field='agentUuid',
outputs=r
)
return_results(results)
elif demisto.command() == 'cov-secpr-search-endpoint-process':
r = search_endpoint_process()
results = CommandResults(
outputs_prefix='Covalence.EndpointProcess',
outputs_key_field='id',
outputs=r
)
return_results(results)
elif demisto.command() == 'cov-secpr-search-endpoint-installed-software':
r = search_endpoint_installed_software()
results = CommandResults(
outputs_prefix='Covalence.EndpointSoftware',
outputs_key_field='id',
outputs=r
)
return_results(results)
elif demisto.command() == 'cov-secpr-list-organizations':
r = list_org()
results = CommandResults(
outputs_prefix='Covalence.EndpointSoftware',
outputs_key_field='id',
outputs=r
)
return_results(results)
else:
msg = f'Unknown command {demisto.command()}'
LOG(msg)
LOG.print_log()
except Exception as e:
LOG(traceback.format_exc())
LOG.print_log()
return_error(f'Failed to execute {demisto.command()} command. Error: {str(e)}\n{traceback.format_exc()}')
|
def main():
demisto.info(f'{demisto.command()} is called')
try:
if demisto.command() == 'test-module':
if get_health():
return_results('ok')
else:
return_results('nok')
elif demisto.command() == 'fetch-incidents':
next_run, incidents = fetch_incidents(
last_run=demisto.getLastRun(),
first_run_time_range=FIRST_RUN_TIME_RANGE)
demisto.setLastRun(next_run)
demisto.incidents(incidents)
elif demisto.command() == 'cov-secpr-list-alerts':
r = list_alerts()
results = CommandResults(
outputs_prefix='Covalence.Alert',
outputs_key_field='id',
outputs=r
)
return_results(results)
elif demisto.command() == 'cov-secpr-list-sensors':
r = list_sensors()
results = CommandResults(
outputs_prefix='Covalence.Sensors',
outputs_key_field='id',
outputs=r
)
return_results(results)
elif demisto.command() == 'cov-secpr-get-sensor':
r = get_sensor()
results = CommandResults(
outputs_prefix='Covalence.Sensor',
outputs_key_field='id',
outputs=r
)
return_results(results)
elif demisto.command() == 'cov-secpr-connections-summary-ip':
r = connections_summary_by_ip()
results = CommandResults(
outputs_prefix='Covalence.Connections',
outputs_key_field='id',
outputs=r
)
return_results(results)
elif demisto.command() == 'cov-secpr-connections-summary-port':
r = connections_summary_by_port()
results = CommandResults(
outputs_prefix='Covalence.Connections',
outputs_key_field='id',
outputs=r
)
return_results(results)
elif demisto.command() == 'cov-secpr-list-dns-resolutions':
r = list_dns_resolutions()
results = CommandResults(
outputs_prefix='Covalence.DNSResolutions',
outputs_key_field='id',
outputs=r
)
return_results(results)
elif demisto.command() == 'cov-secpr-list-internal-networks':
r = list_internal_networks()
results = CommandResults(
outputs_prefix='Covalence.InternalNetworks',
outputs_key_field='cidr',
outputs=r
)
return_results(results)
elif demisto.command() == 'cov-secpr-set-internal-networks':
r = set_internal_networks()
if r is True:
r = list_internal_networks()
results = CommandResults(
outputs_prefix='Covalence.InternalNetworks',
outputs_key_field='cidr',
outputs=r
)
return_results(results)
else:
msg = f'Failed to set internal networks'
LOG(msg)
LOG.print_log()
elif demisto.command() == 'cov-secpr-list-endpoint-agents':
r = list_endpoint_agents()
results = CommandResults(
outputs_prefix='Covalence.EndpointAgents',
outputs_key_field='agentUuid',
outputs=r
)
return_results(results)
elif demisto.command() == 'cov-secpr-find-endpoint-agents-by-user':
r = find_endpoint_by_user()
results = CommandResults(
outputs_prefix='Covalence.EndpointAgents',
outputs_key_field='agentUuid',
outputs=r
)
return_results(results)
elif demisto.command() == 'cov-secpr-find-endpoint-agents-by-uuid':
r = find_endpoint_by_uuid()
results = CommandResults(
outputs_prefix='Covalence.EndpointAgents',
outputs_key_field='agentUuid',
outputs=r
)
return_results(results)
elif demisto.command() == 'cov-secpr-search-endpoint-process':
r = search_endpoint_process()
results = CommandResults(
outputs_prefix='Covalence.EndpointProcess',
outputs_key_field='id',
outputs=r
)
return_results(results)
elif demisto.command() == 'cov-secpr-search-endpoint-installed-software':
r = search_endpoint_installed_software()
results = CommandResults(
outputs_prefix='Covalence.EndpointSoftware',
outputs_key_field='id',
outputs=r
)
return_results(results)
elif demisto.command() == 'cov-secpr-list-organizations':
r = list_org()
results = CommandResults(
outputs_prefix='Covalence.EndpointSoftware',
outputs_key_field='id',
outputs=r
)
return_results(results)
else:
msg = f'Unknown command {demisto.command()}'
LOG(msg)
LOG.print_log()
except Exception as e:
LOG(traceback.format_exc())
LOG.print_log()
return_error(f'Failed to execute {demisto.command()} command. Error: {str(e)}\n{traceback.format_exc()}')
|
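The pair above differs only in the logging call. As a design note, the repeated CommandResults boilerplate could be collapsed into a dispatch table keyed by command name; the sketch below is hedged and self-contained, with stub handlers standing in for the real list_* functions, and is not the integration's actual layout:

def list_alerts_stub():
    return [{'id': 1}]

def list_sensors_stub():
    return [{'id': 2}]

COMMAND_MAP = {
    'cov-secpr-list-alerts': (list_alerts_stub, 'Covalence.Alert', 'id'),
    'cov-secpr-list-sensors': (list_sensors_stub, 'Covalence.Sensors', 'id'),
}

def dispatch(command):
    # look up the handler plus its outputs_prefix and outputs_key_field
    handler, prefix, key_field = COMMAND_MAP[command]
    return prefix, key_field, handler()

print(dispatch('cov-secpr-list-alerts'))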
27,388 |
def per_core_work(
topology_file_path: str, trajectory_file_path: str,
list_square_vertex_arrays_this_core: List[List[float]],
MDA_selection: str, start_frame: int, end_frame: int,
reconstruction_index_list: Sequence[int],
maximum_delta_magnitude: float,
) -> List[Tuple[int, List[float]]]:
"""Run the analysis on one core.
The code to perform on a given core given the list of square vertices assigned to it.
"""
# obtain the relevant coordinates for particles of interest
universe_object = MDAnalysis.Universe(topology_file_path, trajectory_file_path)
list_previous_frame_centroids: List[int] = []
list_previous_frame_indices: List[int] = []
#define some utility functions for trajectory iteration:
def produce_list_indices_point_in_polygon_this_frame(
vertex_coord_list: Iterable[int]) -> Iterable[int]:
list_indices_point_in_polygon = []
for square_vertices in vertex_coord_list:
path_object = matplotlib.path.Path(square_vertices)
index_list_in_polygon = np.where(path_object.contains_points(relevant_particle_coordinate_array_xy))
list_indices_point_in_polygon.append(index_list_in_polygon)
return list_indices_point_in_polygon
# typing: numpy
def produce_list_centroids_this_frame(
list_indices_in_polygon: Iterable[int]
) -> Union[List[np.ndarray], List[str]]:
list_centroids_this_frame = []
for indices in list_indices_in_polygon:
if not indices[0].size > 0: # if there are no particles of interest in this particular square
list_centroids_this_frame.append('empty')
else:
current_coordinate_array_in_square = relevant_particle_coordinate_array_xy[indices]
current_square_indices_centroid = np.average(current_coordinate_array_in_square, axis=0)
list_centroids_this_frame.append(current_square_indices_centroid)
return list_centroids_this_frame # a list of numpy xy centroid arrays for this frame
for ts in universe_object.trajectory:
if ts.frame < start_frame: # don't start until first specified frame
continue
relevant_particle_coordinate_array_xy = universe_object.select_atoms(MDA_selection).positions[..., :-1]
# only 2D / xy coords for now
#I will need a list of indices for relevant particles falling within each square in THIS frame:
list_indices_in_squares_this_frame = produce_list_indices_point_in_polygon_this_frame(
list_square_vertex_arrays_this_core)
#likewise, I will need a list of centroids of particles in each square (same order as above list):
list_centroids_in_squares_this_frame = produce_list_centroids_this_frame(list_indices_in_squares_this_frame)
if list_previous_frame_indices: # if the previous frame had indices in at least one square I will need to use
# those indices to generate the updates to the corresponding centroids in this frame:
list_centroids_this_frame_using_indices_from_last_frame = produce_list_centroids_this_frame(
list_previous_frame_indices)
#I need to write a velocity of zero if there are any 'empty' squares in either frame:
xy_deltas_to_write = []
for square_1_centroid, square_2_centroid in zip(list_centroids_this_frame_using_indices_from_last_frame,
list_previous_frame_centroids):
if square_1_centroid == 'empty' or square_2_centroid == 'empty':
xy_deltas_to_write.append([0, 0])
else:
xy_deltas_to_write.append(np.subtract(square_1_centroid, square_2_centroid).tolist())
#xy_deltas_to_write = np.subtract(np.array(
# list_centroids_this_frame_using_indices_from_last_frame),np.array(list_previous_frame_centroids))
xy_deltas_to_write = np.array(xy_deltas_to_write)
#now filter the array to only contain distances in the range [-8,8] as a placeholder for dealing with PBC
# issues (Matthieu seemed to use a limit of 8 as well);
xy_deltas_to_write = np.clip(xy_deltas_to_write, -maximum_delta_magnitude, maximum_delta_magnitude)
#with the xy and dx,dy values calculated I need to set the values from this frame to previous frame
# values in anticipation of the next frame:
list_previous_frame_centroids = list_centroids_in_squares_this_frame[:]
list_previous_frame_indices = list_indices_in_squares_this_frame[:]
else: # either no points in squares or after the first frame I'll just reset the 'previous' values so they
# can be used when consecutive frames have proper values
list_previous_frame_centroids = list_centroids_in_squares_this_frame[:]
list_previous_frame_indices = list_indices_in_squares_this_frame[:]
if ts.frame > end_frame:
break # stop here
return list(zip(reconstruction_index_list, xy_deltas_to_write.tolist()))
|
def per_core_work(
topology_file_path: str, trajectory_file_path: str,
list_square_vertex_arrays_this_core: List[List[int]],
MDA_selection: str, start_frame: int, end_frame: int,
reconstruction_index_list: Sequence[int],
maximum_delta_magnitude: float,
) -> List[Tuple[int, List[float]]]:
"""Run the analysis on one core.
The code to perform on a given core given the list of square vertices assigned to it.
"""
# obtain the relevant coordinates for particles of interest
universe_object = MDAnalysis.Universe(topology_file_path, trajectory_file_path)
list_previous_frame_centroids: List[int] = []
list_previous_frame_indices: List[int] = []
#define some utility functions for trajectory iteration:
def produce_list_indices_point_in_polygon_this_frame(
vertex_coord_list: Iterable[int]) -> Iterable[int]:
list_indices_point_in_polygon = []
for square_vertices in vertex_coord_list:
path_object = matplotlib.path.Path(square_vertices)
index_list_in_polygon = np.where(path_object.contains_points(relevant_particle_coordinate_array_xy))
list_indices_point_in_polygon.append(index_list_in_polygon)
return list_indices_point_in_polygon
# typing: numpy
def produce_list_centroids_this_frame(
list_indices_in_polygon: Iterable[int]
) -> Union[List[np.ndarray], List[str]]:
list_centroids_this_frame = []
for indices in list_indices_in_polygon:
if not indices[0].size > 0: # if there are no particles of interest in this particular square
list_centroids_this_frame.append('empty')
else:
current_coordinate_array_in_square = relevant_particle_coordinate_array_xy[indices]
current_square_indices_centroid = np.average(current_coordinate_array_in_square, axis=0)
list_centroids_this_frame.append(current_square_indices_centroid)
return list_centroids_this_frame # a list of numpy xy centroid arrays for this frame
for ts in universe_object.trajectory:
if ts.frame < start_frame: # don't start until first specified frame
continue
relevant_particle_coordinate_array_xy = universe_object.select_atoms(MDA_selection).positions[..., :-1]
# only 2D / xy coords for now
#I will need a list of indices for relevant particles falling within each square in THIS frame:
list_indices_in_squares_this_frame = produce_list_indices_point_in_polygon_this_frame(
list_square_vertex_arrays_this_core)
#likewise, I will need a list of centroids of particles in each square (same order as above list):
list_centroids_in_squares_this_frame = produce_list_centroids_this_frame(list_indices_in_squares_this_frame)
if list_previous_frame_indices: # if the previous frame had indices in at least one square I will need to use
# those indices to generate the updates to the corresponding centroids in this frame:
list_centroids_this_frame_using_indices_from_last_frame = produce_list_centroids_this_frame(
list_previous_frame_indices)
#I need to write a velocity of zero if there are any 'empty' squares in either frame:
xy_deltas_to_write = []
for square_1_centroid, square_2_centroid in zip(list_centroids_this_frame_using_indices_from_last_frame,
list_previous_frame_centroids):
if square_1_centroid == 'empty' or square_2_centroid == 'empty':
xy_deltas_to_write.append([0, 0])
else:
xy_deltas_to_write.append(np.subtract(square_1_centroid, square_2_centroid).tolist())
#xy_deltas_to_write = np.subtract(np.array(
# list_centroids_this_frame_using_indices_from_last_frame),np.array(list_previous_frame_centroids))
xy_deltas_to_write = np.array(xy_deltas_to_write)
#now filter the array to only contain distances in the range [-8,8] as a placeholder for dealing with PBC
# issues (Matthieu seemed to use a limit of 8 as well);
xy_deltas_to_write = np.clip(xy_deltas_to_write, -maximum_delta_magnitude, maximum_delta_magnitude)
#with the xy and dx,dy values calculated I need to set the values from this frame to previous frame
# values in anticipation of the next frame:
list_previous_frame_centroids = list_centroids_in_squares_this_frame[:]
list_previous_frame_indices = list_indices_in_squares_this_frame[:]
else: # either no points in squares or after the first frame I'll just reset the 'previous' values so they
# can be used when consecutive frames have proper values
list_previous_frame_centroids = list_centroids_in_squares_this_frame[:]
list_previous_frame_indices = list_indices_in_squares_this_frame[:]
if ts.frame > end_frame:
break # stop here
return list(zip(reconstruction_index_list, xy_deltas_to_write.tolist()))
|
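A small self-contained sketch of the point-in-square test and centroid computation that per_core_work performs for each square; the square vertices and sample points below are made up for illustration:

import numpy as np
import matplotlib.path

square_vertices = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
xy = np.array([[0.2, 0.3], [0.8, 0.9], [1.5, 1.5]])  # third point lies outside

path_object = matplotlib.path.Path(square_vertices)
inside = np.where(path_object.contains_points(xy))   # indices of points inside the square
centroid = np.average(xy[inside], axis=0)            # centroid of the inside points
print(inside[0], centroid)                           # [0 1] [0.5 0.6]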
52,445 |
def test_serialize_kb_disk(en_vocab):
# baseline assertions
kb1 = _get_dummy_kb(en_vocab)
_check_kb(kb1)
# dumping to file & loading back in
with make_tempdir() as d:
dir_path = ensure_path(d)
if not dir_path.exists():
dir_path.mkdir()
file_path = dir_path / "kb"
print(file_path, type(file_path))
kb1.dump(str(file_path))
kb2 = KnowledgeBase(vocab=en_vocab, entity_vector_length=3)
kb2.load_bulk(str(file_path))
# final assertions
_check_kb(kb2)
|
def test_serialize_kb_disk(en_vocab):
# baseline assertions
kb1 = _get_dummy_kb(en_vocab)
_check_kb(kb1)
# dumping to file & loading back in
with make_tempdir() as d:
dir_path = ensure_path(d)
if not dir_path.exists():
dir_path.mkdir()
file_path = dir_path / "kb"
kb1.dump(str(file_path))
kb2 = KnowledgeBase(vocab=en_vocab, entity_vector_length=3)
kb2.load_bulk(str(file_path))
# final assertions
_check_kb(kb2)
|
27,963 |
def parse_unique_log(compilation_database,
report_dir,
compile_uniqueing="none",
compiler_info_file=None,
keep_gcc_fix_headers=False,
analysis_skip_handler=None,
pre_analysis_skip_handler=None):
"""
This function reads up the compilation_database
and returns with a list of build actions that is prepared for clang
execution. That means that gcc specific parameters are filtered out
and gcc built in targets and include paths are added.
It also filters out duplicate compilation actions based on the
compile_uniqueing parameter.
    This function also dumps the auto-detected compiler info
into <report_dir>/compiler_info.json.
compilation_database -- A compilation database as a list of dict objects.
                            These objects should contain "file", "directory"
and "command" keys. The "command" may be replaced
by "arguments" which is a split command. Older
versions of intercept-build provide the build
command this way.
    report_dir -- The output report directory. The compiler info
                  will be written to <report_dir>/compiler_info.json.
compile_uniqueing -- Compilation database uniqueing mode.
If there are more than one compile commands for a
target file, only a single one is kept.
compiler_info_file -- compiler_info.json. If exists, it will be used for
analysis.
keep_gcc_fix_headers -- There are some implicit include paths which are
only used by GCC (include-fixed). This flag
determines whether these should be kept among
the implicit include paths.
Separate skip handlers are required because it is possible that different
files are skipped during pre analysis and the actual analysis. In the
pre analysis step nothing should be skipped to collect the required
information for the analysis step where not all the files are analyzed.
analysis_skip_handler -- skip handler for files which should be skipped
during analysis
    pre_analysis_skip_handler -- skip handler for files which should be skipped
during pre analysis
"""
try:
uniqued_build_actions = dict()
if compile_uniqueing == "alpha":
build_action_uniqueing = CompileActionUniqueingType.SOURCE_ALPHA
elif compile_uniqueing == "none":
build_action_uniqueing = CompileActionUniqueingType.NONE
elif compile_uniqueing == "strict":
build_action_uniqueing = CompileActionUniqueingType.STRICT
else:
build_action_uniqueing = CompileActionUniqueingType.SOURCE_REGEX
uniqueing_re = re.compile(compile_uniqueing)
for entry in compilation_database:
            # Skip parsing the compilation commands if they should be skipped
# at both analysis phases (pre analysis and analysis).
full_path = os.path.join(entry["directory"], entry["file"])
if analysis_skip_handler \
and analysis_skip_handler.should_skip(full_path) \
and pre_analysis_skip_handler \
and pre_analysis_skip_handler.should_skip(full_path):
continue
action = parse_options(entry,
compiler_info_file,
keep_gcc_fix_headers)
if not action.lang:
continue
if action.action_type != BuildAction.COMPILE:
continue
if build_action_uniqueing == CompileActionUniqueingType.NONE:
if action.__hash__ not in uniqued_build_actions:
uniqued_build_actions[action.__hash__] = action
elif build_action_uniqueing == CompileActionUniqueingType.STRICT:
if action.source not in uniqued_build_actions:
uniqued_build_actions[action.source] = action
else:
LOG.error("Build Action uniqueing failed"
" as both '%s' and '%s'",
uniqued_build_actions[action.source]
.original_command,
action.original_command)
sys.exit(1)
elif build_action_uniqueing ==\
CompileActionUniqueingType.SOURCE_ALPHA:
if action.source not in uniqued_build_actions:
uniqued_build_actions[action.source] = action
elif action.output <\
uniqued_build_actions[action.source].output:
uniqued_build_actions[action.source] = action
elif build_action_uniqueing ==\
CompileActionUniqueingType.SOURCE_REGEX:
LOG.debug("uniqueing regex")
if action.source not in uniqued_build_actions:
uniqued_build_actions[action.source] = action
elif uniqueing_re.match(action.original_command) and\
not uniqueing_re.match(
uniqued_build_actions[action.source].original_command):
uniqued_build_actions[action.source] = action
elif uniqueing_re.match(action.original_command) and\
uniqueing_re.match(
uniqued_build_actions[action.source].original_command):
LOG.error("Build Action uniqueing failed as both \n %s"
"\n and \n %s \n match regex pattern:%s",
uniqued_build_actions[action.source].
original_command,
action.original_command,
compile_uniqueing)
sys.exit(1)
compiler_info_out = os.path.join(report_dir, "compiler_info.json")
with open(compiler_info_out, 'w') as f:
LOG.debug("Writing compiler info into:"+compiler_info_out)
json.dump(ImplicitCompilerInfo.get(), f)
LOG.debug('Parsing log file done.')
return list(uniqued_build_actions.values())
except (ValueError, KeyError, TypeError) as ex:
if not compilation_database:
LOG.error('The compile database is empty.')
else:
LOG.error('The compile database is not valid.')
LOG.debug(traceback.format_exc())
LOG.debug(ex)
sys.exit(1)
|
def parse_unique_log(compilation_database,
report_dir,
compile_uniqueing="none",
compiler_info_file=None,
keep_gcc_fix_headers=False,
analysis_skip_handler=None,
pre_analysis_skip_handler=None):
"""
This function reads up the compilation_database
and returns with a list of build actions that is prepared for clang
execution. That means that gcc specific parameters are filtered out
and gcc built in targets and include paths are added.
It also filters out duplicate compilation actions based on the
compile_uniqueing parameter.
    This function also dumps the auto-detected compiler info
into <report_dir>/compiler_info.json.
compilation_database -- A compilation database as a list of dict objects.
                            These objects should contain "file", "directory"
and "command" keys. The "command" may be replaced
by "arguments" which is a split command. Older
versions of intercept-build provide the build
command this way.
    report_dir -- The output report directory. The compiler info
                  will be written to <report_dir>/compiler_info.json.
compile_uniqueing -- Compilation database uniqueing mode.
If there are more than one compile commands for a
target file, only a single one is kept.
compiler_info_file -- compiler_info.json. If exists, it will be used for
analysis.
keep_gcc_fix_headers -- There are some implicit include paths which are
only used by GCC (include-fixed). This flag
determines whether these should be kept among
the implicit include paths.
Separate skip handlers are required because it is possible that different
files are skipped during pre analysis and the actual analysis. In the
pre analysis step nothing should be skipped to collect the required
information for the analysis step where not all the files are analyzed.
analysis_skip_handler -- skip handler for files which should be skipped
during analysis
    pre_analysis_skip_handler -- skip handler for files which should be skipped
during pre analysis
"""
try:
uniqued_build_actions = dict()
if compile_uniqueing == "alpha":
build_action_uniqueing = CompileActionUniqueingType.SOURCE_ALPHA
elif compile_uniqueing == "none":
build_action_uniqueing = CompileActionUniqueingType.NONE
elif compile_uniqueing == "strict":
build_action_uniqueing = CompileActionUniqueingType.STRICT
else:
build_action_uniqueing = CompileActionUniqueingType.SOURCE_REGEX
uniqueing_re = re.compile(compile_uniqueing)
for entry in compilation_database:
            # Skip parsing the compilation commands if they should be skipped
# at both analysis phases (pre analysis and analysis).
full_path = os.path.join(entry["directory"], entry["file"])
if analysis_skip_handler \
and analysis_skip_handler.should_skip(full_path) \
and pre_analysis_skip_handler \
and pre_analysis_skip_handler.should_skip(full_path):
continue
action = parse_options(entry,
compiler_info_file,
keep_gcc_fix_headers)
if not action.lang:
continue
if action.action_type != BuildAction.COMPILE:
continue
if build_action_uniqueing == CompileActionUniqueingType.NONE:
if action.__hash__ not in uniqued_build_actions:
uniqued_build_actions[action.__hash__] = action
elif build_action_uniqueing == CompileActionUniqueingType.STRICT:
if action.source not in uniqued_build_actions:
uniqued_build_actions[action.source] = action
else:
LOG.error("Build Action uniqueing failed"
" as both '%s' and '%s'",
uniqued_build_actions[action.source]
.original_command,
action.original_command)
sys.exit(1)
elif build_action_uniqueing ==\
CompileActionUniqueingType.SOURCE_ALPHA:
if action.source not in uniqued_build_actions:
uniqued_build_actions[action.source] = action
elif action.output <\
uniqued_build_actions[action.source].output:
uniqued_build_actions[action.source] = action
elif build_action_uniqueing ==\
CompileActionUniqueingType.SOURCE_REGEX:
LOG.debug("uniqueing regex")
if action.source not in uniqued_build_actions:
uniqued_build_actions[action.source] = action
elif uniqueing_re.match(action.original_command) and\
not uniqueing_re.match(
uniqued_build_actions[action.source].original_command):
uniqued_build_actions[action.source] = action
elif uniqueing_re.match(action.original_command) and\
uniqueing_re.match(
uniqued_build_actions[action.source].original_command):
LOG.error("Build Action uniqueing failed as both \n %s"
"\n and \n %s \n match regex pattern:%s",
uniqued_build_actions[action.source].
original_command,
action.original_command,
compile_uniqueing)
sys.exit(1)
compiler_info_out = os.path.join(report_dir, "compiler_info.json")
with open(compiler_info_out, 'w') as f:
LOG.debug("Writing compiler info into:"+compiler_info_out)
json.dump(ImplicitCompilerInfo.get(), f)
LOG.debug('Parsing log file done.')
return list(uniqued_build_actions.values())
except (ValueError, KeyError, TypeError) as ex:
if not compilation_database:
LOG.error('The compile database is empty.')
else:
LOG.error('The compile database is not valid.')
LOG.debug(traceback.format_exc())
LOG.debug(ex)
sys.exit(1)
|
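For reference, a minimal compilation database entry of the shape the docstring describes; the path and flags are made up, and either a "command" string or a split "arguments" list is accepted:

compilation_database = [
    {
        "directory": "/home/user/project/build",
        "file": "main.cpp",
        "command": "g++ -Iinclude -O2 -c main.cpp -o main.o",
    },
]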
1,457 |
def quantile_transform(X, axis=0, n_quantiles=1000,
output_distribution='uniform',
ignore_implicit_zeros=False,
subsample=int(1e5),
random_state=None,
copy="warn"):
"""Transform features using quantiles information.
This method transforms the features to follow a uniform or a normal
distribution. Therefore, for a given feature, this transformation tends
to spread out the most frequent values. It also reduces the impact of
(marginal) outliers: this is therefore a robust preprocessing scheme.
The transformation is applied on each feature independently. First an
estimate of the cumulative distribution function of a feature is
used to map the original values to a uniform distribution. The obtained
values are then mapped to the desired output distribution using the
    associated quantile function. Feature values of new/unseen data that fall
below or above the fitted range will be mapped to the bounds of the output
distribution. Note that this transform is non-linear. It may distort linear
correlations between variables measured at the same scale but renders
variables measured at different scales more directly comparable.
Read more in the :ref:`User Guide <preprocessing_transformer>`.
Parameters
----------
X : array-like, sparse matrix
The data to transform.
axis : int, (default=0)
Axis used to compute the means and standard deviations along. If 0,
transform each feature, otherwise (if 1) transform each sample.
n_quantiles : int, optional (default=1000 or n_samples)
Number of quantiles to be computed. It corresponds to the number
of landmarks used to discretize the cumulative distribution function.
If n_quantiles is larger than the number of samples, n_quantiles is set
to the number of samples as a larger number of quantiles does not give
a better approximation of the cumulative distribution function
estimator.
output_distribution : str, optional (default='uniform')
Marginal distribution for the transformed data. The choices are
'uniform' (default) or 'normal'.
ignore_implicit_zeros : bool, optional (default=False)
Only applies to sparse matrices. If True, the sparse entries of the
matrix are discarded to compute the quantile statistics. If False,
these entries are treated as zeros.
subsample : int, optional (default=1e5)
Maximum number of samples used to estimate the quantiles for
computational efficiency. Note that the subsampling procedure may
differ for value-identical sparse and dense matrices.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by np.random. Note that this is used by subsampling and smoothing
noise.
copy : boolean, optional, (default="warn")
Set to False to perform inplace transformation and avoid a copy (if the
input is already a numpy array). If True, a copy of `X` is transformed,
leaving the original `X` unchanged
.. deprecated:: 0.21
The default value of parameter `copy` will be changed from False
to True in 0.23. The current default of False is being changed to
make it more consistent with the default `copy` values of other
functions in :mod:`sklearn.preprocessing._data`. Furthermore, the
current default of False may have unexpected side effects by
modifying the value of `X` inplace
Returns
-------
Xt : ndarray or sparse matrix, shape (n_samples, n_features)
The transformed data.
Examples
--------
>>> import numpy as np
>>> from sklearn.preprocessing import quantile_transform
>>> rng = np.random.RandomState(0)
>>> X = np.sort(rng.normal(loc=0.5, scale=0.25, size=(25, 1)), axis=0)
>>> quantile_transform(X, n_quantiles=10, random_state=0, copy=True)
array([...])
See also
--------
QuantileTransformer : Performs quantile-based scaling using the
``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`).
power_transform : Maps data to a normal distribution using a
power transformation.
scale : Performs standardization that is faster, but less robust
to outliers.
robust_scale : Performs robust standardization that removes the influence
of outliers but does not put outliers and inliers on the same scale.
Notes
-----
NaNs are treated as missing values: disregarded in fit, and maintained in
transform.
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
"""
if copy == "warn":
warnings.warn("The default value of `copy` will change from False to "
"True in 0.23 in order to make it more consistent with "
"the default `copy` values of other functions in "
":mod:`sklearn.preprocessing._data` and prevent "
"unexpected side effects by modifying the value of `X` "
"inplace. To avoid inplace modifications of `X`, it is "
"recommended to explicitly set `copy=True`",
FutureWarning)
copy = False
n = QuantileTransformer(n_quantiles=n_quantiles,
output_distribution=output_distribution,
subsample=subsample,
ignore_implicit_zeros=ignore_implicit_zeros,
random_state=random_state,
copy=copy)
if axis == 0:
return n.fit_transform(X)
elif axis == 1:
return n.fit_transform(X.T).T
else:
raise ValueError("axis should be either equal to 0 or 1. Got"
" axis={}".format(axis))
|
def quantile_transform(X, axis=0, n_quantiles=1000,
output_distribution='uniform',
ignore_implicit_zeros=False,
subsample=int(1e5),
random_state=None,
copy="warn"):
"""Transform features using quantiles information.
This method transforms the features to follow a uniform or a normal
distribution. Therefore, for a given feature, this transformation tends
to spread out the most frequent values. It also reduces the impact of
(marginal) outliers: this is therefore a robust preprocessing scheme.
The transformation is applied on each feature independently. First an
estimate of the cumulative distribution function of a feature is
used to map the original values to a uniform distribution. The obtained
values are then mapped to the desired output distribution using the
    associated quantile function. Feature values of new/unseen data that fall
below or above the fitted range will be mapped to the bounds of the output
distribution. Note that this transform is non-linear. It may distort linear
correlations between variables measured at the same scale but renders
variables measured at different scales more directly comparable.
Read more in the :ref:`User Guide <preprocessing_transformer>`.
Parameters
----------
X : array-like, sparse matrix
The data to transform.
axis : int, (default=0)
Axis used to compute the means and standard deviations along. If 0,
transform each feature, otherwise (if 1) transform each sample.
n_quantiles : int, optional (default=1000 or n_samples)
Number of quantiles to be computed. It corresponds to the number
of landmarks used to discretize the cumulative distribution function.
If n_quantiles is larger than the number of samples, n_quantiles is set
to the number of samples as a larger number of quantiles does not give
a better approximation of the cumulative distribution function
estimator.
output_distribution : str, optional (default='uniform')
Marginal distribution for the transformed data. The choices are
'uniform' (default) or 'normal'.
ignore_implicit_zeros : bool, optional (default=False)
Only applies to sparse matrices. If True, the sparse entries of the
matrix are discarded to compute the quantile statistics. If False,
these entries are treated as zeros.
subsample : int, optional (default=1e5)
Maximum number of samples used to estimate the quantiles for
computational efficiency. Note that the subsampling procedure may
differ for value-identical sparse and dense matrices.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by np.random. Note that this is used by subsampling and smoothing
noise.
copy : boolean, optional, (default="warn")
Set to False to perform inplace transformation and avoid a copy (if the
input is already a numpy array). If True, a copy of `X` is transformed,
leaving the original `X` unchanged
.. deprecated:: 0.21
The default value of parameter `copy` will be changed from False
to True in 0.23. The current default of False is being changed to
make it more consistent with the default `copy` values of other
functions in :mod:`sklearn.preprocessing`. Furthermore, the
current default of False may have unexpected side effects by
modifying the value of `X` inplace
Returns
-------
Xt : ndarray or sparse matrix, shape (n_samples, n_features)
The transformed data.
Examples
--------
>>> import numpy as np
>>> from sklearn.preprocessing import quantile_transform
>>> rng = np.random.RandomState(0)
>>> X = np.sort(rng.normal(loc=0.5, scale=0.25, size=(25, 1)), axis=0)
>>> quantile_transform(X, n_quantiles=10, random_state=0, copy=True)
array([...])
See also
--------
QuantileTransformer : Performs quantile-based scaling using the
``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`).
power_transform : Maps data to a normal distribution using a
power transformation.
scale : Performs standardization that is faster, but less robust
to outliers.
robust_scale : Performs robust standardization that removes the influence
of outliers but does not put outliers and inliers on the same scale.
Notes
-----
NaNs are treated as missing values: disregarded in fit, and maintained in
transform.
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
"""
if copy == "warn":
warnings.warn("The default value of `copy` will change from False to "
"True in 0.23 in order to make it more consistent with "
"the default `copy` values of other functions in "
":mod:`sklearn.preprocessing._data` and prevent "
"unexpected side effects by modifying the value of `X` "
"inplace. To avoid inplace modifications of `X`, it is "
"recommended to explicitly set `copy=True`",
FutureWarning)
copy = False
n = QuantileTransformer(n_quantiles=n_quantiles,
output_distribution=output_distribution,
subsample=subsample,
ignore_implicit_zeros=ignore_implicit_zeros,
random_state=random_state,
copy=copy)
if axis == 0:
return n.fit_transform(X)
elif axis == 1:
return n.fit_transform(X.T).T
else:
raise ValueError("axis should be either equal to 0 or 1. Got"
" axis={}".format(axis))
|
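A short usage sketch of the axis handling above: axis=1 is implemented by transposing, so transforming the features of X and the samples of X.T should agree (a sketch, assuming a scikit-learn version with this signature):

import numpy as np
from sklearn.preprocessing import quantile_transform

rng = np.random.RandomState(0)
X = rng.normal(size=(25, 2))
per_feature = quantile_transform(X, n_quantiles=10, copy=True)            # axis=0, per column
per_sample = quantile_transform(X.T, axis=1, n_quantiles=10, copy=True)   # per row of X.T
np.testing.assert_allclose(per_feature, per_sample.T)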
31,315 |
def cyren_feed_relationship(args) -> CommandResults:
if "indicator" not in args:
raise ValueError("Please provide 'indicator' argument!")
indicator = args["indicator"]
if isinstance(indicator, str):
try:
indicator = json.loads(indicator)
except json.JSONDecodeError:
raise ValueError("Please provide JSON-encoded 'indicator' param!")
elif not isinstance(indicator, dict):
raise ValueError("Please provide JSON-encoded 'indicator' param!")
headers = args.get("columns", ACCEPTED_HEADERS)
if isinstance(headers, str):
headers = [s.strip() for s in headers.split(",")]
check_acceptable_headers(headers)
relationships = indicator.get("CustomFields", {}).get("cyrenfeedrelationships", []) or []
content = []
for item in relationships:
ioc_value = item.get("value", "")
results = demisto.searchIndicators(value=ioc_value).get("iocs", [])
if results:
result = results[0]
ioc_score = result.get("score")
ioc_id = result.get("id")
content.append(create_relationship_object(
value=f"[{ioc_value}](#/indicator/{ioc_id})" if ioc_value else "",
relationship_type=item.get("relationshiptype"),
indicator_type=item.get("indicatortype"),
timestamp=item.get("timestamp"),
entity_category=item.get("entitycategory"),
reputation=ioc_score,
))
else:
# In case that no related indicators were found, return the table without the link.
content.append(create_relationship_object(
value=ioc_value,
relationship_type=item.get("relationshiptype"),
indicator_type=item.get("indicatortype"),
timestamp=item.get("timestamp"),
entity_category=item.get("entitycategory"),
))
output = tableToMarkdown("", content, headers, removeNull=True)
return CommandResults(readable_output=output)
|
def cyren_feed_relationship(args) -> CommandResults:
if "indicator" not in args:
raise ValueError("Please provide 'indicator' argument!")
indicator = args["indicator"]
if isinstance(indicator, str):
try:
indicator = json.loads(indicator)
except json.JSONDecodeError:
raise ValueError("Please provide JSON-encoded 'indicator' param!")
elif not isinstance(indicator, dict):
raise ValueError("Please provide JSON-encoded 'indicator' param!")
headers = args.get("columns", ACCEPTED_HEADERS)
if isinstance(headers, str):
headers = [s.strip() for s in headers.split(",")]
check_acceptable_headers(headers)
relationships = indicator.get("CustomFields", {}).get("cyrenfeedrelationships", []) or []
content = []
for item in relationships:
ioc_value = item.get("value", "")
results = demisto.searchIndicators(value=ioc_value).get("iocs", [])
if results and isinstance(results, list):
result = results[0]
ioc_score = result.get("score")
ioc_id = result.get("id")
content.append(create_relationship_object(
value=f"[{ioc_value}](#/indicator/{ioc_id})" if ioc_value else "",
relationship_type=item.get("relationshiptype"),
indicator_type=item.get("indicatortype"),
timestamp=item.get("timestamp"),
entity_category=item.get("entitycategory"),
reputation=ioc_score,
))
else:
# In case that no related indicators were found, return the table without the link.
content.append(create_relationship_object(
value=ioc_value,
relationship_type=item.get("relationshiptype"),
indicator_type=item.get("indicatortype"),
timestamp=item.get("timestamp"),
entity_category=item.get("entitycategory"),
))
output = tableToMarkdown("", content, headers, removeNull=True)
return CommandResults(readable_output=output)
|
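An illustrative, made-up 'indicator' argument of the shape cyren_feed_relationship expects: relationships live under CustomFields.cyrenfeedrelationships and each entry carries the keys read in the loop above (the IP, timestamp and categories are placeholders):

import json

indicator = {
    "CustomFields": {
        "cyrenfeedrelationships": [
            {
                "value": "198.51.100.7",
                "relationshiptype": "downloaded from",
                "indicatortype": "IP",
                "timestamp": "2021-01-01T00:00:00Z",
                "entitycategory": "malware",
            }
        ]
    }
}
args = {"indicator": json.dumps(indicator)}  # the command also accepts the dict directly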
33,111 |
def get_time_as_str(t, timezone=None):
if timezone is None:
timezone = config.get("TIMEZONE")
s = (t - datetime.utcnow()).total_seconds()
(m, s) = divmod(s, 60)
(h, m) = divmod(m, 60)
d = timedelta(hours=h, minutes=m, seconds=s)
if timezone is not None:
disappear_time = datetime.now(tz=timezone) + d
else:
disappear_time = datetime.now() + d
# Time remaining in minutes and seconds
time_left = "%dm %ds" % (m, s) if h == 0 else "%dh %dm" % (h, m)
# Disappear time in 12h format, eg "2:30:16 PM"
time_12 = disappear_time.strftime("%I:%M:%S") \
+ disappear_time.strftime("%p").lower()
# Disappear time in 24h format including seconds, eg "14:30:16"
time_24 = disappear_time.strftime("%H:%M:%S")
# Get the same as above but without seconds
time_left_minutes = "%dm" % m if h == 0 else "%dh %dm" % (h, m)
time_12_minutes = disappear_time.strftime("%I:%M") \
+ disappear_time.strftime("%p").lower()
time_24_minutes = disappear_time.strftime("%H:%M")
return time_left, time_12, time_24, \
time_left_minutes, time_12_minutes, time_24_minutes
|
def get_time_as_str(t, timezone=None):
if timezone is None:
timezone = config.get("TIMEZONE")
s = (t - datetime.utcnow()).total_seconds()
(m, s) = divmod(s, 60)
(h, m) = divmod(m, 60)
d = timedelta(hours=h, minutes=m, seconds=s)
if timezone is not None:
disappear_time = datetime.now(tz=timezone) + d
else:
disappear_time = datetime.now() + d
# Time remaining in minutes and seconds
time_left = "%dm %ds" % (m, s) if h == 0 else "%dh %dm" % (h, m)
# Disappear time in 12h format, eg "2:30:16 PM"
time_12 = disappear_time.strftime("%I:%M:%S") \
+ disappear_time.strftime("%p").lower()
# Disappear time in 24h format including seconds, eg "14:30:16"
time_24 = disappear_time.strftime("%H:%M:%S")
# Get the same as above but without seconds
time_left_minutes = "%dm" % m if h == 0 else "%dh %dm" % (h, m)
time_12_minutes = disappear_time.strftime("%I:%M") \
+ disappear_time.strftime("%p").lower()
time_24_minutes = disappear_time.strftime("%H:%M")
return time, time_12h, time_24h, \
time_left_minutes, time_12_minutes, time_24_minutes
|
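A self-contained sketch of the divmod bookkeeping used above for the human-readable remaining time: seconds are split into h/m/s and formatted as "Xm Ys" under an hour, "Xh Ym" otherwise.

def format_time_left(total_seconds):
    m, s = divmod(total_seconds, 60)
    h, m = divmod(m, 60)
    return "%dm %ds" % (m, s) if h == 0 else "%dh %dm" % (h, m)

print(format_time_left(125))   # 2m 5s
print(format_time_left(5400))  # 1h 30m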
6,962 |
def clear_activity_logs(days=None):
"""clear 90 day old authentication logs or configured in log settings"""
if not days:
days = 90
doctype = DocType("Activity Log")
duration = (Now() - Interval(days=days))
frappe.db.delete(doctype, filters=(
doctype.creation < duration
))
|
def clear_activity_logs(days=None):
"""clear 90 day old authentication logs or configured in log settings"""
if not days:
days = 90
doctype = DocType("Activity Log")
duration = (Now() - Interval(days=days))
frappe.db.delete(doctype, filters=(
doctype.creation < (Now() - Interval(days=days))
))
|
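The query-builder filter above expresses "creation older than the cutoff"; a plain-datetime sketch of the same cutoff computation (frappe's Now() - Interval(days=days) builds the equivalent expression in SQL):

from datetime import datetime, timedelta

days = 90
cutoff = datetime.utcnow() - timedelta(days=days)
# rows whose `creation` timestamp is earlier than `cutoff` would be deleted
print(cutoff.isoformat())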
30,621 |
def get_request_args(params):
limit = try_parse_integer(request.args.get('n', params.get('edl_size', 10000)), EDL_LIMIT_ERR_MSG)
offset = try_parse_integer(request.args.get('s', 0), EDL_OFFSET_ERR_MSG)
query = request.args.get('q', params.get('indicators_query'))
strip_port = request.args.get('sp', params.get('url_port_stripping', False))
drop_invalids = request.args.get('di', params.get('drop_invalids', False))
collapse_ips = request.args.get('tr', params.get('collapse_ips', DONT_COLLAPSE))
# handle flags
if drop_invalids is not None and drop_invalids == '':
drop_invalids = True
if strip_port is not None and strip_port == '':
strip_port = True
if collapse_ips is not None and collapse_ips not in [DONT_COLLAPSE, COLLAPSE_TO_CIDR, COLLAPSE_TO_RANGES]:
collapse_ips = try_parse_integer(collapse_ips, EDL_COLLAPSE_ERR_MSG)
if collapse_ips not in [0, 1, 2]:
raise DemistoException(EDL_COLLAPSE_ERR_MSG)
collapse_options = {
0: DONT_COLLAPSE,
1: COLLAPSE_TO_RANGES,
2: COLLAPSE_TO_CIDR
}
collapse_ips = collapse_options[collapse_ips]
return RequestArguments(query, limit, offset, strip_port, drop_invalids, collapse_ips)
|
def get_request_args(params):
limit = try_parse_integer(request.args.get('n', params.get('edl_size', 10000)), EDL_LIMIT_ERR_MSG)
offset = try_parse_integer(request.args.get('s', 0), EDL_OFFSET_ERR_MSG)
query = request.args.get('q', params.get('indicators_query'))
strip_port = request.args.get('sp', params.get('url_port_stripping', False))
drop_invalids = request.args.get('di', params.get('drop_invalids', False))
collapse_ips = request.args.get('tr', params.get('collapse_ips', DONT_COLLAPSE))
# handle flags
if drop_invalids is not None and drop_invalids == '':
drop_invalids = True
if strip_port is not None and strip_port == '':
strip_port = True
if collapse_ips not in [DONT_COLLAPSE, COLLAPSE_TO_CIDR, COLLAPSE_TO_RANGES]:
collapse_ips = try_parse_integer(collapse_ips, EDL_COLLAPSE_ERR_MSG)
if collapse_ips not in [0, 1, 2]:
raise DemistoException(EDL_COLLAPSE_ERR_MSG)
collapse_options = {
0: DONT_COLLAPSE,
1: COLLAPSE_TO_RANGES,
2: COLLAPSE_TO_CIDR
}
collapse_ips = collapse_options[collapse_ips]
return RequestArguments(query, limit, offset, strip_port, drop_invalids, collapse_ips)
|
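A small sketch of the 'tr' flag resolution above: numeric values 0/1/2 map to the collapse modes, while a value that is already one of the mode strings passes through. The mode labels here are placeholders for the integration's DONT_COLLAPSE / COLLAPSE_TO_RANGES / COLLAPSE_TO_CIDR constants:

DONT_COLLAPSE = "Don't Collapse"     # placeholder labels; the real constants
COLLAPSE_TO_RANGES = "To Ranges"     # are defined elsewhere in the integration
COLLAPSE_TO_CIDR = "To CIDRs"

collapse_options = {0: DONT_COLLAPSE, 1: COLLAPSE_TO_RANGES, 2: COLLAPSE_TO_CIDR}

def resolve_collapse(value):
    if value in (DONT_COLLAPSE, COLLAPSE_TO_CIDR, COLLAPSE_TO_RANGES):
        return value
    return collapse_options[int(value)]

print(resolve_collapse(2))
print(resolve_collapse(DONT_COLLAPSE))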
21,959 |
def estCoherence(outfile, corfile):
from mroipac.icu.Icu import Icu
#Create phase sigma correlation file here
filtImage = isceobj.createIntImage()
filtImage.load( outfile + '.xml')
filtImage.setAccessMode('read')
filtImage.createImage()
phsigImage = isceobj.createImage()
phsigImage.dataType='FLOAT'
phsigImage.bands = 1
phsigImage.setWidth(filtImage.getWidth())
phsigImage.setFilename(corfile)
phsigImage.setAccessMode('write')
phsigImage.createImage()
icuObj = Icu(name='sentinel_filter_icu')
icuObj.configure()
icuObj.unwrappingFlag = False
icuObj.useAmplitudeFlag = False
#icuObj.correlationType = 'NOSLOPE'
icuObj.icu(intImage = filtImage, phsigImage=phsigImage)
phsigImage.renderHdr()
filtImage.finalizeImage()
phsigImage.finalizeImage()
|
def estimate_coherence(outfile, corfile):
from mroipac.icu.Icu import Icu
#Create phase sigma correlation file here
filtImage = isceobj.createIntImage()
filtImage.load( outfile + '.xml')
filtImage.setAccessMode('read')
filtImage.createImage()
phsigImage = isceobj.createImage()
phsigImage.dataType='FLOAT'
phsigImage.bands = 1
phsigImage.setWidth(filtImage.getWidth())
phsigImage.setFilename(corfile)
phsigImage.setAccessMode('write')
phsigImage.createImage()
icuObj = Icu(name='sentinel_filter_icu')
icuObj.configure()
icuObj.unwrappingFlag = False
icuObj.useAmplitudeFlag = False
#icuObj.correlationType = 'NOSLOPE'
icuObj.icu(intImage = filtImage, phsigImage=phsigImage)
phsigImage.renderHdr()
filtImage.finalizeImage()
phsigImage.finalizeImage()
|
34,978 |
def dtype_is_supported(dtype):
return dtype == "float32" or dtype == ""
|
def dtype_is_supported(dtype):
return dtype in ("", "float32")
|
29,417 |
def load_keys(obj, path):
"""load all the keys specified at gived path in vault.
:param obj: settings object
:param path: path to the vault secrets
:return: list containing all the keys at the given path
"""
client = get_client(obj)
try:
return client.list("/secret/metadata/{}".format(path))["data"]["keys"]
except (KeyError, TypeError):
return []
|
def list_envs(obj, path):
"""load all the keys specified at gived path in vault.
:param obj: settings object
:param path: path to the vault secrets
:return: list containing all the keys at the given path
"""
client = get_client(obj)
try:
return client.list("/secret/metadata/{}".format(path))["data"]["keys"]
except (KeyError, TypeError):
return []
|
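A hedged sketch of the response shape the helper above unpacks: a KV v2 LIST on secret/metadata/<path> returns the key names under data.keys, and the helper falls back to an empty list when that structure is missing. The sample payload below is made up:

def extract_keys(list_response):
    try:
        return list_response["data"]["keys"]
    except (KeyError, TypeError):
        return []

print(extract_keys({"data": {"keys": ["app1/", "app2/"]}}))  # ['app1/', 'app2/']
print(extract_keys(None))                                    # []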
28,602 |
def plot_violin(
data,
var_names=None,
filter_vars=None,
transform=None,
quartiles=True,
rug=False,
hdi_prob=None,
shade=0.35,
bw="default",
circular=False,
sharex=True,
sharey=True,
grid=None,
figsize=None,
textsize=None,
labeller=None,
ax=None,
shade_kwargs=None,
rug_kwargs=None,
backend=None,
backend_kwargs=None,
show=None,
):
"""Plot posterior of traces as violin plot.
Notes
-----
If multiple chains are provided for a variable they will be combined
Parameters
----------
data: obj
Any object that can be converted to an :class:`arviz.InferenceData` object
Refer to documentation of :func:`arviz.convert_to_dataset` for details
var_names: list of variable names, optional
        Variables to be plotted, if None all variables are plotted. Prefix the
variables by ``~`` when you want to exclude them from the plot.
filter_vars: {None, "like", "regex"}, optional, default=None
If `None` (default), interpret var_names as the real variables names. If "like",
interpret var_names as substrings of the real variables names. If "regex",
interpret var_names as regular expressions on the real variables names. A la
``pandas.filter``.
transform: callable
Function to transform data (defaults to None i.e. the identity function).
quartiles: bool, optional
Flag for plotting the interquartile range, in addition to the hdi_prob*100%
intervals. Defaults to ``True``.
rug: bool
If ``True`` adds a jittered rugplot. Defaults to ``False``.
hdi_prob: float, optional
Plots highest posterior density interval for chosen percentage of density. Defaults to 0.94.
shade: float
Alpha blending value for the shaded area under the curve, between 0
        (no shade) and 1 (opaque). Defaults to 0.35.
bw: float or str, optional
If numeric, indicates the bandwidth and must be positive.
If str, indicates the method to estimate the bandwidth and must be
one of "scott", "silverman", "isj" or "experimental" when ``circular`` is ``False``
and "taylor" (for now) when ``circular`` is ``True``.
Defaults to "default" which means "experimental" when variable is not circular
and "taylor" when it is.
circular: bool, optional.
        If ``True``, `values` is interpreted as a circular variable measured in radians
and a circular KDE is used. Defaults to ``False``.
grid : tuple
Number of rows and columns. Defaults to None, the rows and columns are
automatically inferred.
figsize: tuple
Figure size. If None it will be defined automatically.
textsize: int
Text size of the point_estimates, axis ticks, and highest density interval. If None it will
be autoscaled based on ``figsize``.
labeller : labeller instance, optional
Class providing the method ``make_label_vert`` to generate the labels in the plot titles.
Read the :ref:`label_guide` for more details and usage examples.
sharex: bool
Defaults to ``True``, violinplots share a common x-axis scale.
sharey: bool
Defaults to ``True``, violinplots share a common y-axis scale.
ax: numpy array-like of matplotlib axes or bokeh figures, optional
A 2D array of locations into which to plot the densities. If not supplied, Arviz will create
its own array of plot areas (and return it).
shade_kwargs: dicts, optional
Additional keywords passed to ``fill_between``, or ``barh`` to control the shade.
rug_kwargs: dict
Keywords passed to the rug plot. If true only the right half side of the violin will be
plotted.
backend: str, optional
Select plotting backend {"matplotlib","bokeh"}. Default to "matplotlib".
backend_kwargs: bool, optional
These are kwargs specific to the backend being used, passed to
:func:`matplotlib.pyplot.subplots` or :func:`bokeh.plotting.figure`.
For additional documentation check the plotting method of the backend.
show: bool, optional
Call backend show function.
Returns
-------
axes: matplotlib axes or bokeh figures
See Also
--------
plot_forest: Forest plot to compare HDI intervals from a number of distributions.
Examples
--------
Show a default violin plot
.. plot::
:context: close-figs
>>> import arviz as az
>>> data = az.load_arviz_data('centered_eight')
>>> az.plot_violin(data)
Show a default violin plot, but with a transformation applied to the data
.. plot::
:context: close-figs
>>> az.plot_violin(data, var_names="tau", transform=np.log)
"""
if labeller is None:
labeller = BaseLabeller()
data = convert_to_dataset(data, group="posterior")
if transform is not None:
data = transform(data)
var_names = _var_names(var_names, data, filter_vars)
plotters = filter_plotters_list(
list(xarray_var_iter(data, var_names=var_names, combined=True)), "plot_violin"
)
rows, cols = default_grid(len(plotters), grid=grid)
if hdi_prob is None:
hdi_prob = rcParams["stats.hdi_prob"]
else:
if not 1 >= hdi_prob > 0:
raise ValueError("The value of hdi_prob should be in the interval (0, 1]")
violinplot_kwargs = dict(
ax=ax,
plotters=plotters,
figsize=figsize,
rows=rows,
cols=cols,
sharex=sharex,
sharey=sharey,
shade_kwargs=shade_kwargs,
shade=shade,
rug=rug,
rug_kwargs=rug_kwargs,
bw=bw,
textsize=textsize,
labeller=labeller,
circular=circular,
hdi_prob=hdi_prob,
quartiles=quartiles,
backend_kwargs=backend_kwargs,
show=show,
)
if backend is None:
backend = rcParams["plot.backend"]
backend = backend.lower()
# TODO: Add backend kwargs
plot = get_plotting_function("plot_violin", "violinplot", backend)
ax = plot(**violinplot_kwargs)
return ax
|
def plot_violin(
data,
var_names=None,
filter_vars=None,
transform=None,
quartiles=True,
rug=False,
hdi_prob=None,
shade=0.35,
bw="default",
circular=False,
sharex=True,
sharey=True,
grid=None,
figsize=None,
textsize=None,
labeller=None,
ax=None,
shade_kwargs=None,
rug_kwargs=None,
backend=None,
backend_kwargs=None,
show=None,
):
"""Plot posterior of traces as violin plot.
Notes
-----
If multiple chains are provided for a variable they will be combined
Parameters
----------
data: obj
Any object that can be converted to an :class:`arviz.InferenceData` object
Refer to documentation of :func:`arviz.convert_to_dataset` for details
var_names: list of variable names, optional
        Variables to be plotted, if None all variables are plotted. Prefix the
variables by ``~`` when you want to exclude them from the plot.
filter_vars: {None, "like", "regex"}, optional, default=None
If `None` (default), interpret var_names as the real variables names. If "like",
interpret var_names as substrings of the real variables names. If "regex",
interpret var_names as regular expressions on the real variables names. A la
``pandas.filter``.
transform: callable
Function to transform data (defaults to None i.e. the identity function).
quartiles: bool, optional
Flag for plotting the interquartile range, in addition to the hdi_prob*100%
intervals. Defaults to ``True``.
rug: bool
If ``True`` adds a jittered rugplot. Defaults to ``False``.
hdi_prob: float, optional
Plots highest posterior density interval for chosen percentage of density. Defaults to 0.94.
shade: float
Alpha blending value for the shaded area under the curve, between 0
        (no shade) and 1 (opaque). Defaults to 0.35.
bw: float or str, optional
If numeric, indicates the bandwidth and must be positive.
If str, indicates the method to estimate the bandwidth and must be
one of "scott", "silverman", "isj" or "experimental" when ``circular`` is ``False``
and "taylor" (for now) when ``circular`` is ``True``.
Defaults to "default" which means "experimental" when variable is not circular
and "taylor" when it is.
circular: bool, optional.
        If ``True``, `values` is interpreted as a circular variable measured in radians
and a circular KDE is used. Defaults to ``False``.
grid : tuple
Number of rows and columns. Defaults to None, the rows and columns are
automatically inferred.
figsize: tuple
Figure size. If None it will be defined automatically.
textsize: int
Text size of the point_estimates, axis ticks, and highest density interval. If None it will
be autoscaled based on ``figsize``.
labeller : labeller instance, optional
Class providing the method ``make_label_vert`` to generate the labels in the plot titles.
Read the :ref:`label_guide` for more details and usage examples.
sharex: bool
Defaults to ``True``, violinplots share a common x-axis scale.
sharey: bool
Defaults to ``True``, violinplots share a common y-axis scale.
ax: numpy array-like of matplotlib axes or bokeh figures, optional
A 2D array of locations into which to plot the densities. If not supplied, Arviz will create
its own array of plot areas (and return it).
shade_kwargs: dicts, optional
Additional keywords passed to :meth:`matplotlib.axes.Axes.fill_between`, or :meth:`matplotlib.axes.Axes.barh` to control the shade.
rug_kwargs: dict
Keywords passed to the rug plot. If true only the right half side of the violin will be
plotted.
backend: str, optional
Select plotting backend {"matplotlib","bokeh"}. Default to "matplotlib".
backend_kwargs: bool, optional
These are kwargs specific to the backend being used, passed to
:func:`matplotlib.pyplot.subplots` or :func:`bokeh.plotting.figure`.
For additional documentation check the plotting method of the backend.
show: bool, optional
Call backend show function.
Returns
-------
axes: matplotlib axes or bokeh figures
See Also
--------
plot_forest: Forest plot to compare HDI intervals from a number of distributions.
Examples
--------
Show a default violin plot
.. plot::
:context: close-figs
>>> import arviz as az
>>> data = az.load_arviz_data('centered_eight')
>>> az.plot_violin(data)
Show a default violin plot, but with a transformation applied to the data
.. plot::
:context: close-figs
>>> az.plot_violin(data, var_names="tau", transform=np.log)
"""
if labeller is None:
labeller = BaseLabeller()
data = convert_to_dataset(data, group="posterior")
if transform is not None:
data = transform(data)
var_names = _var_names(var_names, data, filter_vars)
plotters = filter_plotters_list(
list(xarray_var_iter(data, var_names=var_names, combined=True)), "plot_violin"
)
rows, cols = default_grid(len(plotters), grid=grid)
if hdi_prob is None:
hdi_prob = rcParams["stats.hdi_prob"]
else:
if not 1 >= hdi_prob > 0:
raise ValueError("The value of hdi_prob should be in the interval (0, 1]")
violinplot_kwargs = dict(
ax=ax,
plotters=plotters,
figsize=figsize,
rows=rows,
cols=cols,
sharex=sharex,
sharey=sharey,
shade_kwargs=shade_kwargs,
shade=shade,
rug=rug,
rug_kwargs=rug_kwargs,
bw=bw,
textsize=textsize,
labeller=labeller,
circular=circular,
hdi_prob=hdi_prob,
quartiles=quartiles,
backend_kwargs=backend_kwargs,
show=show,
)
if backend is None:
backend = rcParams["plot.backend"]
backend = backend.lower()
# TODO: Add backend kwargs
plot = get_plotting_function("plot_violin", "violinplot", backend)
ax = plot(**violinplot_kwargs)
return ax
|
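A minimal usage sketch for the plot_violin variant above, not part of any dataset row: it simply mirrors the docstring examples and assumes arviz and numpy are installed.
import arviz as az
import numpy as np
# Load a bundled example trace and draw violins for every variable.
data = az.load_arviz_data("centered_eight")
axes = az.plot_violin(data)
# Restrict to a single positive variable and log-transform it, as in the docstring.
az.plot_violin(data, var_names="tau", transform=np.log)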
27,451 |
def lintify(meta, recipe_dir=None, conda_forge=False):
lints = []
hints = []
major_sections = list(meta.keys())
    # If the recipe_dir exists (no guarantee within this function), we can
# find the meta.yaml within it.
meta_fname = os.path.join(recipe_dir or "", "meta.yaml")
sources_section = get_section(meta, "source", lints)
build_section = get_section(meta, "build", lints)
requirements_section = get_section(meta, "requirements", lints)
test_section = get_section(meta, "test", lints)
about_section = get_section(meta, "about", lints)
extra_section = get_section(meta, "extra", lints)
package_section = get_section(meta, "package", lints)
outputs_section = get_section(meta, "outputs", lints)
recipe_dirname = os.path.basename(recipe_dir) if recipe_dir else "recipe"
is_staged_recipes = recipe_dirname != "recipe"
# 0: Top level keys should be expected
unexpected_sections = []
for section in major_sections:
if section not in EXPECTED_SECTION_ORDER:
lints.append(
"The top level meta key {} is unexpected".format(section)
)
unexpected_sections.append(section)
for section in unexpected_sections:
major_sections.remove(section)
# 1: Top level meta.yaml keys should have a specific order.
lint_section_order(major_sections, lints)
# 2: The about section should have a home, license and summary.
lint_about_contents(about_section, lints)
# 3a: The recipe should have some maintainers.
if not extra_section.get("recipe-maintainers", []):
lints.append(
"The recipe could do with some maintainers listed in "
"the `extra/recipe-maintainers` section."
)
# 3b: Maintainers should be a list
if not (
isinstance(extra_section.get("recipe-maintainers", []), Sequence)
and not isinstance(
extra_section.get("recipe-maintainers", []), str_type
)
):
lints.append("Recipe maintainers should be a json list.")
# 4: The recipe should have some tests.
if not any(key in TEST_KEYS for key in test_section):
a_test_file_exists = recipe_dir is not None and any(
os.path.exists(os.path.join(recipe_dir, test_file))
for test_file in TEST_FILES
)
if not a_test_file_exists:
has_outputs_test = False
no_test_hints = []
if outputs_section:
for out in outputs_section:
test_out = get_section(out, "test", lints)
if any(key in TEST_KEYS for key in test_out):
has_outputs_test = True
else:
no_test_hints.append(
"It looks like the '{}' output doesn't "
"have any tests.".format(out.get("name", "???"))
)
if has_outputs_test:
hints.extend(no_test_hints)
else:
lints.append("The recipe must have some tests.")
# 5: License cannot be 'unknown.'
license = about_section.get("license", "").lower()
if "unknown" == license.strip():
lints.append("The recipe license cannot be unknown.")
# 6: Selectors should be in a tidy form.
if recipe_dir is not None and os.path.exists(meta_fname):
bad_selectors = []
bad_lines = []
# Good selectors look like ".*\s\s#\s[...]"
good_selectors_pat = re.compile(r"(.+?)\s{2,}#\s\[(.+)\](?(2).*)$")
with io.open(meta_fname, "rt") as fh:
for selector_line, line_number in selector_lines(fh):
if not good_selectors_pat.match(selector_line):
bad_selectors.append(selector_line)
bad_lines.append(line_number)
if bad_selectors:
lints.append(
"Selectors are suggested to take a "
"``<two spaces>#<one space>[<expression>]`` form."
" See lines {}".format(bad_lines)
)
# 7: The build section should have a build number.
if build_section.get("number", None) is None:
lints.append("The recipe must have a `build/number` section.")
# 8: The build section should be before the run section in requirements.
seen_requirements = [
k for k in requirements_section if k in REQUIREMENTS_ORDER
]
requirements_order_sorted = sorted(
seen_requirements, key=REQUIREMENTS_ORDER.index
)
if seen_requirements != requirements_order_sorted:
lints.append(
"The `requirements/` sections should be defined "
"in the following order: "
+ ", ".join(REQUIREMENTS_ORDER)
+ "; instead saw: "
+ ", ".join(seen_requirements)
+ "."
)
# 9: Files downloaded should have a hash.
for source_section in sources_section:
if "url" in source_section and not (
{"sha1", "sha256", "md5"} & set(source_section.keys())
):
lints.append(
"When defining a source/url please add a sha256, sha1 "
"or md5 checksum (sha256 preferably)."
)
# 10: License should not include the word 'license'.
license = about_section.get("license", "").lower()
if "license" in license.lower() and "unlicense" not in license.lower():
lints.append(
"The recipe `license` should not include the word " '"License".'
)
# 11: There should be one empty line at the end of the file.
if recipe_dir is not None and os.path.exists(meta_fname):
with io.open(meta_fname, "r") as f:
lines = f.read().split("\n")
# Count the number of empty lines from the end of the file
empty_lines = itertools.takewhile(lambda x: x == "", reversed(lines))
end_empty_lines_count = len(list(empty_lines))
if end_empty_lines_count > 1:
lints.append(
"There are {} too many lines. "
"There should be one empty line at the end of the "
"file.".format(end_empty_lines_count - 1)
)
elif end_empty_lines_count < 1:
lints.append(
"There are too few lines. There should be one empty "
"line at the end of the file."
)
# 12: License family must be valid (conda-build checks for that)
try:
ensure_valid_license_family(meta)
except RuntimeError as e:
lints.append(str(e))
# 12a: License family must be valid (conda-build checks for that)
license_family = about_section.get("license_family", license).lower()
license_file = about_section.get("license_file", "")
if license_file == "" and any(
f for f in NEEDED_FAMILIES if f in license_family
):
lints.append("license_file entry is missing, but is required.")
# 13: Check that the recipe name is valid
recipe_name = package_section.get("name", "").strip()
if re.match("^[a-z0-9_\-.]+$", recipe_name) is None:
lints.append(
"Recipe name has invalid characters. only lowercase alpha, numeric, "
"underscores, hyphens and dots allowed"
)
# 14: Run conda-forge specific lints
if conda_forge:
run_conda_forge_specific(meta, recipe_dir, lints, hints)
# 15: Check if we are using legacy patterns
build_reqs = requirements_section.get("build", None)
if build_reqs and ("numpy x.x" in build_reqs):
lints.append(
"Using pinned numpy packages is a deprecated pattern. Consider "
"using the method outlined "
"[here](https://conda-forge.org/docs/maintainer/knowledge_base.html#linking-numpy)."
)
# 16: Subheaders should be in the allowed subheadings
for section in major_sections:
expected_subsections = FIELDS.get(section, [])
if not expected_subsections:
continue
for subsection in get_section(meta, section, lints):
if (
section != "source"
and section != "outputs"
and subsection not in expected_subsections
):
lints.append(
"The {} section contained an unexpected "
"subsection name. {} is not a valid subsection"
" name.".format(section, subsection)
)
elif section == "source" or section == "outputs":
for source_subsection in subsection:
if source_subsection not in expected_subsections:
lints.append(
"The {} section contained an unexpected "
"subsection name. {} is not a valid subsection"
" name.".format(section, source_subsection)
)
# 17: noarch doesn't work with selectors for runtime dependencies
if build_section.get("noarch") is not None and os.path.exists(meta_fname):
with io.open(meta_fname, "rt") as fh:
in_runreqs = False
for line in fh:
line_s = line.strip()
if line_s == "host:" or line_s == "run:":
in_runreqs = True
runreqs_spacing = line[: -len(line.lstrip())]
continue
if line_s.startswith("skip:") and is_selector_line(line):
lints.append(
"`noarch` packages can't have selectors. If "
"the selectors are necessary, please remove "
"`noarch: {}`.".format(build_section["noarch"])
)
break
if in_runreqs:
if runreqs_spacing == line[: -len(line.lstrip())]:
in_runreqs = False
continue
if is_selector_line(line):
lints.append(
"`noarch` packages can't have selectors. If "
"the selectors are necessary, please remove "
"`noarch: {}`.".format(build_section["noarch"])
)
break
# 19: check version
if package_section.get("version") is not None:
ver = str(package_section.get("version"))
try:
conda_build.conda_interface.VersionOrder(ver)
except:
lints.append(
"Package version {} doesn't match conda spec".format(ver)
)
# 20: Jinja2 variable definitions should be nice.
if recipe_dir is not None and os.path.exists(meta_fname):
bad_jinja = []
bad_lines = []
# Good Jinja2 variable definitions look like "{% set .+ = .+ %}"
good_jinja_pat = re.compile(r"\s*\{%\s(set)\s[^\s]+\s=\s[^\s]+\s%\}")
with io.open(meta_fname, "rt") as fh:
for jinja_line, line_number in jinja_lines(fh):
if not good_jinja_pat.match(jinja_line):
bad_jinja.append(jinja_line)
bad_lines.append(line_number)
if bad_jinja:
lints.append(
"Jinja2 variable definitions are suggested to "
"take a ``{{%<one space>set<one space>"
"<variable name><one space>=<one space>"
"<expression><one space>%}}`` form. See lines "
"{}".format(bad_lines)
)
# 21: Legacy usage of compilers
if build_reqs and ("toolchain" in build_reqs):
lints.append(
"Using toolchain directly in this manner is deprecated. Consider "
"using the compilers outlined "
"[here](https://conda-forge.org/docs/maintainer/knowledge_base.html#compilers)."
)
# 22: Single space in pinned requirements
for section, requirements in requirements_section.items():
for requirement in requirements or []:
req, _, _ = requirement.partition("#")
if "{{" in req:
continue
parts = req.split()
if len(parts) > 2 and parts[1] in [
"!=",
"=",
"==",
">",
"<",
"<=",
">=",
]:
# check for too many spaces
lints.append(
(
"``requirements: {section}: {requirement}`` should not "
"contain a space between relational operator and the version, i.e. "
"``{name} {pin}``"
).format(
section=section,
requirement=requirement,
name=parts[0],
pin="".join(parts[1:]),
)
)
continue
# check that there is a space if there is a pin
bad_char_idx = [(parts[0].find(c), c) for c in "><="]
bad_char_idx = [bci for bci in bad_char_idx if bci[0] >= 0]
if bad_char_idx:
bad_char_idx.sort()
i = bad_char_idx[0][0]
lints.append(
(
"``requirements: {section}: {requirement}`` must "
"contain a space between the name and the pin, i.e. "
"``{name} {pin}``"
).format(
section=section,
requirement=requirement,
name=parts[0][:i],
pin=parts[0][i:] + "".join(parts[1:]),
)
)
continue
# 23: non noarch builds shouldn't use version constraints on python and r-base
check_languages = ["python", "r-base"]
host_reqs = requirements_section.get("host") or []
run_reqs = requirements_section.get("run") or []
for language in check_languages:
if build_section.get("noarch") is None and not outputs_section:
filtered_host_reqs = [
req
for req in host_reqs
if req == str(language) or req.startswith(str(language))
]
filtered_run_reqs = [
req
for req in run_reqs
if req == str(language) or req.startswith(str(language))
]
if filtered_host_reqs and not filtered_run_reqs:
lints.append(
"If {0} is a host requirement, it should be a run requirement.".format(
str(language)
)
)
for reqs in [filtered_host_reqs, filtered_run_reqs]:
if str(language) in reqs:
continue
for req in reqs:
constraint = req.split(" ", 1)[1]
if constraint.startswith(">") or constraint.startswith(
"<"
):
lints.append(
"Non noarch: {0} packages should have a {0} requirement without any version constraints.".format(
str(language)
)
)
# hints
# 1: suggest pip
if "script" in build_section:
scripts = build_section["script"]
if isinstance(scripts, str):
scripts = [scripts]
for script in scripts:
if "python setup.py install" in script:
hints.append(
"Whenever possible python packages should use pip. "
"See https://conda-forge.org/docs/maintainer/adding_pkgs.html#use-pip"
)
# 2: suggest python noarch (skip on feedstocks)
if (
build_section.get("noarch") is None
and build_reqs
and not any(["_compiler_stub" in b for b in build_reqs])
and ("pip" in build_reqs)
and (is_staged_recipes or not conda_forge)
):
with io.open(meta_fname, "rt") as fh:
in_runreqs = False
no_arch_possible = True
for line in fh:
line_s = line.strip()
if line_s == "host:" or line_s == "run:":
in_runreqs = True
runreqs_spacing = line[: -len(line.lstrip())]
continue
if line_s.startswith("skip:") and is_selector_line(line):
no_arch_possible = False
break
if in_runreqs:
if runreqs_spacing == line[: -len(line.lstrip())]:
in_runreqs = False
continue
if is_selector_line(line):
no_arch_possible = False
break
if no_arch_possible:
hints.append(
"Whenever possible python packages should use noarch. "
"See https://conda-forge.org/docs/maintainer/knowledge_base.html#noarch-builds"
)
# 3: suggest fixing all recipe/*.sh shellcheck findings
shellcheck_enabled = False
shell_scripts = []
if recipe_dir:
shell_scripts = glob(os.path.join(recipe_dir, "*.sh"))
# support feedstocks and staged-recipes
forge_yaml = glob(
os.path.join(recipe_dir, "..", "conda-forge.yml")
) or glob(os.path.join(recipe_dir, "..", "..", "conda-forge.yml"),)
if shell_scripts and forge_yaml:
with open(forge_yaml[0], "r") as fh:
code = yaml.load(fh)
shellcheck_enabled = code.get("shellcheck", {}).get(
"enabled", shellcheck_enabled
)
if shellcheck_enabled and shutil.which("shellcheck") and shell_scripts:
MAX_SHELLCHECK_LINES = 50
cmd = [
"shellcheck",
"--enable=all",
"--shell=bash",
# SC2154: var is referenced but not assigned,
# see https://github.com/koalaman/shellcheck/wiki/SC2154
"--exclude=SC2154",
]
p = subprocess.Popen(
cmd + shell_scripts,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
env={
"PATH": os.getenv("PATH")
}, # exclude other env variables to protect against token leakage
)
sc_stdout, _ = p.communicate()
if p.returncode == 1:
# All files successfully scanned with some issues.
findings = (
sc_stdout.decode(sys.stdout.encoding)
.replace("\r\n", "\n")
.splitlines()
)
hints.append(
"Whenever possible fix all shellcheck findings ('"
+ " ".join(cmd)
+ " recipe/*.sh -f diff | git apply' helps)"
)
hints.extend(findings[:50])
if len(findings) > MAX_SHELLCHECK_LINES:
hints.append(
"Output restricted, there are '%s' more lines."
% (len(findings) - MAX_SHELLCHECK_LINES)
)
elif p.returncode != 0:
# Something went wrong.
hints.append(
"There have been errors while scanning with shellcheck."
)
return lints, hints
|
def lintify(meta, recipe_dir=None, conda_forge=False):
lints = []
hints = []
major_sections = list(meta.keys())
    # If the recipe_dir exists (no guarantee within this function), we can
# find the meta.yaml within it.
meta_fname = os.path.join(recipe_dir or "", "meta.yaml")
sources_section = get_section(meta, "source", lints)
build_section = get_section(meta, "build", lints)
requirements_section = get_section(meta, "requirements", lints)
test_section = get_section(meta, "test", lints)
about_section = get_section(meta, "about", lints)
extra_section = get_section(meta, "extra", lints)
package_section = get_section(meta, "package", lints)
outputs_section = get_section(meta, "outputs", lints)
recipe_dirname = os.path.basename(recipe_dir) if recipe_dir else "recipe"
is_staged_recipes = recipe_dirname != "recipe"
# 0: Top level keys should be expected
unexpected_sections = []
for section in major_sections:
if section not in EXPECTED_SECTION_ORDER:
lints.append(
"The top level meta key {} is unexpected".format(section)
)
unexpected_sections.append(section)
for section in unexpected_sections:
major_sections.remove(section)
# 1: Top level meta.yaml keys should have a specific order.
lint_section_order(major_sections, lints)
# 2: The about section should have a home, license and summary.
lint_about_contents(about_section, lints)
# 3a: The recipe should have some maintainers.
if not extra_section.get("recipe-maintainers", []):
lints.append(
"The recipe could do with some maintainers listed in "
"the `extra/recipe-maintainers` section."
)
# 3b: Maintainers should be a list
if not (
isinstance(extra_section.get("recipe-maintainers", []), Sequence)
and not isinstance(
extra_section.get("recipe-maintainers", []), str_type
)
):
lints.append("Recipe maintainers should be a json list.")
# 4: The recipe should have some tests.
if not any(key in TEST_KEYS for key in test_section):
a_test_file_exists = recipe_dir is not None and any(
os.path.exists(os.path.join(recipe_dir, test_file))
for test_file in TEST_FILES
)
if not a_test_file_exists:
has_outputs_test = False
no_test_hints = []
if outputs_section:
for out in outputs_section:
test_out = get_section(out, "test", lints)
if any(key in TEST_KEYS for key in test_out):
has_outputs_test = True
else:
no_test_hints.append(
"It looks like the '{}' output doesn't "
"have any tests.".format(out.get("name", "???"))
)
if has_outputs_test:
hints.extend(no_test_hints)
else:
lints.append("The recipe must have some tests.")
# 5: License cannot be 'unknown.'
license = about_section.get("license", "").lower()
if "unknown" == license.strip():
lints.append("The recipe license cannot be unknown.")
# 6: Selectors should be in a tidy form.
if recipe_dir is not None and os.path.exists(meta_fname):
bad_selectors = []
bad_lines = []
# Good selectors look like ".*\s\s#\s[...]"
good_selectors_pat = re.compile(r"(.+?)\s{2,}#\s\[(.+)\](?(2).*)$")
with io.open(meta_fname, "rt") as fh:
for selector_line, line_number in selector_lines(fh):
if not good_selectors_pat.match(selector_line):
bad_selectors.append(selector_line)
bad_lines.append(line_number)
if bad_selectors:
lints.append(
"Selectors are suggested to take a "
"``<two spaces>#<one space>[<expression>]`` form."
" See lines {}".format(bad_lines)
)
# 7: The build section should have a build number.
if build_section.get("number", None) is None:
lints.append("The recipe must have a `build/number` section.")
# 8: The build section should be before the run section in requirements.
seen_requirements = [
k for k in requirements_section if k in REQUIREMENTS_ORDER
]
requirements_order_sorted = sorted(
seen_requirements, key=REQUIREMENTS_ORDER.index
)
if seen_requirements != requirements_order_sorted:
lints.append(
"The `requirements/` sections should be defined "
"in the following order: "
+ ", ".join(REQUIREMENTS_ORDER)
+ "; instead saw: "
+ ", ".join(seen_requirements)
+ "."
)
# 9: Files downloaded should have a hash.
for source_section in sources_section:
if "url" in source_section and not (
{"sha1", "sha256", "md5"} & set(source_section.keys())
):
lints.append(
"When defining a source/url please add a sha256, sha1 "
"or md5 checksum (sha256 preferably)."
)
# 10: License should not include the word 'license'.
license = about_section.get("license", "").lower()
if "license" in license.lower() and "unlicense" not in license.lower():
lints.append(
"The recipe `license` should not include the word " '"License".'
)
# 11: There should be one empty line at the end of the file.
if recipe_dir is not None and os.path.exists(meta_fname):
with io.open(meta_fname, "r") as f:
lines = f.read().split("\n")
# Count the number of empty lines from the end of the file
empty_lines = itertools.takewhile(lambda x: x == "", reversed(lines))
end_empty_lines_count = len(list(empty_lines))
if end_empty_lines_count > 1:
lints.append(
"There are {} too many lines. "
"There should be one empty line at the end of the "
"file.".format(end_empty_lines_count - 1)
)
elif end_empty_lines_count < 1:
lints.append(
"There are too few lines. There should be one empty "
"line at the end of the file."
)
# 12: License family must be valid (conda-build checks for that)
try:
ensure_valid_license_family(meta)
except RuntimeError as e:
lints.append(str(e))
# 12a: License family must be valid (conda-build checks for that)
license_family = about_section.get("license_family", license).lower()
license_file = about_section.get("license_file", "")
if license_file == "" and any(
f for f in NEEDED_FAMILIES if f in license_family
):
lints.append("license_file entry is missing, but is required.")
# 13: Check that the recipe name is valid
recipe_name = package_section.get("name", "").strip()
if re.match("^[a-z0-9_\-.]+$", recipe_name) is None:
lints.append(
"Recipe name has invalid characters. only lowercase alpha, numeric, "
"underscores, hyphens and dots allowed"
)
# 14: Run conda-forge specific lints
if conda_forge:
run_conda_forge_specific(meta, recipe_dir, lints, hints)
# 15: Check if we are using legacy patterns
build_reqs = requirements_section.get("build", None)
if build_reqs and ("numpy x.x" in build_reqs):
lints.append(
"Using pinned numpy packages is a deprecated pattern. Consider "
"using the method outlined "
"[here](https://conda-forge.org/docs/maintainer/knowledge_base.html#linking-numpy)."
)
# 16: Subheaders should be in the allowed subheadings
for section in major_sections:
expected_subsections = FIELDS.get(section, [])
if not expected_subsections:
continue
for subsection in get_section(meta, section, lints):
if (
section != "source"
and section != "outputs"
and subsection not in expected_subsections
):
lints.append(
"The {} section contained an unexpected "
"subsection name. {} is not a valid subsection"
" name.".format(section, subsection)
)
elif section == "source" or section == "outputs":
for source_subsection in subsection:
if source_subsection not in expected_subsections:
lints.append(
"The {} section contained an unexpected "
"subsection name. {} is not a valid subsection"
" name.".format(section, source_subsection)
)
# 17: noarch doesn't work with selectors for runtime dependencies
if build_section.get("noarch") is not None and os.path.exists(meta_fname):
with io.open(meta_fname, "rt") as fh:
in_runreqs = False
for line in fh:
line_s = line.strip()
if line_s == "host:" or line_s == "run:":
in_runreqs = True
runreqs_spacing = line[: -len(line.lstrip())]
continue
if line_s.startswith("skip:") and is_selector_line(line):
lints.append(
"`noarch` packages can't have selectors. If "
"the selectors are necessary, please remove "
"`noarch: {}`.".format(build_section["noarch"])
)
break
if in_runreqs:
if runreqs_spacing == line[: -len(line.lstrip())]:
in_runreqs = False
continue
if is_selector_line(line):
lints.append(
"`noarch` packages can't have selectors. If "
"the selectors are necessary, please remove "
"`noarch: {}`.".format(build_section["noarch"])
)
break
# 19: check version
if package_section.get("version") is not None:
ver = str(package_section.get("version"))
try:
conda_build.conda_interface.VersionOrder(ver)
except:
lints.append(
"Package version {} doesn't match conda spec".format(ver)
)
# 20: Jinja2 variable definitions should be nice.
if recipe_dir is not None and os.path.exists(meta_fname):
bad_jinja = []
bad_lines = []
# Good Jinja2 variable definitions look like "{% set .+ = .+ %}"
good_jinja_pat = re.compile(r"\s*\{%\s(set)\s[^\s]+\s=\s[^\s]+\s%\}")
with io.open(meta_fname, "rt") as fh:
for jinja_line, line_number in jinja_lines(fh):
if not good_jinja_pat.match(jinja_line):
bad_jinja.append(jinja_line)
bad_lines.append(line_number)
if bad_jinja:
lints.append(
"Jinja2 variable definitions are suggested to "
"take a ``{{%<one space>set<one space>"
"<variable name><one space>=<one space>"
"<expression><one space>%}}`` form. See lines "
"{}".format(bad_lines)
)
# 21: Legacy usage of compilers
if build_reqs and ("toolchain" in build_reqs):
lints.append(
"Using toolchain directly in this manner is deprecated. Consider "
"using the compilers outlined "
"[here](https://conda-forge.org/docs/maintainer/knowledge_base.html#compilers)."
)
# 22: Single space in pinned requirements
for section, requirements in requirements_section.items():
for requirement in requirements or []:
req, _, _ = requirement.partition("#")
if "{{" in req:
continue
parts = req.split()
if len(parts) > 2 and parts[1] in [
"!=",
"=",
"==",
">",
"<",
"<=",
">=",
]:
# check for too many spaces
lints.append(
(
"``requirements: {section}: {requirement}`` should not "
"contain a space between relational operator and the version, i.e. "
"``{name} {pin}``"
).format(
section=section,
requirement=requirement,
name=parts[0],
pin="".join(parts[1:]),
)
)
continue
# check that there is a space if there is a pin
bad_char_idx = [(parts[0].find(c), c) for c in "><="]
bad_char_idx = [bci for bci in bad_char_idx if bci[0] >= 0]
if bad_char_idx:
bad_char_idx.sort()
i = bad_char_idx[0][0]
lints.append(
(
"``requirements: {section}: {requirement}`` must "
"contain a space between the name and the pin, i.e. "
"``{name} {pin}``"
).format(
section=section,
requirement=requirement,
name=parts[0][:i],
pin=parts[0][i:] + "".join(parts[1:]),
)
)
continue
# 23: non noarch builds shouldn't use version constraints on python and r-base
check_languages = ["python", "r-base"]
host_reqs = requirements_section.get("host") or []
run_reqs = requirements_section.get("run") or []
for language in check_languages:
if build_section.get("noarch") is None and not outputs_section:
filtered_host_reqs = [
req
for req in host_reqs
if req == str(language) or req.startswith(str(language))
]
filtered_run_reqs = [
req
for req in run_reqs
if req == str(language) or req.startswith(str(language))
]
if filtered_host_reqs and not filtered_run_reqs:
lints.append(
"If {0} is a host requirement, it should be a run requirement.".format(
str(language)
)
)
for reqs in [filtered_host_reqs, filtered_run_reqs]:
if str(language) in reqs:
continue
for req in reqs:
constraint = req.split(" ", 1)[1]
if constraint.startswith(">") or constraint.startswith(
"<"
):
lints.append(
"Non noarch packages should have a {0} requirement without any version constraints.".format(
str(language)
)
)
# hints
# 1: suggest pip
if "script" in build_section:
scripts = build_section["script"]
if isinstance(scripts, str):
scripts = [scripts]
for script in scripts:
if "python setup.py install" in script:
hints.append(
"Whenever possible python packages should use pip. "
"See https://conda-forge.org/docs/maintainer/adding_pkgs.html#use-pip"
)
# 2: suggest python noarch (skip on feedstocks)
if (
build_section.get("noarch") is None
and build_reqs
and not any(["_compiler_stub" in b for b in build_reqs])
and ("pip" in build_reqs)
and (is_staged_recipes or not conda_forge)
):
with io.open(meta_fname, "rt") as fh:
in_runreqs = False
no_arch_possible = True
for line in fh:
line_s = line.strip()
if line_s == "host:" or line_s == "run:":
in_runreqs = True
runreqs_spacing = line[: -len(line.lstrip())]
continue
if line_s.startswith("skip:") and is_selector_line(line):
no_arch_possible = False
break
if in_runreqs:
if runreqs_spacing == line[: -len(line.lstrip())]:
in_runreqs = False
continue
if is_selector_line(line):
no_arch_possible = False
break
if no_arch_possible:
hints.append(
"Whenever possible python packages should use noarch. "
"See https://conda-forge.org/docs/maintainer/knowledge_base.html#noarch-builds"
)
# 3: suggest fixing all recipe/*.sh shellcheck findings
shellcheck_enabled = False
shell_scripts = []
if recipe_dir:
shell_scripts = glob(os.path.join(recipe_dir, "*.sh"))
# support feedstocks and staged-recipes
forge_yaml = glob(
os.path.join(recipe_dir, "..", "conda-forge.yml")
) or glob(os.path.join(recipe_dir, "..", "..", "conda-forge.yml"),)
if shell_scripts and forge_yaml:
with open(forge_yaml[0], "r") as fh:
code = yaml.load(fh)
shellcheck_enabled = code.get("shellcheck", {}).get(
"enabled", shellcheck_enabled
)
if shellcheck_enabled and shutil.which("shellcheck") and shell_scripts:
MAX_SHELLCHECK_LINES = 50
cmd = [
"shellcheck",
"--enable=all",
"--shell=bash",
# SC2154: var is referenced but not assigned,
# see https://github.com/koalaman/shellcheck/wiki/SC2154
"--exclude=SC2154",
]
p = subprocess.Popen(
cmd + shell_scripts,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
env={
"PATH": os.getenv("PATH")
}, # exclude other env variables to protect against token leakage
)
sc_stdout, _ = p.communicate()
if p.returncode == 1:
# All files successfully scanned with some issues.
findings = (
sc_stdout.decode(sys.stdout.encoding)
.replace("\r\n", "\n")
.splitlines()
)
hints.append(
"Whenever possible fix all shellcheck findings ('"
+ " ".join(cmd)
+ " recipe/*.sh -f diff | git apply' helps)"
)
hints.extend(findings[:50])
if len(findings) > MAX_SHELLCHECK_LINES:
hints.append(
"Output restricted, there are '%s' more lines."
% (len(findings) - MAX_SHELLCHECK_LINES)
)
elif p.returncode != 0:
# Something went wrong.
hints.append(
"There have been errors while scanning with shellcheck."
)
return lints, hints
|
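A hypothetical call sketch for the lintify pair above, not part of any dataset row: it assumes the surrounding conda-smithy lint_recipe module context (get_section, EXPECTED_SECTION_ORDER, and the other module-level helpers) is available, and uses plain yaml.safe_load as a rough stand-in for conda-smithy's Jinja2-aware meta.yaml parsing.
import yaml
meta = yaml.safe_load("""
package:
  name: example-pkg
  version: "1.0.0"
about:
  home: https://example.com
  license: MIT
  license_family: MIT
  license_file: LICENSE
  summary: An example package
extra:
  recipe-maintainers:
    - some-user
""")
lints, hints = lintify(meta, recipe_dir=None, conda_forge=False)
print(lints)   # would flag e.g. the missing build/number and the missing tests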
12,201 |
def get_archspec_name():
from conda.base.context import non_x86_machines, _arch_names, _platform_map
target_plat, target_arch = context.subdir.split("-")
# This has to reverse what Context.subdir is doing
if target_plat == "linux" and target_arch in non_x86_machines:
machine = target_arch
elif target_arch == "zos":
return None
elif target_arch.isdigit():
machine = _arch_names[int(target_arch)]
else:
return None
# This has to match what Context.platform is doing
native_plat = _platform_map.get(sys.platform, 'unknown')
if native_plat != target_plat or platform.machine() != machine:
return machine
try:
import archspec.cpu
return str(archspec.cpu.host())
except ImportError:
return machine
|
def get_archspec_name():
from conda.base.context import non_x86_machines, _arch_names, _platform_map
target_plat, target_arch = context.subdir.split("-")
# This has to reverse what Context.subdir is doing
if target_arch in non_x86_machines:
machine = target_arch
elif target_arch == "zos":
return None
elif target_arch.isdigit():
machine = _arch_names[int(target_arch)]
else:
return None
# This has to match what Context.platform is doing
native_plat = _platform_map.get(sys.platform, 'unknown')
if native_plat != target_plat or platform.machine() != machine:
return machine
try:
import archspec.cpu
return str(archspec.cpu.host())
except ImportError:
return machine
|
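The get_archspec_name pair above reverses conda's Context.subdir naming; the sketch below is a self-contained illustration of that reverse mapping, using hypothetical stand-ins for conda's _arch_names and non_x86_machines tables rather than the real imports.
_arch_names_sketch = {32: "x86", 64: "x86_64"}
non_x86_sketch = {"aarch64", "arm64", "ppc64le", "s390x"}
def sketch_machine_from_subdir(subdir):
    plat, arch = subdir.split("-")
    if arch in non_x86_sketch:            # e.g. linux-aarch64, osx-arm64
        return arch
    if arch.isdigit():                    # e.g. linux-64, win-32
        return _arch_names_sketch[int(arch)]
    return None                           # anything the tables above do not cover
for s in ["linux-64", "linux-aarch64", "osx-arm64", "win-32"]:
    print(s, "->", sketch_machine_from_subdir(s))
# linux-64 -> x86_64, linux-aarch64 -> aarch64, osx-arm64 -> arm64, win-32 -> x86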
2,523 |
def compute_optics_graph(
X, *, min_samples, max_eps, metric, p, metric_params, algorithm, leaf_size, n_jobs
):
"""Compute the OPTICS reachability graph.
Read more in the :ref:`User Guide <optics>`.
Parameters
----------
X : ndarray of shape (n_samples, n_features), or \
(n_samples, n_samples) if metric=’precomputed’
A feature array, or array of distances between samples if
metric='precomputed'.
min_samples : int > 1 or float between 0 and 1
The number of samples in a neighborhood for a point to be considered
as a core point. Expressed as an absolute number or a fraction of the
number of samples (rounded to be at least 2).
max_eps : float, default=np.inf
The maximum distance between two samples for one to be considered as
in the neighborhood of the other. Default value of ``np.inf`` will
identify clusters across all scales; reducing ``max_eps`` will result
in shorter run times.
metric : str or callable, default='minkowski'
Metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string. If metric is
"precomputed", X is assumed to be a distance matrix and must be square.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'minkowski', 'rogerstanimoto', 'russellrao',
'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean',
'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
p : int, default=2
Parameter for the Minkowski metric from
:class:`~sklearn.metrics.pairwise_distances`. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, default=None
Additional keyword arguments for the metric function.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method. (default)
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, default=30
Leaf size passed to :class:`BallTree` or :class:`KDTree`. This can
affect the speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
n_jobs : int, default=None
The number of parallel jobs to run for neighbors search.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
Returns
-------
ordering_ : array of shape (n_samples,)
The cluster ordered list of sample indices.
core_distances_ : array of shape (n_samples,)
Distance at which each sample becomes a core point, indexed by object
order. Points which will never be core have a distance of inf. Use
``clust.core_distances_[clust.ordering_]`` to access in cluster order.
reachability_ : array of shape (n_samples,)
Reachability distances per sample, indexed by object order. Use
``clust.reachability_[clust.ordering_]`` to access in cluster order.
predecessor_ : array of shape (n_samples,)
Point that a sample was reached from, indexed by object order.
Seed points have a predecessor of -1.
References
----------
.. [1] Ankerst, Mihael, Markus M. Breunig, Hans-Peter Kriegel,
and Jörg Sander. "OPTICS: ordering points to identify the clustering
structure." ACM SIGMOD Record 28, no. 2 (1999): 49-60.
"""
n_samples = X.shape[0]
_validate_size(min_samples, n_samples, "min_samples")
if min_samples <= 1:
min_samples = max(2, int(min_samples * n_samples))
# Start all points as 'unprocessed' ##
reachability_ = np.empty(n_samples)
reachability_.fill(np.inf)
predecessor_ = np.empty(n_samples, dtype=int)
predecessor_.fill(-1)
nbrs = NearestNeighbors(
n_neighbors=min_samples,
algorithm=algorithm,
leaf_size=leaf_size,
metric=metric,
metric_params=metric_params,
p=p,
n_jobs=n_jobs,
)
nbrs.fit(X)
# Here we first do a kNN query for each point, this differs from
# the original OPTICS that only used epsilon range queries.
# TODO: handle working_memory somehow?
core_distances_ = _compute_core_distances_(
X=X, neighbors=nbrs, min_samples=min_samples, working_memory=None
)
# OPTICS puts an upper limit on these, use inf for undefined.
core_distances_[core_distances_ > max_eps] = np.inf
np.around(
core_distances_,
decimals=np.finfo(core_distances_.dtype).precision,
out=core_distances_,
)
# Main OPTICS loop. Not parallelizable. The order that entries are
# written to the 'ordering_' list is important!
# Note that this implementation is O(n^2) theoretically, but
# supposedly with very low constant factors.
processed = np.zeros(X.shape[0], dtype=bool)
ordering = np.zeros(X.shape[0], dtype=int)
for ordering_idx in range(X.shape[0]):
# Choose next based on smallest reachability distance
# (And prefer smaller ids on ties, possibly np.inf!)
index = np.where(processed == 0)[0]
point = index[np.argmin(reachability_[index])]
processed[point] = True
ordering[ordering_idx] = point
if core_distances_[point] != np.inf:
_set_reach_dist(
core_distances_=core_distances_,
reachability_=reachability_,
predecessor_=predecessor_,
point_index=point,
processed=processed,
X=X,
nbrs=nbrs,
metric=metric,
metric_params=metric_params,
p=p,
max_eps=max_eps,
)
if np.all(np.isinf(reachability_)):
warnings.warn(
"All reachability values are inf. Set a larger"
" max_eps or all data will be considered outliers.",
UserWarning,
)
return ordering, core_distances_, reachability_, predecessor_
|
def compute_optics_graph(
X, *, min_samples, max_eps, metric, p, metric_params, algorithm, leaf_size, n_jobs
):
"""Compute the OPTICS reachability graph.
Read more in the :ref:`User Guide <optics>`.
Parameters
----------
X : ndarray of shape (n_samples, n_features), or \
(n_samples, n_samples) if metric='precomputed'
A feature array, or array of distances between samples if
metric='precomputed'.
min_samples : int > 1 or float between 0 and 1
The number of samples in a neighborhood for a point to be considered
as a core point. Expressed as an absolute number or a fraction of the
number of samples (rounded to be at least 2).
max_eps : float, default=np.inf
The maximum distance between two samples for one to be considered as
in the neighborhood of the other. Default value of ``np.inf`` will
identify clusters across all scales; reducing ``max_eps`` will result
in shorter run times.
metric : str or callable, default='minkowski'
Metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string. If metric is
"precomputed", X is assumed to be a distance matrix and must be square.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'minkowski', 'rogerstanimoto', 'russellrao',
'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean',
'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
p : int, default=2
Parameter for the Minkowski metric from
:class:`~sklearn.metrics.pairwise_distances`. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, default=None
Additional keyword arguments for the metric function.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method. (default)
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, default=30
Leaf size passed to :class:`BallTree` or :class:`KDTree`. This can
affect the speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
n_jobs : int, default=None
The number of parallel jobs to run for neighbors search.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
Returns
-------
ordering_ : array of shape (n_samples,)
The cluster ordered list of sample indices.
core_distances_ : array of shape (n_samples,)
Distance at which each sample becomes a core point, indexed by object
order. Points which will never be core have a distance of inf. Use
``clust.core_distances_[clust.ordering_]`` to access in cluster order.
reachability_ : array of shape (n_samples,)
Reachability distances per sample, indexed by object order. Use
``clust.reachability_[clust.ordering_]`` to access in cluster order.
predecessor_ : array of shape (n_samples,)
Point that a sample was reached from, indexed by object order.
Seed points have a predecessor of -1.
References
----------
.. [1] Ankerst, Mihael, Markus M. Breunig, Hans-Peter Kriegel,
and Jörg Sander. "OPTICS: ordering points to identify the clustering
structure." ACM SIGMOD Record 28, no. 2 (1999): 49-60.
"""
n_samples = X.shape[0]
_validate_size(min_samples, n_samples, "min_samples")
if min_samples <= 1:
min_samples = max(2, int(min_samples * n_samples))
# Start all points as 'unprocessed' ##
reachability_ = np.empty(n_samples)
reachability_.fill(np.inf)
predecessor_ = np.empty(n_samples, dtype=int)
predecessor_.fill(-1)
nbrs = NearestNeighbors(
n_neighbors=min_samples,
algorithm=algorithm,
leaf_size=leaf_size,
metric=metric,
metric_params=metric_params,
p=p,
n_jobs=n_jobs,
)
nbrs.fit(X)
# Here we first do a kNN query for each point, this differs from
# the original OPTICS that only used epsilon range queries.
# TODO: handle working_memory somehow?
core_distances_ = _compute_core_distances_(
X=X, neighbors=nbrs, min_samples=min_samples, working_memory=None
)
# OPTICS puts an upper limit on these, use inf for undefined.
core_distances_[core_distances_ > max_eps] = np.inf
np.around(
core_distances_,
decimals=np.finfo(core_distances_.dtype).precision,
out=core_distances_,
)
# Main OPTICS loop. Not parallelizable. The order that entries are
# written to the 'ordering_' list is important!
# Note that this implementation is O(n^2) theoretically, but
# supposedly with very low constant factors.
processed = np.zeros(X.shape[0], dtype=bool)
ordering = np.zeros(X.shape[0], dtype=int)
for ordering_idx in range(X.shape[0]):
# Choose next based on smallest reachability distance
# (And prefer smaller ids on ties, possibly np.inf!)
index = np.where(processed == 0)[0]
point = index[np.argmin(reachability_[index])]
processed[point] = True
ordering[ordering_idx] = point
if core_distances_[point] != np.inf:
_set_reach_dist(
core_distances_=core_distances_,
reachability_=reachability_,
predecessor_=predecessor_,
point_index=point,
processed=processed,
X=X,
nbrs=nbrs,
metric=metric,
metric_params=metric_params,
p=p,
max_eps=max_eps,
)
if np.all(np.isinf(reachability_)):
warnings.warn(
"All reachability values are inf. Set a larger"
" max_eps or all data will be considered outliers.",
UserWarning,
)
return ordering, core_distances_, reachability_, predecessor_
|
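A minimal usage sketch for the OPTICS graph function above, not part of any dataset row: it assumes the public scikit-learn entry point sklearn.cluster.compute_optics_graph, whose keyword-only signature matches the one shown.
import numpy as np
from sklearn.cluster import compute_optics_graph
rng = np.random.RandomState(0)
X = np.vstack([rng.normal(0.0, 0.3, size=(25, 2)),   # two well-separated blobs
               rng.normal(5.0, 0.3, size=(25, 2))])
ordering, core_distances, reachability, predecessor = compute_optics_graph(
    X, min_samples=5, max_eps=np.inf, metric="minkowski", p=2,
    metric_params=None, algorithm="auto", leaf_size=30, n_jobs=None)
# Reachability read in cluster order shows two low plateaus separated by a spike.
print(reachability[ordering][:10])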
33,878 |
def test_oversized_function(ray_start_shared_local_modes):
bar = np.zeros(100 * 1024)
@ray.remote
class Actor:
def foo(self):
return len(bar)
@ray.remote
def f():
return len(bar)
with pytest.raises(
ValueError, match="The remote function .*f is too large"):
f.remote()
with pytest.raises(ValueError, match="The actor Actor is too large"):
Actor.remote()
|
def test_oversized_function(ray_start_shared_local_modes):
bar = np.zeros(100 * 1024 * 125)
@ray.remote
class Actor:
def foo(self):
return len(bar)
@ray.remote
def f():
return len(bar)
with pytest.raises(
ValueError, match="The remote function .*f is too large"):
f.remote()
with pytest.raises(ValueError, match="The actor Actor is too large"):
Actor.remote()
|
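A rough size calculation, not part of any dataset row, suggesting why the modified test scales bar by 125: the closure over bar is pickled together with the remote function, and Ray rejects functions whose serialized size exceeds an error threshold on the order of 100 MiB (the exact constant is an assumption here, not taken from the dataset).
small = 100 * 1024 * 8        # 819_200 bytes of float64, well under any limit
large = 100 * 1024 * 125 * 8  # 102_400_000 bytes, roughly 97.7 MiB
print(small, large)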
9,619 |
def main():
module = AnsibleModule(
argument_spec=dict(
type=dict(required=True, choices=['user', 'group', 'project']),
name=dict(required=False, default=None),
mountpoint=dict(required=True),
bhard=dict(required=False, default=None),
bsoft=dict(required=False, default=None),
ihard=dict(required=False, default=None),
isoft=dict(required=False, default=None),
rtbhard=dict(required=False, default=None),
rtbsoft=dict(required=False, default=None),
state=dict(required=False, default='present', choices=['present', 'absent'])
),
supports_check_mode=True
)
quota_type = module.params['type']
name = module.params['name']
mountpoint = module.params['mountpoint']
bhard = module.params['bhard']
bsoft = module.params['bsoft']
ihard = module.params['ihard']
isoft = module.params['isoft']
rtbhard = module.params['rtbhard']
rtbsoft = module.params['rtbsoft']
state = module.params['state']
if bhard is not None:
bhard = human_to_bytes(bhard)
if bsoft is not None:
bsoft = human_to_bytes(bsoft)
if rtbhard is not None:
rtbhard = human_to_bytes(rtbhard)
if rtbsoft is not None:
rtbsoft = human_to_bytes(rtbsoft)
changed = False
if os.getuid() != 0:
module.fail_json(msg='You need to be root to run this module')
if not os.path.ismount(mountpoint):
module.fail_json(msg='%s is not a mountpoint' % mountpoint)
mp = get_fs_by_mountpoint(mountpoint)
if mp is None:
module.fail_json(msg='%s is not a mountpoint or not located on an xfs filesystem.' % mountpoint)
if quota_type == 'user':
type_arg = '-u'
quota_default = 'root'
if name is None:
name = quota_default
if 'uquota' not in mp['mntopts'] \
and 'usrquota' not in mp['mntopts'] \
and 'quota' not in mp['mntopts'] \
and 'uqnoenforce' not in mp['mntopts'] \
and 'qnoenforce' not in mp['mntopts']:
module.fail_json(
msg='%s is not mounted with the uquota/usrquota/quota/uqnoenforce/qnoenforce option.'
% mountpoint
)
try:
pwd.getpwnam(name)
except KeyError as e:
module.fail_json(msg='User %s doesn\'t exist.' % name)
if quota_type == 'group':
type_arg = '-g'
quota_default = 'root'
if name is None:
name = quota_default
if 'gquota' not in mp['mntopts'] and 'grpquota' not in mp['mntopts'] and 'gqnoenforce' not in mp['mntopts']:
module.fail_json(
msg='%s is not mounted with the gquota/grpquota/gqnoenforce option. (current options: %s)'
% (mountpoint, mp['mntopts'])
)
try:
grp.getgrnam(name)
except KeyError as e:
module.fail_json(msg='User %s doesn\'t exist.' % name)
elif quota_type == 'project':
type_arg = '-p'
quota_default = '#0'
if name is None:
name = quota_default
if 'pquota' not in mp['mntopts'] and 'prjquota' not in mp['mntopts'] and 'pqnoenforce' not in mp['mntopts']:
module.fail_json(msg='%s is not mounted with the pquota/prjquota/pqnoenforce option.' % mountpoint)
if name != quota_default and not os.path.isfile('/etc/projects'):
module.fail_json(msg='/etc/projects doesn\'t exist.')
if name != quota_default and not os.path.isfile('/etc/projid'):
module.fail_json(msg='/etc/projid doesn\'t exist.')
if name != quota_default and name is not None and get_project_id(name) is None:
module.fail_json(msg='%s hasn\'t been defined in /etc/projid.' % name)
prj_set = True
if name != quota_default:
cmd = 'project %s' % name
r = exec_quota(module, cmd, mountpoint)
if r['rc'] != 0:
module.fail_json(msg='Could not get project state.', cmd=cmd, retval=r)
else:
for line in r['stdout']:
if '%s - project identifier is not set' in line:
prj_set = False
break
if not prj_set and not module.check_mode:
cmd = 'project -s'
r = exec_quota(module, cmd, mountpoint)
if r['rc'] != 0:
module.fail_json(msg='Could not get quota realtime block report.', cmd=cmd, retval=r)
else:
changed = True
elif not prj_set and module.check_mode:
changed = True
changed = False
# Set limits
if state == 'absent':
bhard = 0
bsoft = 0
ihard = 0
isoft = 0
rtbhard = 0
rtbsoft = 0
if bsoft is not None or bhard is not None:
current_bsoft, current_bhard = quota_report(module, mountpoint, name, quota_type, 'b')
if isoft is not None or ihard is not None:
current_isoft, current_ihard = quota_report(module, mountpoint, name, quota_type, 'i')
if rtbsoft is not None or rtbhard is not None:
current_rtbsoft, current_rtbhard = quota_report(module, mountpoint, name, quota_type, 'rtb')
limit = []
if bsoft is not None and int(bsoft / 1024) != current_bsoft:
limit.append('bsoft=%s' % bsoft)
if bhard is not None and int(bhard / 1024) != current_bhard:
limit.append('bhard=%s' % bhard)
if isoft is not None and isoft != current_isoft:
limit.append('isoft=%s' % isoft)
if ihard is not None and ihard != current_ihard:
limit.append('ihard=%s' % ihard)
if rtbsoft is not None and int(rtbsoft / 1024) != current_rtbsoft:
limit.append('rtbsoft=%s' % rtbsoft)
if rtbhard is not None and int(rtbhard / 1024) != current_rtbhard:
limit.append('rtbhard=%s' % rtbhard)
if len(limit) > 0 and not module.check_mode:
if name == quota_default:
cmd = 'limit %s -d %s' % (type_arg, ' '.join(limit))
else:
cmd = 'limit %s %s %s' % (type_arg, ' '.join(limit), name)
r = exec_quota(module, cmd, mountpoint)
if r['rc'] != 0:
module.fail_json(msg='Could not set limits.', cmd=cmd, retval=r)
else:
changed = True
elif len(limit) > 0 and module.check_mode:
changed = True
module.exit_json(changed=changed)
return True
|
def main():
module = AnsibleModule(
argument_spec=dict(
type=dict(required=True, choices=['user', 'group', 'project']),
name=dict(required=False, default=None),
mountpoint=dict(required=True),
bhard=dict(required=False, default=None),
bsoft=dict(required=False, default=None),
ihard=dict(required=False, default=None),
isoft=dict(required=False, default=None),
rtbhard=dict(required=False, default=None),
rtbsoft=dict(required=False, default=None),
state=dict(required=False, default='present', choices=['present', 'absent'])
),
supports_check_mode=True
)
quota_type = module.params['type']
name = module.params['name']
mountpoint = module.params['mountpoint']
bhard = module.params['bhard']
bsoft = module.params['bsoft']
ihard = module.params['ihard']
isoft = module.params['isoft']
rtbhard = module.params['rtbhard']
rtbsoft = module.params['rtbsoft']
state = module.params['state']
if bhard is not None:
bhard = human_to_bytes(bhard)
if bsoft is not None:
bsoft = human_to_bytes(bsoft)
if rtbhard is not None:
rtbhard = human_to_bytes(rtbhard)
if rtbsoft is not None:
rtbsoft = human_to_bytes(rtbsoft)
changed = False
if os.getuid() != 0:
module.fail_json(msg='You need to be root to run this module', **result)
if not os.path.ismount(mountpoint):
module.fail_json(msg='%s is not a mountpoint' % mountpoint)
mp = get_fs_by_mountpoint(mountpoint)
if mp is None:
module.fail_json(msg='%s is not a mountpoint or not located on an xfs filesystem.' % mountpoint)
if quota_type == 'user':
type_arg = '-u'
quota_default = 'root'
if name is None:
name = quota_default
if 'uquota' not in mp['mntopts'] \
and 'usrquota' not in mp['mntopts'] \
and 'quota' not in mp['mntopts'] \
and 'uqnoenforce' not in mp['mntopts'] \
and 'qnoenforce' not in mp['mntopts']:
module.fail_json(
msg='%s is not mounted with the uquota/usrquota/quota/uqnoenforce/qnoenforce option.'
% mountpoint
)
try:
pwd.getpwnam(name)
except KeyError as e:
module.fail_json(msg='User %s doesn\'t exist.' % name)
if quota_type == 'group':
type_arg = '-g'
quota_default = 'root'
if name is None:
name = quota_default
if 'gquota' not in mp['mntopts'] and 'grpquota' not in mp['mntopts'] and 'gqnoenforce' not in mp['mntopts']:
module.fail_json(
msg='%s is not mounted with the gquota/grpquota/gqnoenforce option. (current options: %s)'
% (mountpoint, mp['mntopts'])
)
try:
grp.getgrnam(name)
except KeyError as e:
module.fail_json(msg='User %s doesn\'t exist.' % name)
elif quota_type == 'project':
type_arg = '-p'
quota_default = '#0'
if name is None:
name = quota_default
if 'pquota' not in mp['mntopts'] and 'prjquota' not in mp['mntopts'] and 'pqnoenforce' not in mp['mntopts']:
module.fail_json(msg='%s is not mounted with the pquota/prjquota/pqnoenforce option.' % mountpoint)
if name != quota_default and not os.path.isfile('/etc/projects'):
module.fail_json(msg='/etc/projects doesn\'t exist.')
if name != quota_default and not os.path.isfile('/etc/projid'):
module.fail_json(msg='/etc/projid doesn\'t exist.')
if name != quota_default and name is not None and get_project_id(name) is None:
module.fail_json(msg='%s hasn\'t been defined in /etc/projid.' % name)
prj_set = True
if name != quota_default:
cmd = 'project %s' % name
r = exec_quota(module, cmd, mountpoint)
if r['rc'] != 0:
module.fail_json(msg='Could not get project state.', cmd=cmd, retval=r)
else:
for line in r['stdout']:
if '%s - project identifier is not set' in line:
prj_set = False
break
if not prj_set and not module.check_mode:
cmd = 'project -s'
r = exec_quota(module, cmd, mountpoint)
if r['rc'] != 0:
module.fail_json(msg='Could not get quota realtime block report.', cmd=cmd, retval=r)
else:
changed = True
elif not prj_set and module.check_mode:
changed = True
changed = False
# Set limits
if state == 'absent':
bhard = 0
bsoft = 0
ihard = 0
isoft = 0
rtbhard = 0
rtbsoft = 0
if bsoft is not None or bhard is not None:
current_bsoft, current_bhard = quota_report(module, mountpoint, name, quota_type, 'b')
if isoft is not None or ihard is not None:
current_isoft, current_ihard = quota_report(module, mountpoint, name, quota_type, 'i')
if rtbsoft is not None or rtbhard is not None:
current_rtbsoft, current_rtbhard = quota_report(module, mountpoint, name, quota_type, 'rtb')
limit = []
if bsoft is not None and int(bsoft / 1024) != current_bsoft:
limit.append('bsoft=%s' % bsoft)
if bhard is not None and int(bhard / 1024) != current_bhard:
limit.append('bhard=%s' % bhard)
if isoft is not None and isoft != current_isoft:
limit.append('isoft=%s' % isoft)
if ihard is not None and ihard != current_ihard:
limit.append('ihard=%s' % ihard)
if rtbsoft is not None and int(rtbsoft / 1024) != current_rtbsoft:
limit.append('rtbsoft=%s' % rtbsoft)
if rtbhard is not None and int(rtbhard / 1024) != current_rtbhard:
limit.append('rtbhard=%s' % rtbhard)
if len(limit) > 0 and not module.check_mode:
if name == quota_default:
cmd = 'limit %s -d %s' % (type_arg, ' '.join(limit))
else:
cmd = 'limit %s %s %s' % (type_arg, ' '.join(limit), name)
r = exec_quota(module, cmd, mountpoint)
if r['rc'] != 0:
module.fail_json(msg='Could not set limits.', cmd=cmd, retval=r)
else:
changed = True
elif len(limit) > 0 and module.check_mode:
changed = True
module.exit_json(changed=changed)
return True
|
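A hypothetical walk-through, not part of any dataset row, of how the module above assembles the xfs_quota "limit" command string once the changed limits are known (the user name and byte values are made up, and exec_quota itself is not reproduced here).
type_arg = "-u"
name = "alice"
limit = ["bsoft=1073741824", "bhard=2147483648"]  # 1 GiB soft, 2 GiB hard
cmd = "limit %s %s %s" % (type_arg, " ".join(limit), name)
print(cmd)  # limit -u bsoft=1073741824 bhard=2147483648 alice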
959 |
def to_cnf(expr, simplify=False, force=False):
"""
Convert a propositional logical sentence ``expr`` to conjunctive normal
form: ``((A | ~B | ...) & (B | C | ...) & ...)``.
If ``simplify`` is ``True``, ``expr`` is evaluated to its simplest CNF
form using the Quine-McCluskey algorithm; this may take a long
time. If there are more than 8 variables the ``force`` flag must be set
to ``True`` to simplify (default is ``False``).
Examples
========
>>> from sympy.logic.boolalg import to_cnf
>>> from sympy.abc import A, B, D
>>> to_cnf(~(A | B) | D)
(D | ~A) & (D | ~B)
>>> to_cnf((A | B) & (A | ~A), True)
A | B
"""
expr = sympify(expr)
if not isinstance(expr, BooleanFunction):
return expr
if simplify:
if not force and len(_find_predicates(expr)) > 8:
raise ValueError(filldedent('''
To simplify a logical expression with more
than 8 variables may take a long time and requires
the use of `force=True`.'''))
return simplify_logic(expr, 'cnf', True, force=force)
# Don't convert unless we have to
if is_cnf(expr):
return expr
expr = eliminate_implications(expr)
res = distribute_and_over_or(expr)
return res
|
def to_cnf(expr, simplify=False, force=False):
"""
Convert a propositional logical sentence ``expr`` to conjunctive normal
form: ``((A | ~B | ...) & (B | C | ...) & ...)``.
If ``simplify`` is ``True``, ``expr`` is evaluated to its simplest CNF
form using the Quine-McCluskey algorithm; this may take a long
time. If there are more than 8 variables the ``force`` flag must be set
to ``True`` to simplify (default is ``False``).
Examples
========
>>> from sympy.logic.boolalg import to_cnf
>>> from sympy.abc import A, B, D
>>> to_cnf(~(A | B) | D)
(D | ~A) & (D | ~B)
>>> to_cnf((A | B) & (A | ~A), True)
A | B
"""
expr = sympify(expr)
if not isinstance(expr, BooleanFunction):
return expr
if simplify:
if not force and len(_find_predicates(expr)) > 8:
raise ValueError(filldedent('''
To simplify a logical expression with more
than 8 variables may take a long time and requires
the use of `force=True`.'''))
return simplify_logic(expr, 'cnf', True, force=force)
# Don't convert unless we have to
if is_cnf(expr):
return expr
expr = eliminate_implications(expr)
res = distribute_and_over_or(expr)
return res
|
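A short interactive sketch for the to_cnf pair above, not part of any dataset row; it only re-runs the docstring examples and assumes sympy is installed.
from sympy.abc import A, B, C
from sympy.logic.boolalg import to_cnf
print(to_cnf(~(A | B) | C))                       # (C | ~A) & (C | ~B)
print(to_cnf((A | B) & (A | ~A), simplify=True))  # A | B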
43,515 |
def generate_random_circuit(num_qubits):
"""
Generates a random quantum circuit based on (McClean et. al., 2019)
Args:
num_qubits (int): the number of qubits in the circuit
"""
dev = qml.device("default.qubit", wires=num_qubits)
gate_set = [qml.RX, qml.RY, qml.RZ]
random_gate_sequence = {i: np.random.choice(gate_set) for i in range(num_qubits)}
qcircuit = qml.QNode(rand_circuit, dev)
return qcircuit, random_gate_sequence
|
def generate_random_circuit(num_qubits):
"""
Generates a random quantum circuit based on (McClean et. al., 2019).
Args:
num_qubits (int): the number of qubits in the circuit
"""
dev = qml.device("default.qubit", wires=num_qubits)
gate_set = [qml.RX, qml.RY, qml.RZ]
random_gate_sequence = {i: np.random.choice(gate_set) for i in range(num_qubits)}
qcircuit = qml.QNode(rand_circuit, dev)
return qcircuit, random_gate_sequence
|
37,133 |
def measure(qubits: List[int],
schedule: Schedule,
inst_map: Optional[InstructionScheduleMap],
meas_map: List[List[int]],
backend: Optional['BaseBackend'] = None,
qubit_mem_slots: Optional[Dict[int, int]] = None) -> Schedule:
"""
This is a utility function to measure qubits using OpenPulse.
Args:
qubits: List of qubits to be measured.
schedule: Schedule of the circuit.
inst_map: Mapping of circuit operations to pulse schedules. If None, defaults to the
``circuit_instruction_map`` of ``backend``.
meas_map: List of sets of qubits that must be measured together. If None, defaults to
the ``meas_map`` of ``backend``.
backend: A backend instance, which contains hardware-specific data required for scheduling.
qubit_mem_slots: Mapping of measured qubit index to classical bit index.
Returns:
A schedule corresponding to the inputs provided.
"""
inst_map = inst_map or backend.defaults().circuit_instruction_map
meas_map = meas_map or backend.configuration().meas_map
measure_groups = set()
for qubit in qubits:
measure_groups.add(tuple(meas_map[qubit]))
for measure_group_qubits in measure_groups:
if qubit_mem_slots is not None:
unused_mem_slots = set(measure_group_qubits) - set(qubit_mem_slots.values())
default_sched = inst_map.get('measure', measure_group_qubits)
for time, inst in default_sched.instructions:
if qubit_mem_slots is not None and isinstance(inst, AcquireInstruction):
mem_slots = []
for channel in inst.acquires:
if channel.index in qubit_mem_slots.keys():
mem_slots.append(MemorySlot(qubit_mem_slots[channel.index]))
else:
mem_slots.append(MemorySlot(unused_mem_slots.pop()))
new_acquire = AcquireInstruction(command=inst.command,
acquires=inst.acquires,
mem_slots=mem_slots)
schedule = schedule.insert(time, new_acquire)
# Measurement pulses should only be added if its qubit was measured by the user
elif inst.channels[0].index in qubits:
schedule = schedule.insert(time, inst)
return schedule
|
def measure(qubits: List[int],
schedule: Schedule,
inst_map: Optional[InstructionScheduleMap],
meas_map: Optional[List[List[int]]] = None,
backend: Optional['BaseBackend'] = None,
qubit_mem_slots: Optional[Dict[int, int]] = None) -> Schedule:
"""
This is a utility function to measure qubits using OpenPulse.
Args:
qubits: List of qubits to be measured.
schedule: Schedule of the circuit.
inst_map: Mapping of circuit operations to pulse schedules. If None, defaults to the
``circuit_instruction_map`` of ``backend``.
meas_map: List of sets of qubits that must be measured together. If None, defaults to
the ``meas_map`` of ``backend``.
backend: A backend instance, which contains hardware-specific data required for scheduling.
qubit_mem_slots: Mapping of measured qubit index to classical bit index.
Returns:
A schedule corresponding to the inputs provided.
"""
inst_map = inst_map or backend.defaults().circuit_instruction_map
meas_map = meas_map or backend.configuration().meas_map
measure_groups = set()
for qubit in qubits:
measure_groups.add(tuple(meas_map[qubit]))
for measure_group_qubits in measure_groups:
if qubit_mem_slots is not None:
unused_mem_slots = set(measure_group_qubits) - set(qubit_mem_slots.values())
default_sched = inst_map.get('measure', measure_group_qubits)
for time, inst in default_sched.instructions:
if qubit_mem_slots is not None and isinstance(inst, AcquireInstruction):
mem_slots = []
for channel in inst.acquires:
if channel.index in qubit_mem_slots.keys():
mem_slots.append(MemorySlot(qubit_mem_slots[channel.index]))
else:
mem_slots.append(MemorySlot(unused_mem_slots.pop()))
new_acquire = AcquireInstruction(command=inst.command,
acquires=inst.acquires,
mem_slots=mem_slots)
schedule = schedule.insert(time, new_acquire)
# Measurement pulses should only be added if its qubit was measured by the user
elif inst.channels[0].index in qubits:
schedule = schedule.insert(time, inst)
return schedule
|
2,631 |
def test_confusion_matrix_normalize_single_class():
y_test = [0, 0, 0, 0, 1, 1, 1, 1]
y_pred = [0, 0, 0, 0, 0, 0, 0, 0]
cm_true = confusion_matrix(y_test, y_pred, normalize="true")
assert cm_true.sum() == pytest.approx(2.0)
# additionally check that no warnings are raised due to a division by zero
with warnings.catch_warnings():
warnings.simplefilter("error", UndefinedMetricWarning)
cm_pred = confusion_matrix(y_test, y_pred, normalize="pred")
assert cm_pred.sum() == pytest.approx(1.0)
with warnings.catch_warnings():
warnings.simplefilter("error", UndefinedMetricWarning)
_cm_pred = confusion_matrix(y_pred, y_test, normalize="true") # noqa: F841
|
def test_confusion_matrix_normalize_single_class():
y_test = [0, 0, 0, 0, 1, 1, 1, 1]
y_pred = [0, 0, 0, 0, 0, 0, 0, 0]
cm_true = confusion_matrix(y_test, y_pred, normalize="true")
assert cm_true.sum() == pytest.approx(2.0)
# additionally check that no warnings are raised due to a division by zero
with warnings.catch_warnings():
warnings.simplefilter("error", UndefinedMetricWarning)
cm_pred = confusion_matrix(y_test, y_pred, normalize="pred")
assert cm_pred.sum() == pytest.approx(1.0)
with warnings.catch_warnings():
warnings.simplefilter("error", UndefinedMetricWarning)
confusion_matrix(y_pred, y_test, normalize="true")
|
31,359 |
def entry_types_command(client: Client) -> CommandResults:
"""Get entry types list from TOPdesk"""
entry_types = client.get_list('/incidents/entry_types')
return command_with_all_fields_readable_list(results=entry_types,
result_name='entry types',
output_prefix='entryType',
outputs_key_field='id')
|
def entry_types_command(client: Client) -> CommandResults:
"""Get entry types list from TOPdesk"""
entry_types = client.get_list('/incidents/entry_types')
return command_with_all_fields_readable_list(results=entry_types,
result_name='entry types',
output_prefix='EntryType',
outputs_key_field='id')
|
31,292 |
def filter_installed_packs(packs_to_install: set) -> set:
"""
Filter packs that should be installed out from the installed packs set if they are:
- Content pack is not in skipped packs
- Content pack is certified
- Content pack is not deprecated
Args:
packs_to_install (set): Set of installed packs collected so far.
Returns:
(set): Set of packs without ignored, skipped and deprecated-packs.
"""
return {pack for pack in packs_to_install if should_install_content_pack(pack)}
|
def filter_installed_packs(packs_to_install: set) -> set:
"""
Filter only the packs that should get installed by the following conditions:
- Content pack is not in skipped packs
- Content pack is certified
- Content pack is not deprecated
Args:
packs_to_install (set): Set of installed packs collected so far.
Returns:
(set): Set of packs without ignored, skipped and deprecated-packs.
"""
return {pack for pack in packs_to_install if should_install_content_pack(pack)}
|
1,582 |
def kneighbors_graph(X, n_neighbors, mode='connectivity', metric='minkowski',
p=2, metric_params=None, include_self=False, n_jobs=None):
"""Computes the (weighted) graph of k-Neighbors for points in X
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
X : array-like of shape (n_samples, n_features) or BallTree
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
n_neighbors : int
Number of neighbors for each sample.
mode : {'connectivity', 'distance'}, default=None
Type of returned matrix: 'connectivity' will return the connectivity
matrix with ones and zeros, and 'distance' will return the distances
between neighbors according to the given metric.
metric : str, default='minkowski'
The distance metric used to calculate the k-Neighbors for each sample
point. The DistanceMetric class gives a list of available metrics.
The default distance is 'euclidean' ('minkowski' metric with the p
param equal to 2.)
p : int, default=2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, default=None
additional keyword arguments for the metric function.
include_self : bool or 'auto', default=None
Whether or not to mark each sample as the first nearest neighbor to
itself. If 'auto', then True is used for mode='connectivity' and False
for mode='distance'.
n_jobs : int, default=None
The number of parallel jobs to run for neighbors search.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
Returns
-------
A : sparse graph in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import kneighbors_graph
>>> A = kneighbors_graph(X, 2, mode='connectivity', include_self=True)
>>> A.toarray()
array([[1., 0., 1.],
[0., 1., 1.],
[1., 0., 1.]])
See also
--------
radius_neighbors_graph
"""
if not isinstance(X, KNeighborsMixin):
X = NearestNeighbors(n_neighbors, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs).fit(X)
else:
_check_params(X, metric, p, metric_params)
query = _query_include_self(X._fit_X, include_self, mode)
return X.kneighbors_graph(X=query, n_neighbors=n_neighbors, mode=mode)
|
def kneighbors_graph(X, n_neighbors, mode='connectivity', metric='minkowski',
p=2, metric_params=None, include_self=False, n_jobs=None):
"""Computes the (weighted) graph of k-Neighbors for points in X
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
X : array-like of shape (n_samples, n_features) or BallTree
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
n_neighbors : int
Number of neighbors for each sample.
mode : {'connectivity', 'distance'}, default='connectivity'
Type of returned matrix: 'connectivity' will return the connectivity
matrix with ones and zeros, and 'distance' will return the distances
between neighbors according to the given metric.
metric : str, default='minkowski'
The distance metric used to calculate the k-Neighbors for each sample
point. The DistanceMetric class gives a list of available metrics.
The default distance is 'euclidean' ('minkowski' metric with the p
param equal to 2.)
p : int, default=2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, default=None
additional keyword arguments for the metric function.
include_self : bool or 'auto', default=None
Whether or not to mark each sample as the first nearest neighbor to
itself. If 'auto', then True is used for mode='connectivity' and False
for mode='distance'.
n_jobs : int, default=None
The number of parallel jobs to run for neighbors search.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
Returns
-------
A : sparse graph in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import kneighbors_graph
>>> A = kneighbors_graph(X, 2, mode='connectivity', include_self=True)
>>> A.toarray()
array([[1., 0., 1.],
[0., 1., 1.],
[1., 0., 1.]])
See also
--------
radius_neighbors_graph
"""
if not isinstance(X, KNeighborsMixin):
X = NearestNeighbors(n_neighbors, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs).fit(X)
else:
_check_params(X, metric, p, metric_params)
query = _query_include_self(X._fit_X, include_self, mode)
return X.kneighbors_graph(X=query, n_neighbors=n_neighbors, mode=mode)
|
31,701 |
def main() -> None:
"""main function, parses params and runs command functions
:return:
:rtype:
"""
proxy = demisto.params().get('proxy', False)
base_url = demisto.getParam('api_endpoint')
access_id = demisto.getParam('access_id')
access_key = demisto.getParam('access_key')
verify_certificate = not demisto.params().get('insecure', False)
# How much time before the first fetch to retrieve incidents
first_fetch_time = arg_to_datetime(
arg=demisto.params().get('first_fetch', '1 day'),
arg_name='First fetch time',
required=True
)
first_fetch_timestamp = int(first_fetch_time.timestamp()) if first_fetch_time else None
# Using assert as a type guard (since first_fetch_time is always an int when required=True)
assert isinstance(first_fetch_timestamp, int)
fetch_query = demisto.params().get('fetch_query')
record_summary_fields = demisto.params().get('record_summary_fields')
demisto.debug(f'Command being called is {demisto.command()}')
try:
client = Client(
base_url=base_url,
verify=verify_certificate,
headers=DEFAULT_HEADERS,
proxy=proxy,
auth=(access_id, access_key),
ok_codes=[200])
if demisto.command() == 'test-module':
# This is the call made when pressing the integration Test button.
result = test_module(client)
return_results(result)
elif demisto.command() == 'sumologic-sec-insight-get-details':
return_results(insight_get_details(client, demisto.args()))
elif demisto.command() == 'sumologic-sec-insight-get-comments':
return_results(insight_get_comments(client, demisto.args()))
elif demisto.command() == 'sumologic-sec-signal-get-details':
return_results(signal_get_details(client, demisto.args()))
elif demisto.command() == 'sumologic-sec-entity-get-details':
return_results(entity_get_details(client, demisto.args()))
elif demisto.command() == 'sumologic-sec-insight-search':
return_results(insight_search(client, demisto.args()))
elif demisto.command() == 'sumologic-sec-entity-search':
return_results(entity_search(client, demisto.args()))
elif demisto.command() == 'sumologic-sec-signal-search':
return_results(signal_search(client, demisto.args()))
elif demisto.command() == 'sumologic-sec-insight-set-status':
return_results(insight_set_status(client, demisto.args()))
elif demisto.command() == 'sumologic-sec-match-list-get':
return_results(match_list_get(client, demisto.args()))
elif demisto.command() == 'sumologic-sec-match-list-update':
return_results(match_list_update(client, demisto.args()))
elif demisto.command() == 'sumologic-sec-threat-intel-search-indicators':
return_results(threat_intel_search_indicators(client, demisto.args()))
elif demisto.command() == 'sumologic-sec-threat-intel-get-sources':
return_results(threat_intel_get_sources(client, demisto.args()))
elif demisto.command() == 'sumologic-sec-threat-intel-update-source':
return_results(threat_intel_update_source(client, demisto.args()))
elif demisto.command() == 'fetch-incidents':
# Convert the argument to an int using helper function or set to MAX_INCIDENTS_TO_FETCH
max_results = arg_to_number(
arg=demisto.params().get('max_fetch'),
arg_name='max_fetch',
required=False
)
if not max_results or max_results > MAX_INCIDENTS_TO_FETCH:
max_results = MAX_INCIDENTS_TO_FETCH
next_run, incidents = fetch_incidents(
client=client,
max_results=max_results,
last_run=demisto.getLastRun(), # getLastRun() gets the last run dict
first_fetch_time=first_fetch_timestamp,
fetch_query=fetch_query,
record_summary_fields=record_summary_fields
)
# saves next_run for the time fetch-incidents is invoked
demisto.setLastRun(next_run)
# fetch-incidents calls ``demisto.incidents()`` to provide the list
# of incidents to create
demisto.incidents(incidents)
# Log exceptions and return errors
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
|
def main() -> None:
"""main function, parses params and runs command functions
:return:
:rtype:
"""
proxy = demisto.params().get('proxy', False)
base_url = demisto.getParam('api_endpoint')
access_id = demisto.getParam('access_id')
access_key = demisto.getParam('access_key')
verify_certificate = not demisto.params().get('insecure', False)
# How much time before the first fetch to retrieve incidents
first_fetch_time = arg_to_datetime(
arg=demisto.params().get('first_fetch', '1 day'),
arg_name='First fetch time',
required=True
)
first_fetch_timestamp = int(first_fetch_time.timestamp()) if first_fetch_time else None
# Using assert as a type guard (since first_fetch_time is always an int when required=True)
assert isinstance(first_fetch_timestamp, int)
fetch_query = demisto.params().get('fetch_query')
record_summary_fields = demisto.params().get('record_summary_fields') or RECORD_SUMMARY_FIELDS_DEFAULT
demisto.debug(f'Command being called is {demisto.command()}')
try:
client = Client(
base_url=base_url,
verify=verify_certificate,
headers=DEFAULT_HEADERS,
proxy=proxy,
auth=(access_id, access_key),
ok_codes=[200])
if demisto.command() == 'test-module':
# This is the call made when pressing the integration Test button.
result = test_module(client)
return_results(result)
elif demisto.command() == 'sumologic-sec-insight-get-details':
return_results(insight_get_details(client, demisto.args()))
elif demisto.command() == 'sumologic-sec-insight-get-comments':
return_results(insight_get_comments(client, demisto.args()))
elif demisto.command() == 'sumologic-sec-signal-get-details':
return_results(signal_get_details(client, demisto.args()))
elif demisto.command() == 'sumologic-sec-entity-get-details':
return_results(entity_get_details(client, demisto.args()))
elif demisto.command() == 'sumologic-sec-insight-search':
return_results(insight_search(client, demisto.args()))
elif demisto.command() == 'sumologic-sec-entity-search':
return_results(entity_search(client, demisto.args()))
elif demisto.command() == 'sumologic-sec-signal-search':
return_results(signal_search(client, demisto.args()))
elif demisto.command() == 'sumologic-sec-insight-set-status':
return_results(insight_set_status(client, demisto.args()))
elif demisto.command() == 'sumologic-sec-match-list-get':
return_results(match_list_get(client, demisto.args()))
elif demisto.command() == 'sumologic-sec-match-list-update':
return_results(match_list_update(client, demisto.args()))
elif demisto.command() == 'sumologic-sec-threat-intel-search-indicators':
return_results(threat_intel_search_indicators(client, demisto.args()))
elif demisto.command() == 'sumologic-sec-threat-intel-get-sources':
return_results(threat_intel_get_sources(client, demisto.args()))
elif demisto.command() == 'sumologic-sec-threat-intel-update-source':
return_results(threat_intel_update_source(client, demisto.args()))
elif demisto.command() == 'fetch-incidents':
# Convert the argument to an int using helper function or set to MAX_INCIDENTS_TO_FETCH
max_results = arg_to_number(
arg=demisto.params().get('max_fetch'),
arg_name='max_fetch',
required=False
)
if not max_results or max_results > MAX_INCIDENTS_TO_FETCH:
max_results = MAX_INCIDENTS_TO_FETCH
next_run, incidents = fetch_incidents(
client=client,
max_results=max_results,
last_run=demisto.getLastRun(), # getLastRun() gets the last run dict
first_fetch_time=first_fetch_timestamp,
fetch_query=fetch_query,
record_summary_fields=record_summary_fields
)
# saves next_run for the time fetch-incidents is invoked
demisto.setLastRun(next_run)
# fetch-incidents calls ``demisto.incidents()`` to provide the list
# of incidents to create
demisto.incidents(incidents)
# Log exceptions and return errors
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
|
5,588 |
def pandas_dataframe_to_unit_arrays(df, column_units=None):
"""Attach units to data in pandas dataframes and return quantities.
Parameters
----------
df : `pandas.DataFrame`
Data in pandas dataframe.
column_units : dict
Dictionary of units to attach to columns of the dataframe. Overrides
the units attribute if it is attached to the dataframe.
Returns
-------
Dictionary containing `Quantity` instances arrays with keys corresponding to the
dataframe column names.
"""
if not column_units:
try:
column_units = df.units
except AttributeError:
raise ValueError('No units attribute attached to pandas '
'dataframe and col_units not given.') from None
# Iterate through columns attaching units if we have them, if not, don't touch it
res = {}
for column in df:
if column in column_units and column_units[column]:
res[column] = units.Quantity(df[column].values, column_units[column])
else:
res[column] = df[column].values
return res
|
def pandas_dataframe_to_unit_arrays(df, column_units=None):
"""Attach units to data in pandas dataframes and return quantities.
Parameters
----------
df : `pandas.DataFrame`
Data in pandas dataframe.
column_units : dict
Dictionary of units to attach to columns of the dataframe. Overrides
the units attribute if it is attached to the dataframe.
Returns
-------
Dictionary containing `Quantity` instances with keys corresponding to the
dataframe column names.
"""
if not column_units:
try:
column_units = df.units
except AttributeError:
raise ValueError('No units attribute attached to pandas '
'dataframe and col_units not given.') from None
# Iterate through columns attaching units if we have them, if not, don't touch it
res = {}
for column in df:
if column in column_units and column_units[column]:
res[column] = units.Quantity(df[column].values, column_units[column])
else:
res[column] = df[column].values
return res
|
54,277 |
def read_tasoc_lightcurve(filename,
flux_column="FLUX_RAW",
quality_bitmask="default"):
"""Returns a `TessLightCurve`.
Parameters
----------
filename : str
Local path or remote url of TASOC light curve FITS file.
flux_column : 'flux_RAW' - this contains the T'DA extracted lightcurve,
with no corrections applied to the raw light curves. Corrected lightcurves
may be a thing in the future as there is a flux_corr column.
quality_bitmask : For now this always none - as no calibration applied
"""
lc = read_generic_lightcurve(filename,
flux_column=flux_column.lower(),
time_format='btjd',
quality_column="QUALITY")
# Filter out poor-quality data
# NOTE: Unfortunately Astropy Table masking does not yet work for columns
# that are Quantity objects, so for now we remove poor-quality data instead
# of masking. Details: https://github.com/astropy/astropy/issues/10119
#quality_mask = TessQualityFlags.create_quality_mask(
# quality_array=lc['dquality'],
# bitmask=quality_bitmask)
#lc = lc[quality_mask]
lc.meta['TARGETID'] = lc.meta.get('TICID')
lc.meta['QUALITY_BITMASK'] = quality_bitmask
#lc.meta['QUALITY_MASK'] = quality_mask
# QLP light curves are normalized by default
lc.meta['NORMALIZED'] = True
return TessLightCurve(data=lc)
|
def read_tasoc_lightcurve(filename,
flux_column="FLUX_RAW",
quality_bitmask="default"):
"""Returns a `TessLightCurve`.
Parameters
----------
filename : str
Local path or remote url of TASOC light curve FITS file.
flux_column : 'flux_RAW' - this contains the T'DA extracted lightcurve,
with no corrections applied to the raw light curves. Corrected lightcurves
may be a thing in the future as there is a flux_corr column.
quality_bitmask : For now this always none - as no calibration applied
"""
lc = read_generic_lightcurve(filename,
flux_column=flux_column.lower(),
time_format='btjd',
quality_column="QUALITY")
# Filter out poor-quality data
# NOTE: Unfortunately Astropy Table masking does not yet work for columns
# that are Quantity objects, so for now we remove poor-quality data instead
# of masking. Details: https://github.com/astropy/astropy/issues/10119
#quality_mask = TessQualityFlags.create_quality_mask(
# quality_array=lc['dquality'],
# bitmask=quality_bitmask)
#lc = lc[quality_mask]
lc.meta['TARGETID'] = lc.meta.get('TICID')
lc.meta['QUALITY_BITMASK'] = quality_bitmask
#lc.meta['QUALITY_MASK'] = quality_mask
# TASOC light curves are normalized by default
lc.meta['NORMALIZED'] = True
return TessLightCurve(data=lc)
|
25,602 |
def is_tx_hash_bytes(bytes_: Any) -> bool:
"""
Check wether the `bytes_` is a correctly encoded transaction hash,
but do not query any blockchain node to check for transaction validity.
"""
if isinstance(bytes_, T_TransactionHash):
return bool(re.fullmatch("^0x([A-Fa-f0-9]{64})$", bytes_.hex()))
return False
|
def is_tx_hash_bytes(bytes_: Any) -> bool:
"""
Check wether the `bytes_` is a correctly encoded transaction hash,
but do not query any blockchain node to check for transaction validity.
"""
if isinstance(bytes_, T_TransactionHash):
return len(bytes_) == 32
return False
|
8,391 |
def template_match(observed_spectrum, spectral_templates,
resample_method="flux_conserving", min_redshift=None, max_redshift=None, delta_redshift=None):
"""
Find which spectral templates is the best fit to an observed spectrum by
computing the chi-squared. If two template_spectra have the same chi2, the
first template is returned.
Parameters
----------
observed_spectrum : :class:`~specutils.Spectrum1D`
The observed spectrum.
spectral_templates : :class:`~specutils.Spectrum1D` or :class:`~specutils.SpectrumCollection` or `list`
That will give a single :class:`~specutils.Spectrum1D` when iterated
over. The template spectra, which will be resampled, normalized, and
compared to the observed spectrum, where the smallest chi2 and
normalized template spectrum will be returned.
resample_method : `string`
Three resample options: flux_conserving, linear_interpolated, and spline_interpolated.
Anything else does not resample the spectrum.
min_redshift : `float`
The minimum redshift allowed
max_redshift : `float`
The maximum redshift allowed
delta_redshift : `float`
The amount the redshift will change between loops
Returns
-------
normalized_template_spectrum : :class:`~specutils.Spectrum1D`
The template spectrum that has been normalized.
chi2 : `float`
The chi2 of the flux of the observed_spectrum and the flux of the
normalized template spectrum.
smallest_chi_index : `int`
The index of the spectrum with the smallest chi2 in spectral templates.
"""
if hasattr(spectral_templates, 'flux') and len(spectral_templates.flux.shape) == 1:
# Account for redshift if provided
if min_redshift and max_redshift and delta_redshift:
redshift, redshifted_spectrum = template_redshift(observed_spectrum, spectral_templates,
min_redshift, max_redshift, delta_redshift)
spectral_templates = redshifted_spectrum
normalized_spectral_template, chi2 = _chi_sqaure_for_templates(
observed_spectrum, spectral_templates, resample_method)
return normalized_spectral_template, chi2
# At this point, the template spectrum is either a ``SpectrumCollection``
# or a multi-dimensional``Spectrum1D``. Loop through the object and return
# the template spectrum with the lowest chi square and its corresponding
# chi square.
chi2_min = None
smallest_chi_spec = None
for index, spectrum in enumerate(spectral_templates):
# Account for redshift if provided
if min_redshift and max_redshift and delta_redshift:
redshift, redshifted_spectrum = template_redshift(observed_spectrum, spectrum,
min_redshift, max_redshift, delta_redshift)
spectrum = redshifted_spectrum
normalized_spectral_template, chi2 = _chi_sqaure_for_templates(
observed_spectrum, spectrum, resample_method)
if chi2_min is None or chi2 < chi2_min:
chi2_min = chi2
smallest_chi_spec = normalized_spectral_template
smallest_chi_index = index
return smallest_chi_spec, chi2_min, smallest_chi_index
|
def template_match(observed_spectrum, spectral_templates,
resample_method="flux_conserving", min_redshift=None, max_redshift=None, delta_redshift=None):
"""
Find which spectral templates is the best fit to an observed spectrum by
computing the chi-squared. If two template_spectra have the same chi2, the
first template is returned.
Parameters
----------
observed_spectrum : :class:`~specutils.Spectrum1D`
The observed spectrum.
spectral_templates : :class:`~specutils.Spectrum1D` or :class:`~specutils.SpectrumCollection` or `list`
That will give a single :class:`~specutils.Spectrum1D` when iterated
over. The template spectra, which will be resampled, normalized, and
compared to the observed spectrum, where the smallest chi2 and
normalized template spectrum will be returned.
resample_method : `string`
Three resample options: flux_conserving, linear_interpolated, and spline_interpolated.
Anything else does not resample the spectrum.
min_redshift : `float`
The minimum redshift allowed
max_redshift : `float`
The maximum redshift allowed
delta_redshift : `float`
The amount the redshift will change between loops
Returns
-------
normalized_template_spectrum : :class:`~specutils.Spectrum1D`
The template spectrum that has been normalized.
chi2 : `float`
The chi2 of the flux of the observed_spectrum and the flux of the
normalized template spectrum.
smallest_chi_index : `int`
The index of the spectrum with the smallest chi2 in spectral templates.
"""
if hasattr(spectral_templates, 'flux') and len(spectral_templates.flux.shape) == 1:
# Account for redshift if provided
if min_redshift and max_redshift and delta_redshift:
redshift, redshifted_spectrum = template_redshift(observed_spectrum, spectral_templates,
min_redshift, max_redshift, delta_redshift)
spectral_templates = redshifted_spectrum
normalized_spectral_template, chi2 = _chi_sqaure_for_templates(
observed_spectrum, spectral_templates, resample_method)
return normalized_spectral_template, chi2
# At this point, the template spectrum is either a ``SpectrumCollection``
# or a multi-dimensional``Spectrum1D``. Loop through the object and return
# the template spectrum with the lowest chi square and its corresponding
# chi square.
chi2_min = None
smallest_chi_spec = None
for index, spectrum in enumerate(spectral_templates):
# Account for redshift if provided
if min_redshift and max_redshift and delta_redshift:
redshift, redshifted_spectrum = template_redshift(observed_spectrum, spectrum,
min_redshift, max_redshift, delta_redshift)
spectrum = redshifted_spectrum
normalized_spectral_template, chi2 = _chi_square_for_templates(
observed_spectrum, spectrum, resample_method)
if chi2_min is None or chi2 < chi2_min:
chi2_min = chi2
smallest_chi_spec = normalized_spectral_template
smallest_chi_index = index
return smallest_chi_spec, chi2_min, smallest_chi_index
|
54,217 |
def two_qubit_state_tomography(sampler: work.Sampler,
first_qubit: devices.GridQubit,
second_qubit: devices.GridQubit,
circuit: circuits.Circuit,
repetitions: int = 1000) -> TomographyResult:
r"""Two-qubit state tomography.
To measure the density matrix of the output state of a two-qubit circuit,
different combinations of I, X/2 and Y/2 operations are applied to the
two qubits before measurements in the z-basis to determine the state
probabilities $P_{00}, P_{01}, P_{10}.$
The density matrix rho is decomposed into an operator-sum representation
$\sum_{i, j} c_{ij} * \sigma_i \bigotimes \sigma_j$, where $i, j = 0, 1, 2,
3$ and $\sigma_0 = I, \sigma_1 = \sigma_x, \sigma_2 = \sigma_y, \sigma_3 =
\sigma_z$ are the single-qubit Identity and Pauli matrices.
Based on the measured probabilities probs and the transformations of the
measurement operator by different basis rotations, one can build an
overdetermined set of linear equations.
As an example, if the identity operation (I) is applied to both qubits, the
measurement operators are $(I +/- \sigma_z) \bigotimes (I +/- \sigma_z)$.
The state probabilities $P_{00}, P_{01}, P_{10}$ thus obtained contribute
to the following linear equations (setting $c_{00} = 1$):
$c_{03} + c_{30} + c_{33} = 4*P_{00} - 1$
$-c_{03} + c_{30} - c_{33} = 4*P_{01} - 1$
$c_{03} - c_{30} - c_{33} = 4*P_{10} - 1$
And if a Y/2 rotation is applied to the first qubit and a X/2 rotation
is applied to the second qubit before measurement, the measurement
operators are $(I -/+ \sigma_x) \bigotimes (I +/- \sigma_y)$. The
probabilities obtained instead contribute to the following linear equations:
$c_{02} - c_{10} - c_{12} = 4*P_{00} - 1$
$-c_{02} - c_{10} + c_{12} = 4*P_{01} - 1$
$c_{02} + c_{10} + c_{12} = 4*P_{10} - 1$
Note that this set of equations has the same form as the first set under
the transformation $c_{03}$ <-> $c_{02}, c_{30}$ <-> $-c_{10}$ and
$c_{33}$ <-> $-c_{12}$.
Since there are 9 possible combinations of rotations (each producing 3
independent probabilities) and a total of 15 unknown coefficients $c_{ij}$,
one can cast all the measurement results into a overdetermined set of
linear equations numpy.dot(mat, c) = probs. Here c is of length 15 and
contains all the $c_{ij}$'s (except $c_{00}$ which is set to 1), and mat
is a 27 by 15 matrix having three non-zero elements in each row that are
either 1 or -1.
The least-square solution to the above set of linear equations is then
used to construct the density matrix rho.
See Vandersypen and Chuang, Rev. Mod. Phys. 76, 1037 for details and
Steffen et al, Science 313, 1423 for a related experiment.
Args:
sampler: The quantum engine or simulator to run the circuits.
first_qubit: The first qubit under test.
second_qubit: The second qubit under test.
circuit: The circuit to execute on the qubits before tomography.
repetitions: The number of measurements for each basis rotation.
Returns:
A TomographyResult object that stores and plots the density matrix.
"""
# The size of the system of linear equations to be solved.
num_rows = 27
num_cols = 15
def _measurement(two_qubit_circuit: circuits.Circuit) -> np.ndarray:
two_qubit_circuit.append(ops.measure(first_qubit, second_qubit,
key='z'))
results = sampler.run(two_qubit_circuit, repetitions=repetitions)
results_hist = results.histogram(key='z')
prob_list = [results_hist[0], results_hist[1], results_hist[2]]
return np.asarray(prob_list) / repetitions
sigma_0 = np.eye(2) * 0.5
sigma_1 = np.array([[0.0, 1.0], [1.0, 0.0]]) * 0.5
sigma_2 = np.array([[0.0, -1.0j], [1.0j, 0.0]]) * 0.5
sigma_3 = np.array([[1.0, 0.0], [0.0, -1.0]]) * 0.5
sigmas = [sigma_0, sigma_1, sigma_2, sigma_3]
# Stores all 27 measured probabilities (P_00, P_01, P_10 after 9
# different basis rotations).
probs = np.array([])
rots = [ops.X**0, ops.X**0.5, ops.Y**0.5]
# Represents the coefficients in front of the c_ij's (-1, 0 or 1) in the
# system of 27 linear equations.
mat = np.zeros((num_rows, num_cols))
# Represents the relative signs between the linear equations for P_00,
# P_01, and P_10.
s = np.array([[1.0, 1.0, 1.0], [-1.0, 1.0, -1.0], [1.0, -1.0, -1.0]])
for i, rot_1 in enumerate(rots):
for j, rot_2 in enumerate(rots):
m_idx, indices, signs = _indices_after_basis_rot(i, j)
mat[m_idx:(m_idx + 3), indices] = s * np.tile(signs, (3, 1))
test_circuit = circuit + circuits.Circuit(rot_1(first_qubit))
test_circuit.append(rot_2(second_qubit))
probs = np.concatenate((probs, _measurement(test_circuit)))
c, _, _, _ = np.linalg.lstsq(mat, 4.0 * probs - 1.0, rcond=-1)
c = np.concatenate(([1.0], c))
c = c.reshape(4, 4)
rho = np.zeros((4, 4))
for i in range(4):
for j in range(4):
rho = rho + c[i, j] * np.kron(sigmas[i], sigmas[j])
return TomographyResult(rho)
|
def two_qubit_state_tomography(sampler: work.Sampler,
first_qubit: devices.GridQubit,
second_qubit: devices.GridQubit,
circuit: circuits.Circuit,
repetitions: int = 1000) -> TomographyResult:
r"""Two-qubit state tomography.
To measure the density matrix of the output state of a two-qubit circuit,
different combinations of I, X/2 and Y/2 operations are applied to the
two qubits before measurements in the z-basis to determine the state
probabilities $P_{00}, P_{01}, P_{10}.$
The density matrix rho is decomposed into an operator-sum representation
$\sum_{i, j} c_{ij} * \sigma_i \bigotimes \sigma_j$, where $i, j = 0, 1, 2,
3$ and $\sigma_0 = I, \sigma_1 = \sigma_x, \sigma_2 = \sigma_y, \sigma_3 =
\sigma_z$ are the single-qubit Identity and Pauli matrices.
Based on the measured probabilities probs and the transformations of the
measurement operator by different basis rotations, one can build an
overdetermined set of linear equations.
As an example, if the identity operation (I) is applied to both qubits, the
measurement operators are $(I +/- \sigma_z) \bigotimes (I +/- \sigma_z)$.
The state probabilities $P_{00}, P_{01}, P_{10}$ thus obtained contribute
to the following linear equations (setting $c_{00} = 1$):
$$
c_{03} + c_{30} + c_{33} = 4*P_{00} - 1
...
$$
$-c_{03} + c_{30} - c_{33} = 4*P_{01} - 1$
$c_{03} - c_{30} - c_{33} = 4*P_{10} - 1$
And if a Y/2 rotation is applied to the first qubit and a X/2 rotation
is applied to the second qubit before measurement, the measurement
operators are $(I -/+ \sigma_x) \bigotimes (I +/- \sigma_y)$. The
probabilities obtained instead contribute to the following linear equations:
$c_{02} - c_{10} - c_{12} = 4*P_{00} - 1$
$-c_{02} - c_{10} + c_{12} = 4*P_{01} - 1$
$c_{02} + c_{10} + c_{12} = 4*P_{10} - 1$
Note that this set of equations has the same form as the first set under
the transformation $c_{03}$ <-> $c_{02}, c_{30}$ <-> $-c_{10}$ and
$c_{33}$ <-> $-c_{12}$.
Since there are 9 possible combinations of rotations (each producing 3
independent probabilities) and a total of 15 unknown coefficients $c_{ij}$,
one can cast all the measurement results into a overdetermined set of
linear equations numpy.dot(mat, c) = probs. Here c is of length 15 and
contains all the $c_{ij}$'s (except $c_{00}$ which is set to 1), and mat
is a 27 by 15 matrix having three non-zero elements in each row that are
either 1 or -1.
The least-square solution to the above set of linear equations is then
used to construct the density matrix rho.
See Vandersypen and Chuang, Rev. Mod. Phys. 76, 1037 for details and
Steffen et al, Science 313, 1423 for a related experiment.
Args:
sampler: The quantum engine or simulator to run the circuits.
first_qubit: The first qubit under test.
second_qubit: The second qubit under test.
circuit: The circuit to execute on the qubits before tomography.
repetitions: The number of measurements for each basis rotation.
Returns:
A TomographyResult object that stores and plots the density matrix.
"""
# The size of the system of linear equations to be solved.
num_rows = 27
num_cols = 15
def _measurement(two_qubit_circuit: circuits.Circuit) -> np.ndarray:
two_qubit_circuit.append(ops.measure(first_qubit, second_qubit,
key='z'))
results = sampler.run(two_qubit_circuit, repetitions=repetitions)
results_hist = results.histogram(key='z')
prob_list = [results_hist[0], results_hist[1], results_hist[2]]
return np.asarray(prob_list) / repetitions
sigma_0 = np.eye(2) * 0.5
sigma_1 = np.array([[0.0, 1.0], [1.0, 0.0]]) * 0.5
sigma_2 = np.array([[0.0, -1.0j], [1.0j, 0.0]]) * 0.5
sigma_3 = np.array([[1.0, 0.0], [0.0, -1.0]]) * 0.5
sigmas = [sigma_0, sigma_1, sigma_2, sigma_3]
# Stores all 27 measured probabilities (P_00, P_01, P_10 after 9
# different basis rotations).
probs = np.array([])
rots = [ops.X**0, ops.X**0.5, ops.Y**0.5]
# Represents the coefficients in front of the c_ij's (-1, 0 or 1) in the
# system of 27 linear equations.
mat = np.zeros((num_rows, num_cols))
# Represents the relative signs between the linear equations for P_00,
# P_01, and P_10.
s = np.array([[1.0, 1.0, 1.0], [-1.0, 1.0, -1.0], [1.0, -1.0, -1.0]])
for i, rot_1 in enumerate(rots):
for j, rot_2 in enumerate(rots):
m_idx, indices, signs = _indices_after_basis_rot(i, j)
mat[m_idx:(m_idx + 3), indices] = s * np.tile(signs, (3, 1))
test_circuit = circuit + circuits.Circuit(rot_1(first_qubit))
test_circuit.append(rot_2(second_qubit))
probs = np.concatenate((probs, _measurement(test_circuit)))
c, _, _, _ = np.linalg.lstsq(mat, 4.0 * probs - 1.0, rcond=-1)
c = np.concatenate(([1.0], c))
c = c.reshape(4, 4)
rho = np.zeros((4, 4))
for i in range(4):
for j in range(4):
rho = rho + c[i, j] * np.kron(sigmas[i], sigmas[j])
return TomographyResult(rho)
|