id (int64, 11–59.9k) | original (string, 33–150k chars) | modified (string, 37–150k chars) |
---|---|---|
4,425 | def _temp_proj(ref_2, ref_1, raw_data, n_proj=6):
"""Remove common signal subspace of ref_2 and ref_1 from raw_data.
Parameters
----------
ref_2 : np.ndarray of float, shape (n_sensors_2, n_times)
The magnetometer data for CSS. Can use either all magnetometer data or
a few selected sensors close to a region to be suppressed.
ref_1 : np.ndarray of float, shape (n_sensors_1, n_times)
The gradiometer data for CSS. Can use either all gradiometer data or
a few selected sensors close to a region to be suppressed.
raw_data : np.ndarray of float, shape (n_sensors_raw, n_times)
The data to be filtered, typically the EEG data.
n_proj : int
The number of projection vectors.
Notes
-----
This temporal projection procedure removes the common signal subspace
between ref_2 and ref_1 from raw_data using n_proj number of
projection vectors. Normally used for cortical signal suppression, where
ref_1 is gradiometer data, ref_2 is magnetometer data and
raw_data is EEG data.
"""
# Orthonormalize gradiometer and magnetometer data by a QR decomposition
ref_1_orth = np.linalg.qr(ref_1.T)[0]
ref_2_orth = np.linalg.qr(ref_2.T)[0]
# Calculate cross-correlation
cross_corr = np.dot(ref_1_orth.T, ref_2_orth)
# Channel weights for common temporal subspace by SVD of cross-correlation
ref_1_ch_weights, _, _ = np.linalg.svd(cross_corr)
# Get temporal signals from channel weights
proj_mat = ref_1_orth @ ref_1_ch_weights
# Project out common subspace
filtered_data = raw_data
proj_vec = proj_mat[:, :n_proj]
weights = filtered_data @ proj_vec
filtered_data -= weights @ proj_vec.T
| def _temp_proj(ref_2, ref_1, raw_data, n_proj=6):
"""Remove common signal subspace of ref_2 and ref_1 from raw_data.
Parameters
----------
ref_2 : np.ndarray of float, shape (n_sensors_2, n_times)
The magnetometer data for CSS. Can use either all magnetometer data or
a few selected sensors close to a region to be suppressed.
ref_1 : np.ndarray of float, shape (n_sensors_1, n_times)
The gradiometer data for CSS. Can use either all gradiometer data or
a few selected sensors close to a region to be suppressed.
raw_data : array of float, shape (n_sensors_raw, n_times)
The data to be filtered, typically the EEG data.
n_proj : int
The number of projection vectors.
Notes
-----
This temporal projection procedure removes the common signal subspace
between ref_2 and ref_1 from raw_data using n_proj number of
projection vectors. Normally used for cortical signal suppression, where
ref_1 is gradiometer data, ref_2 is magnetometer data and
raw_data is EEG data.
"""
# Orthonormalize gradiometer and magnetometer data by a QR decomposition
ref_1_orth = np.linalg.qr(ref_1.T)[0]
ref_2_orth = np.linalg.qr(ref_2.T)[0]
# Calculate cross-correlation
cross_corr = np.dot(ref_1_orth.T, ref_2_orth)
# Channel weights for common temporal subspace by SVD of cross-correlation
ref_1_ch_weights, _, _ = np.linalg.svd(cross_corr)
# Get temporal signals from channel weights
proj_mat = ref_1_orth @ ref_1_ch_weights
# Project out common subspace
filtered_data = raw_data
proj_vec = proj_mat[:, :n_proj]
weights = filtered_data @ proj_vec
filtered_data -= weights @ proj_vec.T
|
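The only difference between the two columns above is the docstring type for `raw_data`. As a minimal standalone sketch of the same temporal-projection step, assuming made-up array shapes and plain NumPy (this is not the library's code):

```python
import numpy as np

rng = np.random.default_rng(0)
ref_1 = rng.standard_normal((20, 1000))     # gradiometer-like reference data
ref_2 = rng.standard_normal((10, 1000))     # magnetometer-like reference data
raw_data = rng.standard_normal((32, 1000))  # data to be filtered
n_proj = 6

# Orthonormalize the reference time courses over the time dimension
ref_1_orth = np.linalg.qr(ref_1.T)[0]
ref_2_orth = np.linalg.qr(ref_2.T)[0]
# SVD of the cross-correlation gives weights for the common temporal subspace
ref_1_ch_weights, _, _ = np.linalg.svd(ref_1_orth.T @ ref_2_orth)
proj_vec = (ref_1_orth @ ref_1_ch_weights)[:, :n_proj]
# Project the common subspace out of the data
filtered_data = raw_data - (raw_data @ proj_vec) @ proj_vec.T
print(filtered_data.shape)  # (32, 1000)
```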
7,542 | def get_pkg_data_path(*path, package=None):
"""Make path from source-included data directories.
Parameters
----------
*path : str
Name/location of the desired data file/directory.
May be a tuple of strings -- for ``os.path`` intelligent path joining.
package : str, optional, keyword only
If specified, look for a file relative to the given package, rather
than the default of looking relative to the calling module's package.
Returns
-------
path : str
Name/location of the desired data file/directory.
"""
if package is None:
module = find_current_module(1, finddiff=['astropy.utils.data', 'contextlib'])
if module is None:
# not called from inside an astropy package. So just pass name
# through
return os.path.join(*path)
if not hasattr(module, '__package__') or not module.__package__:
# The __package__ attribute may be missing or set to None; see
# PEP-366, also astropy issue #1256
if '.' in module.__name__:
package = module.__name__.rpartition('.')[0]
else:
package = module.__name__
else:
package = module.__package__
else:
module = resolve_name(package)
rootpkgname = package.partition('.')[0]
rootpkg = resolve_name(rootpkgname)
module_path = os.path.dirname(module.__file__)
path = os.path.join(module_path, *path)
root_dir = os.path.dirname(rootpkg.__file__)
if not _is_inside(path, root_dir):
raise RuntimeError(f"attempted to get a local data file outside "
f"of the {rootpkgname} tree.")
return path
| def get_pkg_data_path(*path, package=None):
"""Get path from source-included data directories.
Parameters
----------
*path : str
Name/location of the desired data file/directory.
May be a tuple of strings -- for ``os.path`` intelligent path joining.
package : str, optional, keyword only
If specified, look for a file relative to the given package, rather
than the default of looking relative to the calling module's package.
Returns
-------
path : str
Name/location of the desired data file/directory.
"""
if package is None:
module = find_current_module(1, finddiff=['astropy.utils.data', 'contextlib'])
if module is None:
# not called from inside an astropy package. So just pass name
# through
return os.path.join(*path)
if not hasattr(module, '__package__') or not module.__package__:
# The __package__ attribute may be missing or set to None; see
# PEP-366, also astropy issue #1256
if '.' in module.__name__:
package = module.__name__.rpartition('.')[0]
else:
package = module.__name__
else:
package = module.__package__
else:
module = resolve_name(package)
rootpkgname = package.partition('.')[0]
rootpkg = resolve_name(rootpkgname)
module_path = os.path.dirname(module.__file__)
path = os.path.join(module_path, *path)
root_dir = os.path.dirname(rootpkg.__file__)
if not _is_inside(path, root_dir):
raise RuntimeError(f"attempted to get a local data file outside "
f"of the {rootpkgname} tree.")
return path
|
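Only the docstring verb changes ('Make' → 'Get'). For context, a hedged sketch of resolving a data path relative to an importable package, similar in spirit to what the function does internally; the package and file names below are arbitrary examples, not real data files:

```python
import importlib
import os

def pkg_relative_path(package_name: str, *parts: str) -> str:
    """Join *parts onto the named package's own directory."""
    module = importlib.import_module(package_name)
    return os.path.join(os.path.dirname(module.__file__), *parts)

# The path is constructed even if the file itself does not exist.
print(pkg_relative_path("json", "data", "example.dat"))
```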
40,170 | def _compute_library_size(
data: Union[sp_sparse.spmatrix, np.ndarray]
) -> Tuple[np.ndarray, np.ndarray]:
sum_counts = data.sum(axis=1)
masked_log_sum = np.ma.log(sum_counts)
if np.ma.is_masked(masked_log_sum):
logger.warning(
"This dataset has some empty cells, this might fail scVI inference."
"Data should be filtered with `scanpy.pp.filter_cells()`"
)
log_counts = masked_log_sum.filled(0)
local_mean = (np.mean(log_counts).reshape(-1, 1)).astype(np.float32)
local_var = (np.var(log_counts).reshape(-1, 1)).astype(np.float32)
return local_mean, local_var
| def _compute_library_size(
data: Union[sp_sparse.spmatrix, np.ndarray]
) -> Tuple[np.ndarray, np.ndarray]:
sum_counts = data.sum(axis=1)
masked_log_sum = np.ma.log(sum_counts)
if np.ma.is_masked(masked_log_sum):
logger.warning(
"This dataset has some empty cells, this might fail inference."
"Data should be filtered with `scanpy.pp.filter_cells()`"
)
log_counts = masked_log_sum.filled(0)
local_mean = (np.mean(log_counts).reshape(-1, 1)).astype(np.float32)
local_var = (np.var(log_counts).reshape(-1, 1)).astype(np.float32)
return local_mean, local_var
|
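Only the warning text changes. A self-contained sketch of the same library-size computation on a small dense count matrix (the values are made up):

```python
import numpy as np

counts = np.array([[3, 0, 5],
                   [0, 0, 0],   # an empty cell: its row sum is 0
                   [1, 2, 2]], dtype=float)
sum_counts = counts.sum(axis=1)
masked_log_sum = np.ma.log(sum_counts)      # log(0) is masked instead of -inf
if np.ma.is_masked(masked_log_sum):
    print("dataset contains empty cells")
log_counts = masked_log_sum.filled(0)
local_mean = np.mean(log_counts).reshape(-1, 1).astype(np.float32)
local_var = np.var(log_counts).reshape(-1, 1).astype(np.float32)
print(local_mean, local_var)
```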
33,041 | def qemu_check_kvm_support() -> bool:
kvm = Path("/dev/kvm")
if not kvm.exists():
return False
# some CI runners may present a non-working KVM device
try:
with kvm.open():
return True
except OSError:
return False
| def qemu_check_kvm_support() -> bool:
kvm = Path("/dev/kvm")
if not kvm.is_char_device():
return False
# some CI runners may present a non-working KVM device
try:
with kvm.open():
return True
except OSError:
return False
|
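The modification replaces `exists()` with `is_char_device()`, since `/dev/kvm` should be a character device rather than merely a path that exists. The same pattern, sketched for an arbitrary device node (`/dev/null` is used only as a portable POSIX example):

```python
from pathlib import Path

def device_is_usable(path: str) -> bool:
    node = Path(path)
    # is_char_device() is False for missing paths and for regular files alike
    if not node.is_char_device():
        return False
    try:
        with node.open():
            return True
    except OSError:
        return False

print(device_is_usable("/dev/null"))  # True on a typical POSIX system
```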
32,487 | def validate_and_parse_detection_start_end_time(args: Dict[str, Any]) -> Tuple[Optional[datetime], Optional[datetime]]:
"""
Validate and return detection_start_time and detection_end_time as per Chronicle Backstory or \
raise a ValueError if the given inputs are invalid.
:type args: dict
:param args: contains all arguments for command
:return : detection_start_time, detection_end_time: Detection start and end time in the format API accepts
:rtype : Tuple[Optional[datetime], Optional[datetime]]
"""
detection_start_time = arg_to_datetime(args.get('start_time'), 'start_time') if args.get('start_time') \
else arg_to_datetime(args.get('detection_start_time'), 'detection_start_time')
detection_end_time = arg_to_datetime(args.get('end_time'), 'end_time') if args.get('end_time') \
else arg_to_datetime(args.get('detection_end_time'), 'detection_end_time')
list_basis = args.get('list_basis', '')
if list_basis and not detection_start_time and not detection_end_time:
raise ValueError("To sort detections by \"list_basis\", either \"start_time\" or \"end_time\" argument is "
"required.")
if detection_start_time:
detection_start_time = detection_start_time.strftime(DATE_FORMAT) # type: ignore
if detection_end_time:
detection_end_time = detection_end_time.strftime(DATE_FORMAT) # type: ignore
return detection_start_time, detection_end_time
| def validate_and_parse_detection_start_end_time(args: Dict[str, Any]) -> Tuple[Optional[datetime], Optional[datetime]]:
"""
Validate and return detection_start_time and detection_end_time as per Chronicle Backstory or \
raise a ValueError if the given inputs are invalid.
:type args: dict
:param args: contains all arguments for command
:return : detection_start_time, detection_end_time: Detection start and end time in the format API accepts
:rtype : Tuple[Optional[str], Optional[str]]
"""
detection_start_time = arg_to_datetime(args.get('start_time'), 'start_time') if args.get('start_time') \
else arg_to_datetime(args.get('detection_start_time'), 'detection_start_time')
detection_end_time = arg_to_datetime(args.get('end_time'), 'end_time') if args.get('end_time') \
else arg_to_datetime(args.get('detection_end_time'), 'detection_end_time')
list_basis = args.get('list_basis', '')
if list_basis and not detection_start_time and not detection_end_time:
raise ValueError("To sort detections by \"list_basis\", either \"start_time\" or \"end_time\" argument is "
"required.")
if detection_start_time:
detection_start_time = detection_start_time.strftime(DATE_FORMAT) # type: ignore
if detection_end_time:
detection_end_time = detection_end_time.strftime(DATE_FORMAT) # type: ignore
return detection_start_time, detection_end_time
|
13,913 | def _find_excluded_ranges(
lines: List[Tuple[int, str]],
*,
warnings: _ExclusionRangeWarnings,
exclude_lines_by_pattern: Optional[str] = None,
exclude_branches_by_pattern: Optional[str] = None,
exclude_pattern_prefix: str,
) -> Callable[[int], bool]:
"""
Scan through all lines to find line ranges and branch ranges covered by exclusion markers.
Example:
>>> lines = [(11, '//PREFIX_EXCL_LINE'), (13, '//IGNORE_LINE'), (15, '//PREFIX_EXCL_START'), (18, '//PREFIX_EXCL_STOP'),
... (21, '//PREFIX_EXCL_BR_LINE'), (23, '//IGNORE_BR'), (25, '//PREFIX_EXCL_BR_START'), (28, '//PREFIX_EXCL_BR_STOP')]
>>> [exclude_line, exclude_branch] = _find_excluded_ranges(
... lines, warnings=..., exclude_lines_by_pattern = '.*IGNORE_LINE',
... exclude_branches_by_pattern = '.*IGNORE_BR', exclude_pattern_prefix='PREFIX')
>>> [lineno for lineno in range(30) if exclude_line(lineno)]
[11, 13, 15, 16, 17]
>>> [lineno for lineno in range(30) if exclude_branch(lineno)]
[21, 23, 25, 26, 27]
"""
exclude_lines_by_pattern_regex = None
if exclude_lines_by_pattern:
exclude_lines_by_pattern_regex = re.compile(exclude_lines_by_pattern)
exclude_branches_by_pattern_regex = None
if exclude_branches_by_pattern:
exclude_branches_by_pattern_regex = re.compile(exclude_branches_by_pattern)
# possibly overlapping half-open ranges that are excluded
exclude_line_ranges: List[Tuple[int, int]] = []
exclude_branch_ranges: List[Tuple[int, int]] = []
exclusion_stack_line = []
exclusion_stack_branch = []
for lineno, code in lines:
if _EXCLUDE_FLAG in code:
# process the exclusion marker
#
# header is a marker name like LCOV or GCOVR
#
# START flags are added to the exlusion stack
# STOP flags remove a marker from the exclusion stack
# line exclusion
excl_line_pattern = re.compile(
"(" + exclude_pattern_prefix + ")" + _EXCLUDE_LINE_PATTERN_POSTFIX
)
for header, flag in excl_line_pattern.findall(code):
if flag == "LINE":
if exclusion_stack_line:
warnings.line_after_start(
lineno, f"{header}_EXCL_LINE", exclusion_stack_line[-1][1]
)
else:
exclude_line_ranges.append((lineno, lineno + 1))
if flag == "START":
exclusion_stack_line.append((header, lineno))
elif flag == "STOP":
if not exclusion_stack_line:
warnings.stop_without_start(
lineno, f"{header}_EXCL_START", f"{header}_EXCL_STOP"
)
continue
start_header, start_lineno = exclusion_stack_line.pop()
if header != start_header:
warnings.mismatched_start_stop(
start_lineno,
f"{start_header}_EXCL_START",
lineno,
f"{header}_EXCL_STOP",
)
exclude_line_ranges.append((start_lineno, lineno))
else: # pragma: no cover
pass
# branche exclusion
excl_branch_pattern = re.compile(
"(" + exclude_pattern_prefix + ")" + _EXCLUDE_BRANCH_PATTERN_POSTFIX
)
for header, flag in excl_branch_pattern.findall(code):
if flag == "LINE":
if exclusion_stack_branch:
warnings.branch_after_start(
lineno, f"{header}_EXCL_LINE", exclusion_stack_branch[-1][1]
)
else:
exclude_branch_ranges.append((lineno, lineno + 1))
if flag == "START":
exclusion_stack_branch.append((header, lineno))
elif flag == "STOP":
if not exclusion_stack_branch:
warnings.stop_without_start(
lineno, f"{header}_EXCL_START", f"{header}_EXCL_STOP"
)
continue
start_header, start_lineno = exclusion_stack_branch.pop()
if header != start_header:
warnings.mismatched_start_stop(
start_lineno,
f"{start_header}_EXCL_START",
lineno,
f"{header}_EXCL_STOP",
)
exclude_branch_ranges.append((start_lineno, lineno))
else: # pragma: no cover
pass
if exclude_lines_by_pattern_regex:
if exclude_lines_by_pattern_regex.match(code):
exclude_line_ranges.append((lineno, lineno + 1))
if exclude_branches_by_pattern_regex:
if exclude_branches_by_pattern_regex.match(code):
exclude_branch_ranges.append((lineno, lineno + 1))
for header, lineno in exclusion_stack_line:
warnings.start_without_stop(
lineno, f"{header}_EXCL_START", f"{header}_EXCL_STOP"
)
for header, lineno in exclusion_stack_branch:
warnings.start_without_stop(
lineno, f"{header}_EXCL_START", f"{header}_EXCL_STOP"
)
return [
_make_is_in_any_range(exclude_line_ranges),
_make_is_in_any_range(exclude_branch_ranges),
]
| def _find_excluded_ranges(
lines: List[Tuple[int, str]],
*,
warnings: _ExclusionRangeWarnings,
exclude_lines_by_pattern: Optional[str] = None,
exclude_branches_by_pattern: Optional[str] = None,
exclude_pattern_prefix: str,
) -> Callable[[int], bool]:
"""
Scan through all lines to find line ranges and branch ranges covered by exclusion markers.
Example:
>>> lines = [(11, '//PREFIX_EXCL_LINE'), (13, '//IGNORE_LINE'), (15, '//PREFIX_EXCL_START'), (18, '//PREFIX_EXCL_STOP'),
... (21, '//PREFIX_EXCL_BR_LINE'), (23, '//IGNORE_BR'), (25, '//PREFIX_EXCL_BR_START'), (28, '//PREFIX_EXCL_BR_STOP')]
>>> [exclude_line, exclude_branch] = _find_excluded_ranges(
... lines, warnings=..., exclude_lines_by_pattern = '.*IGNORE_LINE',
... exclude_branches_by_pattern = '.*IGNORE_BR', exclude_pattern_prefix='PREFIX')
>>> [lineno for lineno in range(30) if exclude_line(lineno)]
[11, 13, 15, 16, 17]
>>> [lineno for lineno in range(30) if exclude_branch(lineno)]
[21, 23, 25, 26, 27]
"""
exclude_lines_by_pattern_regex = None
if exclude_lines_by_pattern:
exclude_lines_by_pattern_regex = re.compile(exclude_lines_by_pattern)
exclude_branches_by_pattern_regex = None
if exclude_branches_by_pattern:
exclude_branches_by_pattern_regex = re.compile(exclude_branches_by_pattern)
# possibly overlapping half-open ranges that are excluded
exclude_line_ranges: List[Tuple[int, int]] = []
exclude_branch_ranges: List[Tuple[int, int]] = []
exclusion_stack_line = []
exclusion_stack_branch = []
for lineno, code in lines:
if _EXCLUDE_FLAG in code:
# process the exclusion marker
#
# header is a marker name like LCOV or GCOVR
#
# START flags are added to the exlusion stack
# STOP flags remove a marker from the exclusion stack
# line exclusion
excl_line_pattern = re.compile(
"(" + exclude_pattern_prefix + ")" + _EXCLUDE_LINE_PATTERN_POSTFIX
)
for header, flag in excl_line_pattern.findall(code):
if flag == "LINE":
if exclusion_stack_line:
warnings.line_after_start(
lineno, f"{header}_EXCL_LINE", exclusion_stack_line[-1][1]
)
else:
exclude_line_ranges.append((lineno, lineno + 1))
if flag == "START":
exclusion_stack_line.append((header, lineno))
elif flag == "STOP":
if not exclusion_stack_line:
warnings.stop_without_start(
lineno, f"{header}_EXCL_START", f"{header}_EXCL_STOP"
)
continue
start_header, start_lineno = exclusion_stack_line.pop()
if header != start_header:
warnings.mismatched_start_stop(
start_lineno,
f"{start_header}_EXCL_START",
lineno,
f"{header}_EXCL_STOP",
)
exclude_line_ranges.append((start_lineno, lineno))
else: # pragma: no cover
pass
# branche exclusion
excl_branch_pattern = re.compile(
"(" + exclude_pattern_prefix + ")" + _EXCLUDE_BRANCH_PATTERN_POSTFIX
)
for header, flag in excl_branch_pattern.findall(code):
if flag == "LINE":
if exclusion_stack_branch:
warnings.line_after_start(
lineno, f"{header}_EXCL_LINE", exclusion_stack_branch[-1][1]
)
else:
exclude_branch_ranges.append((lineno, lineno + 1))
if flag == "START":
exclusion_stack_branch.append((header, lineno))
elif flag == "STOP":
if not exclusion_stack_branch:
warnings.stop_without_start(
lineno, f"{header}_EXCL_START", f"{header}_EXCL_STOP"
)
continue
start_header, start_lineno = exclusion_stack_branch.pop()
if header != start_header:
warnings.mismatched_start_stop(
start_lineno,
f"{start_header}_EXCL_START",
lineno,
f"{header}_EXCL_STOP",
)
exclude_branch_ranges.append((start_lineno, lineno))
else: # pragma: no cover
pass
if exclude_lines_by_pattern_regex:
if exclude_lines_by_pattern_regex.match(code):
exclude_line_ranges.append((lineno, lineno + 1))
if exclude_branches_by_pattern_regex:
if exclude_branches_by_pattern_regex.match(code):
exclude_branch_ranges.append((lineno, lineno + 1))
for header, lineno in exclusion_stack_line:
warnings.start_without_stop(
lineno, f"{header}_EXCL_START", f"{header}_EXCL_STOP"
)
for header, lineno in exclusion_stack_branch:
warnings.start_without_stop(
lineno, f"{header}_EXCL_START", f"{header}_EXCL_STOP"
)
return [
_make_is_in_any_range(exclude_line_ranges),
_make_is_in_any_range(exclude_branch_ranges),
]
|
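Both columns return two predicates built by `_make_is_in_any_range` from half-open `(start, stop)` ranges; the columns differ only in which warning call handles a branch-level LINE marker inside an open exclusion block. The helper below is a hypothetical re-implementation of the range predicate, shown only to illustrate the docstring's expected output:

```python
from typing import Callable, List, Tuple

def make_is_in_any_range(ranges: List[Tuple[int, int]]) -> Callable[[int], bool]:
    """Return a predicate testing membership in any half-open [start, stop) range."""
    def is_in_any_range(lineno: int) -> bool:
        return any(start <= lineno < stop for start, stop in ranges)
    return is_in_any_range

# Ranges produced for the docstring example: EXCL_LINE at 11, pattern match at 13,
# and an EXCL_START/STOP block covering lines 15..17.
exclude_line = make_is_in_any_range([(11, 12), (13, 14), (15, 18)])
print([n for n in range(30) if exclude_line(n)])  # [11, 13, 15, 16, 17]
```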
34,409 | def get_entity_extractors(interpreter: Interpreter) -> Set[Text]:
"""Finds the names of entity extractors used by the interpreter.
Processors are removed since they do not detect the boundaries themselves.
"""
from rasa.nlu.extractors.extractor import EntityExtractor
extractors = set()
for c in interpreter.pipeline:
if isinstance(c, EntityExtractor):
if c.name == "DIETClassifier":
if c.component_config[ENTITY_RECOGNITION]:
extractors.add(c.name)
else:
extractors.add(c.name)
return extractors - ENTITY_PROCESSORS
| def get_entity_extractors(interpreter: Interpreter) -> Set[Text]:
"""Finds the names of entity extractors used by the interpreter.
Processors are removed since they do not detect the boundaries themselves.
"""
from rasa.nlu.extractors.extractor import EntityExtractor
extractors = set()
for c in interpreter.pipeline:
if isinstance(c, EntityExtractor):
if isinstance(c, DIETClassifier):
if c.component_config[ENTITY_RECOGNITION]:
extractors.add(c.name)
else:
extractors.add(c.name)
return extractors - ENTITY_PROCESSORS
|
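The modification replaces the class-name string comparison with an `isinstance` check, which also matches subclasses of `DIETClassifier`. A tiny illustration with stand-in classes (these are placeholders, not Rasa's):

```python
class EntityExtractor:
    name = "EntityExtractor"

class DIETClassifier(EntityExtractor):
    name = "DIETClassifier"

class CustomDIET(DIETClassifier):
    name = "CustomDIET"

c = CustomDIET()
print(c.name == "DIETClassifier")     # False: the string check misses subclasses
print(isinstance(c, DIETClassifier))  # True: isinstance covers them
```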
37,946 | def test_project_generate():
"""
Run project by passing in a pandas.DataFrame as input.
"""
output = project(center=[0, -1], endpoint=[0, 1], flat_earth=True, generate=0.5)
assert isinstance(output, pd.DataFrame)
assert output.shape == (5, 3)
npt.assert_allclose(output.iloc[1], [3.061617e-17, -0.5, 0.5])
| def test_project_generate():
"""
Run project by passing in center and endpoint as input.
"""
output = project(center=[0, -1], endpoint=[0, 1], flat_earth=True, generate=0.5)
assert isinstance(output, pd.DataFrame)
assert output.shape == (5, 3)
npt.assert_allclose(output.iloc[1], [3.061617e-17, -0.5, 0.5])
|
27,950 | def generate_ast(triple_arch, action, source, config, env):
""" Generates ASTs for the current compilation command. """
ast_joined_path = os.path.join(config.ctu_dir, triple_arch, 'ast',
os.path.realpath(source)[1:] + '.ast')
ast_path = os.path.abspath(ast_joined_path)
ast_dir = os.path.dirname(ast_path)
if not os.path.isdir(ast_dir):
try:
os.makedirs(ast_dir)
except OSError:
pass
cmd = ctu_triple_arch.get_compile_command(action, config, source)
# __clang__analyzer__ macro needs to be set in the imported TUs too.
cmd.extend(['-emit-ast', '-D__clang_analyzer__', '-w', '-o', ast_path])
cmdstr = ' '.join(cmd)
LOG.debug_analyzer("Generating AST using '%s'", cmdstr)
ret_code, _, err = analyzer_base.SourceAnalyzer.run_proc(cmd,
env,
action.directory)
if ret_code != 0:
LOG.error("Error generating AST.\n\ncommand:\n\n%s\n\nstderr:\n\n%s",
cmdstr, err)
| def generate_ast(triple_arch, action, source, config, env):
""" Generates ASTs for the current compilation command. """
ast_joined_path = os.path.join(config.ctu_dir, triple_arch, 'ast',
os.path.realpath(source)[1:] + '.ast')
ast_path = os.path.abspath(ast_joined_path)
ast_dir = os.path.dirname(ast_path)
if not os.path.isdir(ast_dir):
try:
os.makedirs(ast_dir)
except OSError:
pass
cmd = ctu_triple_arch.get_compile_command(action, config, source)
# __clang__analyzer__ macro needs to be set in the imported TUs too.
cmd.extend(['-emit-ast', '-D__clang_analyzer__', '-w', '-o', ast_path])
cmdstr = ' '.join(cmd)
LOG.debug_analyzer("Generating AST using '%s'", ' '.join(cmd))
ret_code, _, err = analyzer_base.SourceAnalyzer.run_proc(cmd,
env,
action.directory)
if ret_code != 0:
LOG.error("Error generating AST.\n\ncommand:\n\n%s\n\nstderr:\n\n%s",
cmdstr, err)
|
44,364 | def test_vmc_functions():
ha, sx, ma, sampler, driver = _setup_vmc()
driver.advance(500)
assert driver.energy.mean == approx(ma.expect(ha).mean, abs=1e-5)
state = ma.to_array()
n_samples = 16000
ma.n_samples = n_samples
ma.n_discard_per_chain = 100
# Check zero gradieent
_, grads = ma.expect_and_grad(ha)
def check_shape(a, b):
assert a.shape == b.shape
jax.tree_multimap(check_shape, grads, ma.parameters)
grads, _ = nk.jax.tree_ravel(grads)
assert np.mean(np.abs(grads) ** 2) == approx(0.0, abs=1e-8)
# end
for op, name in (ha, "ha"), (sx, "sx"):
print("Testing expectation of op={}".format(name))
exact_ex = (state.T.conj() @ op.to_linear_operator() @ state).real
op_stats = ma.expect(op)
mean = op_stats.mean
var = op_stats.variance
print(mean, var)
# 5-sigma test for expectation values
tol = np.sqrt(var / float(ma.n_samples)) * 5
assert mean.real == approx(exact_ex, abs=tol)
| def test_vmc_functions():
ha, sx, ma, sampler, driver = _setup_vmc()
driver.advance(500)
assert driver.energy.mean == approx(ma.expect(ha).mean, abs=1e-5)
state = ma.to_array()
n_samples = 16000
ma.n_samples = n_samples
ma.n_discard_per_chain = 100
# Check zero gradient
_, grads = ma.expect_and_grad(ha)
def check_shape(a, b):
assert a.shape == b.shape
jax.tree_multimap(check_shape, grads, ma.parameters)
grads, _ = nk.jax.tree_ravel(grads)
assert np.mean(np.abs(grads) ** 2) == approx(0.0, abs=1e-8)
# end
for op, name in (ha, "ha"), (sx, "sx"):
print("Testing expectation of op={}".format(name))
exact_ex = (state.T.conj() @ op.to_linear_operator() @ state).real
op_stats = ma.expect(op)
mean = op_stats.mean
var = op_stats.variance
print(mean, var)
# 5-sigma test for expectation values
tol = np.sqrt(var / float(ma.n_samples)) * 5
assert mean.real == approx(exact_ex, abs=tol)
|
59,386 | def test_redact():
redacted = wandb_settings._redact_dict({"this": 2, "that": 9, "api_key": "secret"})
assert redacted == {"this": 2, "that": 9, "api_key": "***REDACTED***"}
redacted = wandb_settings._redact_dict(
{"ok": "keep", "unsafe": 9, "bad": "secret"},
unsafe_keys={"unsafe", "bad"},
redact_str="OMIT",
)
assert redacted == {"ok": "keep", "unsafe": "OMIT", "bad": "OMIT"}
| def test_redact():
redacted = wandb_settings._redact_dict({"this": 2, "that": 9, "api_key": "secret"})
assert redacted == {"this": 2, "that": 9, "api_key": "***REDACTED***"}
redacted = wandb_settings._redact_dict({"this": 2, "that": 9})
assert redacted == {"this": 2, "that": 9}
redacted = wandb_settings._redact_dict(
{"ok": "keep", "unsafe": 9, "bad": "secret"},
unsafe_keys={"unsafe", "bad"},
redact_str="OMIT",
)
assert redacted == {"ok": "keep", "unsafe": "OMIT", "bad": "OMIT"}
|
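The modified test adds a case with no sensitive keys. A hedged sketch of what a `_redact_dict`-style helper could look like (an assumption for illustration, not wandb's implementation):

```python
def redact_dict(d, unsafe_keys=frozenset({"api_key"}), redact_str="***REDACTED***"):
    """Return a copy of d with the values of unsafe keys replaced by redact_str."""
    return {k: (redact_str if k in unsafe_keys else v) for k, v in d.items()}

assert redact_dict({"this": 2, "that": 9, "api_key": "secret"}) == {
    "this": 2, "that": 9, "api_key": "***REDACTED***"
}
assert redact_dict({"this": 2, "that": 9}) == {"this": 2, "that": 9}
assert redact_dict(
    {"ok": "keep", "unsafe": 9, "bad": "secret"},
    unsafe_keys={"unsafe", "bad"},
    redact_str="OMIT",
) == {"ok": "keep", "unsafe": "OMIT", "bad": "OMIT"}
```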
32,183 | def extract_trigger_kv(trigger_events: list):
"""
The main information about the alert is more convenient to have as key/value pairs
instead of using field weights as it makes writing mapping for individual values easier.
"""
trigger_event = None
for event in trigger_events:
if event.get("trigger"):
trigger_event = event
break
flattened = {}
if trigger_event:
for field in trigger_event["fields"]:
key = field["key"]
value = field["value"]
flattened[key] = value
return flattened
| def extract_trigger_kv(trigger_events: list):
"""
The main information about the alert is more convenient to have as key/value pairs
instead of using field weights as it makes writing mapping for individual values easier.
"""
trigger_event = None
for event in trigger_events:
if event.get("trigger"):
trigger_event = event
break
flattened = {}
if trigger_event:
for field in trigger_event.get("fields", []):
key = field["key"]
value = field["value"]
flattened[key] = value
return flattened
|
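The change swaps `trigger_event["fields"]` for `trigger_event.get("fields", [])`, so an event without a `fields` key no longer raises `KeyError`. A minimal illustration:

```python
trigger_event = {"trigger": True}  # no "fields" key present

flattened = {}
for field in trigger_event.get("fields", []):  # safely iterates over an empty list
    flattened[field["key"]] = field["value"]
print(flattened)  # {}
```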
10,866 | def ini_check ():
"""set environmental variable and change directory"""
# Note:
# hal_gremlin gets INI file from os.environ (only)
# hal_gremlin expects cwd to be same as INI file
ini_filename = get_linuxcnc_ini_file()
if ini_filename is not None:
os.putenv('INI_FILE_NAME',ini_filename) # ineffective
os.environ['INI_FILE_NAME'] = ini_filename # need for hal_gremlin
os.chdir(os.path.dirname(ini_filename))
if g_verbose:
print(('ini_check: INI_FILENAME= %s' % ini_filename))
print(('ini_check: curdir= %s' % os.path.curdir))
return True # success
print((_('%s:linuxcnc INI file not available') % g_progname))
return False # exit here crashes glade-gtk2
| def ini_check ():
"""set environmental variable and change directory"""
# Note:
# hal_gremlin gets INI file from os.environ (only)
# hal_gremlin expects cwd to be same as INI file
ini_filename = get_linuxcnc_ini_file()
if ini_filename is not None:
os.putenv('INI_FILE_NAME',ini_filename) # ineffective
os.environ['INI_FILE_NAME'] = ini_filename # need for hal_gremlin
os.chdir(os.path.dirname(ini_filename))
if g_verbose:
print(('ini_check: INI_FILENAME= %s' % ini_filename))
print(('ini_check: curdir= %s' % os.path.curdir))
return True # success
print((_('%s: LinuxCNC INI file not available') % g_progname))
return False # exit here crashes glade-gtk2
|
27,314 | def compute_authenticator(raw_payload: Union[bytes, bytearray],
dbmsg: Message,
authenticator_fn: Callable[[Message,
bytearray,
int],
bytearray],
freshness_value: int) \
-> bytearray:
"""Given a byte-like object that contains the encoded signals to be
send, compute the full authenticator SecOC value.
"""
if dbmsg.autosar is None or dbmsg.autosar.secoc is None:
raise SecOCError(f'Message "{dbmsg.name}" is not secured')
secoc_props = dbmsg.autosar.secoc
n_fresh = secoc_props.freshness_bit_length
payload_len = secoc_props.payload_length
# build the data that needs to be passed to authentificator function
auth_data = bitstruct.pack(f'u16' # data ID
f'r{payload_len*8}' # payload to be secured
f'u{n_fresh}', # freshness value
secoc_props.data_id,
raw_payload[:payload_len],
freshness_value)
# compute authenticator value
return authenticator_fn(dbmsg, auth_data, freshness_value)
| def compute_authenticator(raw_payload: Union[bytes, bytearray],
dbmsg: Message,
authenticator_fn: Callable[[Message,
bytearray,
int],
bytearray],
freshness_value: int) \
-> bytearray:
"""Given a byte-like object that contains the encoded signals to be
sent, compute the full authenticator SecOC value.
"""
if dbmsg.autosar is None or dbmsg.autosar.secoc is None:
raise SecOCError(f'Message "{dbmsg.name}" is not secured')
secoc_props = dbmsg.autosar.secoc
n_fresh = secoc_props.freshness_bit_length
payload_len = secoc_props.payload_length
# build the data that needs to be passed to authentificator function
auth_data = bitstruct.pack(f'u16' # data ID
f'r{payload_len*8}' # payload to be secured
f'u{n_fresh}', # freshness value
secoc_props.data_id,
raw_payload[:payload_len],
freshness_value)
# compute authenticator value
return authenticator_fn(dbmsg, auth_data, freshness_value)
|
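Only a docstring typo changes ('send' → 'sent'). For context, a hedged sketch of packing the authenticated data with the third-party `bitstruct` package; the data ID, payload, and freshness bit length are invented values:

```python
import bitstruct  # third-party package: pip install bitstruct

data_id = 0x1234
payload = bytes([0xDE, 0xAD, 0xBE, 0xEF])  # assumed 4-byte secured payload
freshness_value = 5
n_fresh = 8                                # assumed freshness bit length

auth_data = bitstruct.pack(f'u16r{len(payload) * 8}u{n_fresh}',
                           data_id, payload, freshness_value)
print(auth_data.hex())  # bytes that would be handed to the authenticator callback
```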
59,851 | def fake_random_ds(
ndims,
peak_value=1.0,
fields=None,
units=None,
particle_fields=None,
particle_field_units=None,
negative=False,
nprocs=1,
particles=0,
length_unit=1.0,
unit_system="cgs",
bbox=None,
):
from yt.loaders import load_uniform_grid
if fields is not None and units is None:
raise RuntimeError(
"Error when creating a fake_random_ds:"
" passed a non-default `fields` without specifying `units`."
)
if fields is None:
fields = _fake_random_ds_default_fields
if units is None:
units = _fake_random_ds_default_units
prng = RandomState(0x4D3D3D3)
if not is_sequence(ndims):
ndims = [ndims, ndims, ndims]
else:
assert len(ndims) == 3
if not is_sequence(negative):
negative = [negative for f in fields]
assert len(fields) == len(negative)
offsets = []
for n in negative:
if n:
offsets.append(0.5)
else:
offsets.append(0.0)
data = {}
for field, offset, u in zip(fields, offsets, units):
v = (prng.random_sample(ndims) - offset) * peak_value
if field[0] == "all":
v = v.ravel()
data[field] = (v, u)
if particles:
if particle_fields is not None:
for field, unit in zip(particle_fields, particle_field_units):
if field in ("particle_position", "particle_velocity"):
data["io", field] = (prng.random_sample((int(particles), 3)), unit)
else:
data["io", field] = (prng.random_sample(size=int(particles)), unit)
else:
for f in (f"particle_position_{ax}" for ax in "xyz"):
data["io", f] = (prng.random_sample(size=particles), "code_length")
for f in (f"particle_velocity_{ax}" for ax in "xyz"):
data["io", f] = (prng.random_sample(size=particles) - 0.5, "cm/s")
data["io", "particle_mass"] = (prng.random_sample(particles), "g")
ug = load_uniform_grid(
data,
ndims,
length_unit=length_unit,
nprocs=nprocs,
unit_system=unit_system,
bbox=bbox,
)
return ug
| def fake_random_ds(
ndims,
peak_value=1.0,
fields=None,
units=None,
particle_fields=None,
particle_field_units=None,
negative=False,
nprocs=1,
particles=0,
length_unit=1.0,
unit_system="cgs",
bbox=None,
):
from yt.loaders import load_uniform_grid
if (fields, units) == (None, None):
fields = _fake_random_ds_default_fields
units = _fake_random_ds_default_units
elif None in (fields, units):
raise ValueError(
"Error in creating a fake_random_ds:"
" `fields` and `units` keyword arguments cannot be used separately."
)
elif len(fields) != len(units):
raise ValueError(
f"inconsistent sizes in `fields` ({len(fields)}) and `units` ({len(units)}) arguments."
)
prng = RandomState(0x4D3D3D3)
if not is_sequence(ndims):
ndims = [ndims, ndims, ndims]
else:
assert len(ndims) == 3
if not is_sequence(negative):
negative = [negative for f in fields]
assert len(fields) == len(negative)
offsets = []
for n in negative:
if n:
offsets.append(0.5)
else:
offsets.append(0.0)
data = {}
for field, offset, u in zip(fields, offsets, units):
v = (prng.random_sample(ndims) - offset) * peak_value
if field[0] == "all":
v = v.ravel()
data[field] = (v, u)
if particles:
if particle_fields is not None:
for field, unit in zip(particle_fields, particle_field_units):
if field in ("particle_position", "particle_velocity"):
data["io", field] = (prng.random_sample((int(particles), 3)), unit)
else:
data["io", field] = (prng.random_sample(size=int(particles)), unit)
else:
for f in (f"particle_position_{ax}" for ax in "xyz"):
data["io", f] = (prng.random_sample(size=particles), "code_length")
for f in (f"particle_velocity_{ax}" for ax in "xyz"):
data["io", f] = (prng.random_sample(size=particles) - 0.5, "cm/s")
data["io", "particle_mass"] = (prng.random_sample(particles), "g")
ug = load_uniform_grid(
data,
ndims,
length_unit=length_unit,
nprocs=nprocs,
unit_system=unit_system,
bbox=bbox,
)
return ug
|
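The rewrite consolidates the `fields`/`units` validation: the defaults apply only when both are omitted, supplying just one raises, and mismatched lengths raise. A condensed standalone sketch of that validation pattern (all names and defaults are placeholders):

```python
def validate_fields_units(fields=None, units=None,
                          default_fields=("density",), default_units=("g/cm**3",)):
    if (fields, units) == (None, None):
        return default_fields, default_units
    if None in (fields, units):
        raise ValueError("`fields` and `units` cannot be used separately.")
    if len(fields) != len(units):
        raise ValueError(
            f"inconsistent sizes in `fields` ({len(fields)}) and `units` ({len(units)})."
        )
    return fields, units

print(validate_fields_units())                          # falls back to the defaults
print(validate_fields_units(("temperature",), ("K",)))  # explicit, matching pair
```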
38,454 | def merge_grids_of_equal_dim(gb):
"""Merges all grids that have the same dimension in the GridBucket. Thus
the returned GridBucket only has one grid per dimension. See also
pp.utils.grid_utils.merge_grids(grids).
Parameters:
gb (pp.GridBucket): A Grid bucket with possible many grids per dimension
Returns:
mergedGb (pp.GridBucket): A grid bucket with the merged grids of gb, and
updated projections and mortar grids.
"""
dimMax = gb.dim_max()
# First merge all grid nodes.
mergedGrids = []
gridsOfDim = np.empty(dimMax + 1, dtype=list)
gridIdx = np.empty(dimMax + 1, dtype=list)
numCells = np.empty(dimMax + 1, dtype=list)
for i in range(dimMax + 1):
gridIdx[i] = []
numCells[i] = []
gridsOfDim[i] = gb.grids_of_dimension(i)
if len(gridsOfDim[i]) == 0:
mergedGrids.append([])
continue
mergedGrids.append(merge_grids(gridsOfDim[i]))
# Store the node number of each merged node.
# This is used to obtain the correct merged
# mortar projections
for grid in gridsOfDim[i]:
d = gb.node_props(grid)
gridIdx[i].append(d["node_number"])
numCells[i].append(grid.num_cells)
# Initiate mortar grids.
mortarsOfDim = np.empty(dimMax + 1, dtype=list)
for i in range(len(mortarsOfDim)):
mortarsOfDim[i] = []
# Collect all mortar grids in a list of list. list[i] contains
# all mortar grids of dimension i.
for e, d in gb.edges():
mortar_grids = []
mg = d["mortar_grid"]
for sg in mg.side_grids.values():
mortar_grids.append(sg)
mortarsOfDim[mg.dim].append(merge_grids(mortar_grids))
# Initialize mortar projections.
primary2mortar = np.empty(dimMax + 1, dtype=np.ndarray)
secondary2mortar = np.empty(dimMax + 1, dtype=np.ndarray)
# Loop over all dimensions and initiate the mapping size
for i in range(dimMax):
primary2mortar[i] = np.empty(
(len(mortarsOfDim[i]), len(gridsOfDim[i + 1])), dtype=np.object
)
secondary2mortar[i] = np.empty(
(len(mortarsOfDim[i]), len(gridsOfDim[i])), dtype=np.object
)
# Add an empty grid for mortar row. This is to let the block matrices
# mergedSecondary2Mortar and mergedPrimary2Mortar know the correct dimension
# if there is an empty mapping. It should be sufficient to add zeros to
# one of the mortar grids.
for j in range(len(gridsOfDim[i + 1])):
if len(mortarsOfDim[i]) == 0:
continue
numMortarCells = mortarsOfDim[i][0].num_cells
numGridFaces = gridsOfDim[i + 1][j].num_faces
primary2mortar[i][0][j] = sps.csc_matrix((numMortarCells, numGridFaces))
for j in range(len(gridsOfDim[i])):
if len(mortarsOfDim[i]) == 0:
continue
numMortarCells = mortarsOfDim[i][0].num_cells
numGridCells = gridsOfDim[i][j].num_cells
secondary2mortar[i][0][j] = sps.csc_matrix((numMortarCells, numGridCells))
# Collect the mortar projections
mortarPos = np.zeros(dimMax + 1, dtype=np.int)
for e, d in gb.edges():
mg = d["mortar_grid"]
gs, gm = gb.nodes_of_edge(e)
ds = gb.node_props(gs)
dm = gb.node_props(gm)
assert gs.dim == mg.dim and gm.dim == mg.dim + 1
secondaryPos = np.argwhere(
np.array(gridIdx[mg.dim]) == ds["node_number"]
).ravel()
primaryPos = np.argwhere(
np.array(gridIdx[mg.dim + 1]) == dm["node_number"]
).ravel()
assert secondaryPos.size == 1 and primaryPos.size == 1
secondary2mortar[mg.dim][
mortarPos[mg.dim], secondaryPos
] = mg.secondary_to_mortar_int()
primary2mortar[mg.dim][
mortarPos[mg.dim], primaryPos
] = mg.primary_to_mortar_int()
mortarPos[mg.dim] += 1
# Finally, merge the mortar grids and projections
mergedMortars = []
mergedSecondary2Mortar = []
mergedPrimary2Mortar = []
for dim in range(dimMax + 1):
if len(mortarsOfDim[dim]) == 0:
mergedMortars.append([])
mergedSecondary2Mortar.append([])
mergedPrimary2Mortar.append([])
else:
mergedMortars.append(merge_grids(mortarsOfDim[dim]))
mergedSecondary2Mortar.append(sps.bmat(secondary2mortar[dim], format="csc"))
mergedPrimary2Mortar.append(sps.bmat(primary2mortar[dim], format="csc"))
# Initiate the new grid bucket and add the merged grids.
mergedGb = pp.GridBucket()
mergedGb.add_nodes([g for g in mergedGrids if g != []])
for dim in range(dimMax + 1):
cell_idx = []
for i in range(len(gridIdx[dim])):
cell_idx.append(gridIdx[dim][i] * np.ones(numCells[dim][i], dtype=int))
if len(gridIdx[dim]) > 0:
data = mergedGb.node_props(mergedGb.grids_of_dimension(dim)[0])
data["cell_2_frac"] = np.hstack(cell_idx)
for dim in range(dimMax):
mg = mergedMortars[dim]
if mg == list([]):
continue
gm = mergedGrids[dim + 1]
gs = mergedGrids[dim]
mergedGb.add_edge((gm, gs), np.empty(0))
mg = pp.MortarGrid(gs.dim, {pp.grids.mortar_grid.MortarSides.NONE_SIDE: mg})
mg._primary_to_mortar_int = mergedPrimary2Mortar[dim]
mg._secondary_to_mortar_int = mergedSecondary2Mortar[dim]
d = mergedGb.edge_props((gm, gs))
d["mortar_grid"] = mg
mergedGb.assign_node_ordering()
# for g, _ in mergedGb:
# g.tags: Dict = {}
# g.initiate_face_tags()
# g.update_boundary_face_tag()
# # Add tag for the boundary nodes
# g.initiate_node_tags()
# g.update_boundary_node_tag()
return mergedGb
| def merge_grids_of_equal_dim(gb: pp.GridBucket) -> pp.GridBucket:
"""Merges all grids that have the same dimension in the GridBucket. Thus
the returned GridBucket only has one grid per dimension. See also
pp.utils.grid_utils.merge_grids(grids).
Parameters:
gb (pp.GridBucket): A Grid bucket with possible many grids per dimension
Returns:
mergedGb (pp.GridBucket): A grid bucket with the merged grids of gb, and
updated projections and mortar grids.
"""
dimMax = gb.dim_max()
# First merge all grid nodes.
mergedGrids = []
gridsOfDim = np.empty(dimMax + 1, dtype=list)
gridIdx = np.empty(dimMax + 1, dtype=list)
numCells = np.empty(dimMax + 1, dtype=list)
for i in range(dimMax + 1):
gridIdx[i] = []
numCells[i] = []
gridsOfDim[i] = gb.grids_of_dimension(i)
if len(gridsOfDim[i]) == 0:
mergedGrids.append([])
continue
mergedGrids.append(merge_grids(gridsOfDim[i]))
# Store the node number of each merged node.
# This is used to obtain the correct merged
# mortar projections
for grid in gridsOfDim[i]:
d = gb.node_props(grid)
gridIdx[i].append(d["node_number"])
numCells[i].append(grid.num_cells)
# Initiate mortar grids.
mortarsOfDim = np.empty(dimMax + 1, dtype=list)
for i in range(len(mortarsOfDim)):
mortarsOfDim[i] = []
# Collect all mortar grids in a list of list. list[i] contains
# all mortar grids of dimension i.
for e, d in gb.edges():
mortar_grids = []
mg = d["mortar_grid"]
for sg in mg.side_grids.values():
mortar_grids.append(sg)
mortarsOfDim[mg.dim].append(merge_grids(mortar_grids))
# Initialize mortar projections.
primary2mortar = np.empty(dimMax + 1, dtype=np.ndarray)
secondary2mortar = np.empty(dimMax + 1, dtype=np.ndarray)
# Loop over all dimensions and initiate the mapping size
for i in range(dimMax):
primary2mortar[i] = np.empty(
(len(mortarsOfDim[i]), len(gridsOfDim[i + 1])), dtype=np.object
)
secondary2mortar[i] = np.empty(
(len(mortarsOfDim[i]), len(gridsOfDim[i])), dtype=np.object
)
# Add an empty grid for mortar row. This is to let the block matrices
# mergedSecondary2Mortar and mergedPrimary2Mortar know the correct dimension
# if there is an empty mapping. It should be sufficient to add zeros to
# one of the mortar grids.
for j in range(len(gridsOfDim[i + 1])):
if len(mortarsOfDim[i]) == 0:
continue
numMortarCells = mortarsOfDim[i][0].num_cells
numGridFaces = gridsOfDim[i + 1][j].num_faces
primary2mortar[i][0][j] = sps.csc_matrix((numMortarCells, numGridFaces))
for j in range(len(gridsOfDim[i])):
if len(mortarsOfDim[i]) == 0:
continue
numMortarCells = mortarsOfDim[i][0].num_cells
numGridCells = gridsOfDim[i][j].num_cells
secondary2mortar[i][0][j] = sps.csc_matrix((numMortarCells, numGridCells))
# Collect the mortar projections
mortarPos = np.zeros(dimMax + 1, dtype=np.int)
for e, d in gb.edges():
mg = d["mortar_grid"]
gs, gm = gb.nodes_of_edge(e)
ds = gb.node_props(gs)
dm = gb.node_props(gm)
assert gs.dim == mg.dim and gm.dim == mg.dim + 1
secondaryPos = np.argwhere(
np.array(gridIdx[mg.dim]) == ds["node_number"]
).ravel()
primaryPos = np.argwhere(
np.array(gridIdx[mg.dim + 1]) == dm["node_number"]
).ravel()
assert secondaryPos.size == 1 and primaryPos.size == 1
secondary2mortar[mg.dim][
mortarPos[mg.dim], secondaryPos
] = mg.secondary_to_mortar_int()
primary2mortar[mg.dim][
mortarPos[mg.dim], primaryPos
] = mg.primary_to_mortar_int()
mortarPos[mg.dim] += 1
# Finally, merge the mortar grids and projections
mergedMortars = []
mergedSecondary2Mortar = []
mergedPrimary2Mortar = []
for dim in range(dimMax + 1):
if len(mortarsOfDim[dim]) == 0:
mergedMortars.append([])
mergedSecondary2Mortar.append([])
mergedPrimary2Mortar.append([])
else:
mergedMortars.append(merge_grids(mortarsOfDim[dim]))
mergedSecondary2Mortar.append(sps.bmat(secondary2mortar[dim], format="csc"))
mergedPrimary2Mortar.append(sps.bmat(primary2mortar[dim], format="csc"))
# Initiate the new grid bucket and add the merged grids.
mergedGb = pp.GridBucket()
mergedGb.add_nodes([g for g in mergedGrids if g != []])
for dim in range(dimMax + 1):
cell_idx = []
for i in range(len(gridIdx[dim])):
cell_idx.append(gridIdx[dim][i] * np.ones(numCells[dim][i], dtype=int))
if len(gridIdx[dim]) > 0:
data = mergedGb.node_props(mergedGb.grids_of_dimension(dim)[0])
data["cell_2_frac"] = np.hstack(cell_idx)
for dim in range(dimMax):
mg = mergedMortars[dim]
if mg == list([]):
continue
gm = mergedGrids[dim + 1]
gs = mergedGrids[dim]
mergedGb.add_edge((gm, gs), np.empty(0))
mg = pp.MortarGrid(gs.dim, {pp.grids.mortar_grid.MortarSides.NONE_SIDE: mg})
mg._primary_to_mortar_int = mergedPrimary2Mortar[dim]
mg._secondary_to_mortar_int = mergedSecondary2Mortar[dim]
d = mergedGb.edge_props((gm, gs))
d["mortar_grid"] = mg
mergedGb.assign_node_ordering()
# for g, _ in mergedGb:
# g.tags: Dict = {}
# g.initiate_face_tags()
# g.update_boundary_face_tag()
# # Add tag for the boundary nodes
# g.initiate_node_tags()
# g.update_boundary_node_tag()
return mergedGb
|
30,119 | def distance_to_identity(dist, d_low=None, d_high=None):
"""
ANI = 1-distance
"""
if not 0 <= dist <= 1:
raise ValueError(f"Error: distance value {dist} is not between 0 and 1!")
ident = 1 - dist
result = ident
id_low, id_high = None, None
if any([d_low is not None, d_high is not None]):
if d_low is not None and d_high is not None:
if (0 <= d_low <= 1) and (0 <= d_high <= 1):
id_high = 1 - d_low
id_low = 1 - d_high
result = [ident, id_low, id_high]
else:
raise ValueError(
"Error: `distance_to_identity` expects either one value (distance) or three values (distance, low_ci, high_ci)."
)
return result
| def distance_to_identity(dist, *, d_low=None, d_high=None):
"""
ANI = 1-distance
"""
if not 0 <= dist <= 1:
raise ValueError(f"Error: distance value {dist} is not between 0 and 1!")
ident = 1 - dist
result = ident
id_low, id_high = None, None
if any([d_low is not None, d_high is not None]):
if d_low is not None and d_high is not None:
if (0 <= d_low <= 1) and (0 <= d_high <= 1):
id_high = 1 - d_low
id_low = 1 - d_high
result = [ident, id_low, id_high]
else:
raise ValueError(
"Error: `distance_to_identity` expects either one value (distance) or three values (distance, low_ci, high_ci)."
)
return result
|
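The only change is the bare `*` in the signature, which makes `d_low` and `d_high` keyword-only. A two-line illustration of the effect, using a stripped-down stand-in for the function above:

```python
def distance_to_identity(dist, *, d_low=None, d_high=None):
    return 1 - dist

distance_to_identity(0.1, d_low=0.05, d_high=0.2)  # OK
# distance_to_identity(0.1, 0.05, 0.2)  # TypeError: takes 1 positional argument
```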
28,839 | def as_chunks(max_size: int, iterator: _Iter[T]) -> _Iter[List[T]]:
"""A helper function that collects an iterator into chunks of a given size.
.. versionadded:: 2.0
Parameters
----------
max_size: :class:`int`
The maximum chunk size.
iterator: Union[:class:`Iterator`, :class:`AsyncIterator`]
The iterator to chunk, can be sync or async.
.. warning::
The last chunk collected may not be as large as ``max_size``.
Returns
--------
Union[:class:`Iterator`, :class:`AsyncIterator`]
A new iterator which yields chunks of a given size.
"""
if max_size <= 0:
raise ValueError('Chunk sizes must be greater than 0.')
if isinstance(iterator, AsyncIterator):
return _achunk(iterator, max_size)
return _chunk(iterator, max_size)
| def as_chunks(max_size: int, iterator: _Iter[T]) -> _Iter[List[T]]:
"""A helper function that collects an iterator into chunks of a given size.
.. versionadded:: 2.0
Parameters
----------
max_size: :class:`int`
The maximum chunk size.
iterator: Union[:class:`collections.abc.Iterator`, :class:`collections.abc.AsyncIterator`]
The iterator to chunk, can be sync or async.
.. warning::
The last chunk collected may not be as large as ``max_size``.
Returns
--------
Union[:class:`Iterator`, :class:`AsyncIterator`]
A new iterator which yields chunks of a given size.
"""
if max_size <= 0:
raise ValueError('Chunk sizes must be greater than 0.')
if isinstance(iterator, AsyncIterator):
return _achunk(iterator, max_size)
return _chunk(iterator, max_size)
|
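Only the docstring cross-reference changes. For context, a hedged sketch of what a synchronous `_chunk` helper along these lines could look like (an assumption, not the library's implementation):

```python
from itertools import islice
from typing import Iterable, Iterator, List, TypeVar

T = TypeVar("T")

def chunk(iterable: Iterable[T], max_size: int) -> Iterator[List[T]]:
    """Yield lists of up to max_size items; the last chunk may be smaller."""
    it = iter(iterable)
    while True:
        ret = list(islice(it, max_size))
        if not ret:
            return
        yield ret

print(list(chunk(range(7), 3)))  # [[0, 1, 2], [3, 4, 5], [6]]
```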
32,012 | def parse_incident_from_item(item):
"""
Parses an incident from an item
:param item: item to parse
:return: Parsed item
"""
incident = {}
labels = []
try:
incident["details"] = item.text_body or item.body
except AttributeError:
incident["details"] = item.body
incident["name"] = item.subject
labels.append({"type": "Email/subject", "value": item.subject})
incident["occurred"] = item.datetime_created.ewsformat()
# handle recipients
if item.to_recipients:
for recipient in item.to_recipients:
labels.append({"type": "Email", "value": recipient.email_address})
# handle cc
if item.cc_recipients:
for recipient in item.cc_recipients:
labels.append({"type": "Email/cc", "value": recipient.email_address})
# handle email from
if item.sender:
labels.append({"type": "Email/from", "value": item.sender.email_address})
# email format
email_format = ""
try:
if item.text_body:
labels.append({"type": "Email/text", "value": item.text_body})
email_format = "text"
except AttributeError:
pass
if item.body:
labels.append({"type": "Email/html", "value": item.body})
email_format = "HTML"
labels.append({"type": "Email/format", "value": email_format})
# handle attachments
if item.attachments:
incident["attachment"] = []
for attachment in item.attachments:
try:
file_result = None
label_attachment_type = None
label_attachment_id_type = None
if isinstance(attachment, FileAttachment):
try:
if attachment.content:
# file attachment
label_attachment_type = "attachments"
label_attachment_id_type = "attachmentId"
# save the attachment
file_name = get_attachment_name(attachment.name)
file_result = fileResult(file_name, attachment.content)
# check for error
if file_result["Type"] == entryTypes["error"]:
demisto.error(file_result["Contents"])
raise Exception(file_result["Contents"])
# save attachment to incident
incident["attachment"].append(
{
"path": file_result["FileID"],
"name": get_attachment_name(attachment.name),
}
)
except TypeError as e:
if str(e) != "must be string or buffer, not None":
raise
continue
else:
# other item attachment
label_attachment_type = "attachmentItems"
label_attachment_id_type = "attachmentItemsId"
# save the attachment
if attachment.item.mime_content:
mime_content = attachment.item.mime_content
attached_email = email.message_from_bytes(mime_content) if isinstance(mime_content, bytes) \
else email.message_from_string(mime_content)
if attachment.item.headers:
attached_email_headers = []
for h, v in attached_email.items():
if not isinstance(v, str):
try:
v = str(v)
except: # noqa: E722
demisto.debug('cannot parse the header "{}"'.format(h))
continue
v = ' '.join(map(str.strip, v.split('\r\n')))
attached_email_headers.append((h, v))
for header in attachment.item.headers:
if (
(header.name, header.value)
not in attached_email_headers
and header.name != "Content-Type"
):
attached_email.add_header(header.name, header.value)
attached_email_bytes = attached_email.as_bytes()
chardet_detection = chardet.detect(attached_email_bytes)
encoding = chardet_detection.get('encoding', 'utf-8') or 'utf-8'
try:
# Trying to decode using the detected encoding
data = attached_email_bytes.decode(encoding)
except UnicodeDecodeError:
# In case the detected encoding fails apply the default encoding
demisto.info(f'Could not decode attached email using detected encoding:{encoding}, retrying '
f'using utf-8.\nAttached email:\n{attached_email}')
data = attached_email_bytes.decode('utf-8')
file_result = fileResult(
get_attachment_name(attachment.name) + ".eml",
data,
)
if file_result:
# check for error
if file_result["Type"] == entryTypes["error"]:
demisto.error(file_result["Contents"])
raise Exception(file_result["Contents"])
# save attachment to incident
incident["attachment"].append(
{
"path": file_result["FileID"],
"name": get_attachment_name(attachment.name) + ".eml",
}
)
labels.append(
{
"type": label_attachment_type,
"value": get_attachment_name(attachment.name),
}
)
labels.append(
{"type": label_attachment_id_type, "value": attachment.attachment_id.id}
)
except ErrorCannotOpenFileAttachment:
demisto.error("Could not open file attachment file for message_id: {}".format(item.message_id))
# handle headers
if item.headers:
headers = []
for header in item.headers:
labels.append(
{
"type": "Email/Header/{}".format(header.name),
"value": str(header.value),
}
)
headers.append("{}: {}".format(header.name, header.value))
labels.append({"type": "Email/headers", "value": "\r\n".join(headers)})
# handle item id
if item.message_id:
labels.append({"type": "Email/MessageId", "value": str(item.message_id)})
if item.id:
labels.append({"type": "Email/ID", "value": item.id})
labels.append({"type": "Email/itemId", "value": item.id})
# handle conversion id
if item.conversation_id:
labels.append({"type": "Email/ConversionID", "value": item.conversation_id.id})
incident["labels"] = labels
incident["rawJSON"] = json.dumps(parse_item_as_dict(item, None), ensure_ascii=False)
return incident
| def parse_incident_from_item(item):
"""
Parses an incident from an item
:param item: item to parse
:return: Parsed item
"""
incident = {}
labels = []
try:
incident["details"] = item.text_body or item.body
except AttributeError:
incident["details"] = item.body
incident["name"] = item.subject
labels.append({"type": "Email/subject", "value": item.subject})
incident["occurred"] = item.datetime_created.ewsformat()
# handle recipients
if item.to_recipients:
for recipient in item.to_recipients:
labels.append({"type": "Email", "value": recipient.email_address})
# handle cc
if item.cc_recipients:
for recipient in item.cc_recipients:
labels.append({"type": "Email/cc", "value": recipient.email_address})
# handle email from
if item.sender:
labels.append({"type": "Email/from", "value": item.sender.email_address})
# email format
email_format = ""
try:
if item.text_body:
labels.append({"type": "Email/text", "value": item.text_body})
email_format = "text"
except AttributeError:
pass
if item.body:
labels.append({"type": "Email/html", "value": item.body})
email_format = "HTML"
labels.append({"type": "Email/format", "value": email_format})
# handle attachments
if item.attachments:
incident["attachment"] = []
for attachment in item.attachments:
try:
file_result = None
label_attachment_type = None
label_attachment_id_type = None
if isinstance(attachment, FileAttachment):
try:
if attachment.content:
# file attachment
label_attachment_type = "attachments"
label_attachment_id_type = "attachmentId"
# save the attachment
file_name = get_attachment_name(attachment.name)
file_result = fileResult(file_name, attachment.content)
# check for error
if file_result["Type"] == entryTypes["error"]:
demisto.error(file_result["Contents"])
raise Exception(file_result["Contents"])
# save attachment to incident
incident["attachment"].append(
{
"path": file_result["FileID"],
"name": get_attachment_name(attachment.name),
}
)
except TypeError as e:
if str(e) != "must be string or buffer, not None":
raise
continue
else:
# other item attachment
label_attachment_type = "attachmentItems"
label_attachment_id_type = "attachmentItemsId"
# save the attachment
if attachment.item.mime_content:
mime_content = attachment.item.mime_content
attached_email = email.message_from_bytes(mime_content) if isinstance(mime_content, bytes) \
else email.message_from_string(mime_content)
if attachment.item.headers:
attached_email_headers = []
for h, v in attached_email.items():
if not isinstance(v, str):
try:
v = str(v)
except: # noqa: E722
demisto.debug('cannot parse the header "{}"'.format(h))
continue
v = ' '.join(map(str.strip, v.split('\r\n')))
attached_email_headers.append((h, v))
for header in attachment.item.headers:
if (
(header.name, header.value)
not in attached_email_headers
and header.name != "Content-Type"
):
attached_email.add_header(header.name, header.value)
attached_email_bytes = attached_email.as_bytes()
chardet_detection = chardet.detect(attached_email_bytes)
encoding = chardet_detection.get('encoding', 'utf-8') or 'utf-8'
try:
# Trying to decode using the detected encoding
data = attached_email_bytes.decode(encoding)
except UnicodeDecodeError:
# In case the detected encoding fails apply the default encoding
demisto.info(f'Could not decode attached email using detected encoding:{encoding}, retrying '
f'using utf-8.\nAttached email:\n{attached_email}')
data = attached_email_bytes.decode('utf-8')
file_result = fileResult(
get_attachment_name(attachment.name) + ".eml",
data,
)
if file_result:
# check for error
if file_result["Type"] == entryTypes["error"]:
demisto.error(file_result["Contents"])
raise Exception(file_result["Contents"])
# save attachment to incident
incident["attachment"].append(
{
"path": file_result["FileID"],
"name": get_attachment_name(attachment.name) + ".eml",
}
)
labels.append(
{
"type": label_attachment_type,
"value": get_attachment_name(attachment.name),
}
)
labels.append(
{"type": label_attachment_id_type, "value": attachment.attachment_id.id}
)
except ErrorCannotOpenFileAttachment:
demisto.error("Could not open attachment file for message_id: {}".format(item.message_id))
# handle headers
if item.headers:
headers = []
for header in item.headers:
labels.append(
{
"type": "Email/Header/{}".format(header.name),
"value": str(header.value),
}
)
headers.append("{}: {}".format(header.name, header.value))
labels.append({"type": "Email/headers", "value": "\r\n".join(headers)})
# handle item id
if item.message_id:
labels.append({"type": "Email/MessageId", "value": str(item.message_id)})
if item.id:
labels.append({"type": "Email/ID", "value": item.id})
labels.append({"type": "Email/itemId", "value": item.id})
# handle conversion id
if item.conversation_id:
labels.append({"type": "Email/ConversionID", "value": item.conversation_id.id})
incident["labels"] = labels
incident["rawJSON"] = json.dumps(parse_item_as_dict(item, None), ensure_ascii=False)
return incident
|
54,721 | def _flag_missing_timestamps(
df: pd.DataFrame,
frequency: str,
column_name: str,
first_time_stamp: pd.Timestamp,
last_time_stamp: pd.Timestamp,
) -> namedtuple:
"""
Utility function to test if input data frame is missing any timestamps relative to expected timestamps
generated based on the first_time_stamp, last_time_stamp and frequency.
:param pd.DataFrame df: data frame which needs to be tested for missing timestamps
:param str frequency: frequency i.e. sampling frequency of the data, expressed in seconds. A list of acceptable
frequency strings are available here
(https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases)
:param str column_name: name of the column which has time series if not the index.
:param pd.Timestamp first_time_stamp: timestamp at which the time_series is expected to start from.
:param pd.Timestamp last_time_stamp: timestamp at which the time_series is expected to end with.
:return: namedtuple with 3 attributes namely flag, raw_data and new_index
1. flag - boolean set to True if there are missing timestamps, else set to False
2. raw_data - input data frame as is without any modifications
3. new_index - pd.DateTimeIndex that can be used to set the new index, defaults to None, assigned a value only
when flag is set to True
:rtype: namedtuple
"""
# Declare a named tuple to hold results
MissingTimeStampFlag = namedtuple('MissingTimeStampFlag', ['flag', 'raw_data', 'new_index'])
result = {
'flag': None,
'raw_data': df.copy(deep=True),
'new_index': None
}
# Generate expected timestamps
expected_timestamps = pd.date_range(start=first_time_stamp, end=last_time_stamp, frequency=frequency)
# Get actual timestamps
if column_name:
df.set_index(column_name, inplace=True)
df.sort_index(inplace=True)
actual_timestamps = df.index.values
# Check if they are the same
comparison_index = expected_timestamps.difference(actual_timestamps)
if comparison_index.empty:
result['flag'] = True
result['new_index'] = expected_timestamps
else:
result['flag'] = False
# Return the result as a Named Tuple
return MissingTimeStampFlag._make(result)
| def _flag_missing_timestamps(
df: pd.DataFrame,
frequency: str,
column_name: str,
first_time_stamp: pd.Timestamp,
last_time_stamp: pd.Timestamp,
) -> namedtuple:
"""
Utility function to test if input data frame is missing any timestamps relative to expected timestamps
generated based on the first_time_stamp, last_time_stamp and frequency.
:param df: data frame which needs to be tested for missing timestamps
    :param frequency: sampling frequency of the data, expressed as a pandas offset alias. A list of acceptable
frequency strings are available here
(https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases)
:param column_name: name of the column which has time series if not the index.
:param first_time_stamp: timestamp at which the time_series is expected to start from.
:param last_time_stamp: timestamp at which the time_series is expected to end with.
:return: namedtuple with 3 attributes namely flag, raw_data and new_index
1. flag - boolean set to True if there are missing timestamps, else set to False
2. raw_data - input data frame as is without any modifications
3. new_index - pd.DateTimeIndex that can be used to set the new index, defaults to None, assigned a value only
when flag is set to True
:rtype: namedtuple
"""
# Declare a named tuple to hold results
MissingTimeStampFlag = namedtuple('MissingTimeStampFlag', ['flag', 'raw_data', 'new_index'])
result = {
'flag': None,
'raw_data': df.copy(deep=True),
'new_index': None
}
# Generate expected timestamps
    expected_timestamps = pd.date_range(start=first_time_stamp, end=last_time_stamp, freq=frequency)
# Get actual timestamps
if column_name:
df.set_index(column_name, inplace=True)
df.sort_index(inplace=True)
actual_timestamps = df.index.values
# Check if they are the same
comparison_index = expected_timestamps.difference(actual_timestamps)
    if comparison_index.empty:
        result['flag'] = False
    else:
        result['flag'] = True
        result['new_index'] = expected_timestamps
    # Return the result as a Named Tuple
    return MissingTimeStampFlag(**result)
|
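# A minimal, self-contained sketch of the missing-timestamp check used in the row above,
# with made-up example data; the helper name below is illustrative, not taken from the row.
import pandas as pd

def find_missing_timestamps(index, start, end, freq):
    # Expected grid of timestamps minus the ones actually observed.
    expected = pd.date_range(start=start, end=end, freq=freq)
    return expected.difference(index)

observed = pd.DatetimeIndex(["2021-01-01 00:00", "2021-01-01 02:00", "2021-01-01 03:00"])
missing = find_missing_timestamps(observed, "2021-01-01 00:00", "2021-01-01 03:00", "H")
print(list(missing))  # [Timestamp('2021-01-01 01:00:00')] -> one gap in the hourly series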
57,681 | def map_scim(clientData):
try:
clientData = json.loads(clientData)
except Exception:
pass
if type(clientData) != dict:
raise Exception('Provided client data is not JSON compatible')
scim_extension = INPUT_SCIM_EXTENSION_KEY.replace('.', '\.')
mapping = {
"active": "active",
"addressCountry": "addresses(val.primary && val.primary==true).[0].country",
"addressFormatted": "addresses(val.primary && val.primary==true).[0].formatted",
"addressLocailty": "addresses(val.primary && val.primary==true).[0].locality",
"addressPostalCode": "addresses(val.primary && val.primary==true).[0].postalCode",
"addressRegion": "addresses(val.primary && val.primary==true).[0].region",
"addressStreeetAddress": "addresses(val.primary && val.primary==true).[0].streetAddress",
"addressType": "addresses(val.primary && val.primary==true).[0].type",
"costCenter": scim_extension + ".costCenter",
"department": scim_extension + ".department",
"displayName": "displayName",
"division": scim_extension + ".division",
"email": "emails(val.primary && val.primary==true).[0].value",
"emailType": "emails(val.primary && val.primary==true).[0].type",
"employeeNumber": scim_extension + ".employeeNumber",
"groups": "groups(val.display).display",
"id": "id",
"externalId": "externalId",
"locale": "locale",
"manager": scim_extension + ".manager.value",
"nameFormatted": "name.formatted",
"nameFamilyName": "name.familyName",
"nameGivenName": "name.givenName",
"nameHonorificPrefix": "name.honorificPrefix",
"nameHonorificSuffix": "name.honorificSuffix",
"nameMiddleName": "name.middleName",
"nickName": "nickName",
"organization": scim_extension + ".organization",
"password": "password",
"photo": "photos(val.type && val.type=='photo').[0].value",
"preferredLanguage": "preferredLanguage",
"profileUrl": "profileUrl",
"thumbnnail": "photos(val.type && val.type=='thumbnail').[0].value",
"timezone": "timezone",
"title": "title",
"userName": "userName",
"userType": "userType",
}
ret = dict()
for k, v in mapping.items():
try:
ret[k] = demisto.dt(clientData, v)
except Exception:
ret[k] = None
return ret
| def map_scim(client_data):
try:
client_data = json.loads(client_data)
except Exception:
pass
if type(client_data) != dict:
raise Exception('Provided client data is not JSON compatible')
scim_extension = INPUT_SCIM_EXTENSION_KEY.replace('.', '\.')
mapping = {
"active": "active",
"addressCountry": "addresses(val.primary && val.primary==true).[0].country",
"addressFormatted": "addresses(val.primary && val.primary==true).[0].formatted",
"addressLocailty": "addresses(val.primary && val.primary==true).[0].locality",
"addressPostalCode": "addresses(val.primary && val.primary==true).[0].postalCode",
"addressRegion": "addresses(val.primary && val.primary==true).[0].region",
"addressStreeetAddress": "addresses(val.primary && val.primary==true).[0].streetAddress",
"addressType": "addresses(val.primary && val.primary==true).[0].type",
"costCenter": scim_extension + ".costCenter",
"department": scim_extension + ".department",
"displayName": "displayName",
"division": scim_extension + ".division",
"email": "emails(val.primary && val.primary==true).[0].value",
"emailType": "emails(val.primary && val.primary==true).[0].type",
"employeeNumber": scim_extension + ".employeeNumber",
"groups": "groups(val.display).display",
"id": "id",
"externalId": "externalId",
"locale": "locale",
"manager": scim_extension + ".manager.value",
"nameFormatted": "name.formatted",
"nameFamilyName": "name.familyName",
"nameGivenName": "name.givenName",
"nameHonorificPrefix": "name.honorificPrefix",
"nameHonorificSuffix": "name.honorificSuffix",
"nameMiddleName": "name.middleName",
"nickName": "nickName",
"organization": scim_extension + ".organization",
"password": "password",
"photo": "photos(val.type && val.type=='photo').[0].value",
"preferredLanguage": "preferredLanguage",
"profileUrl": "profileUrl",
"thumbnnail": "photos(val.type && val.type=='thumbnail').[0].value",
"timezone": "timezone",
"title": "title",
"userName": "userName",
"userType": "userType",
}
ret = dict()
for k, v in mapping.items():
try:
            ret[k] = demisto.dt(client_data, v)
except Exception:
ret[k] = None
return ret
|
33,134 | def rk4(f, x, t, dt, stages=4, s=0):
"""Runge-Kutta (explicit, non-adaptive) numerical (S)ODE solvers.
The rule has strong / weak convergence order 1.0 for generic SDEs and order 4.0
convergence for ODEs when stages=4. For stages=1, this becomes the Euler-Maruyama
    scheme for SDEs (s > 0.0) with strong / weak convergence order 1.0 for SDEs with
    additive noise, as defined below. See `bib.grudzien2020numerical`.
Parameters
----------
f : function
The time derivative of the dynamical system. Must be of the form `f(t, x)`
x : ndarray or float
State vector of the forcing term
t : float
Starting time of the integration
dt : float
Integration time step.
stages : int, optional
The number of stages of the RK method.
When stages=1, this becomes the Euler (-Maruyama) scheme.
Default: 4.
s : float
        The diffusion coefficient (std. dev) for models with additive noise.
Default: 0, yielding deterministic integration.
Returns
-------
ndarray
State vector at the new time, `t+dt`
"""
if s > 0:
# non-trivial diffusion, this defines the SDE integration with additive noise
# generate perturbation for Brownian motion
dims = np.shape(x)
if len(dims) > 1:
N_e, N_x , = dims
W = np.sqrt(dt) * np.random.standard_normal([N_e, N_x])
else:
N_x , = dims
W = np.sqrt(dt) * np.random.standard_normal(N_x)
if stages >=1: k1 = dt * f(t , x) + s * W # noqa
if stages >=2: k2 = dt * f(t+dt/2.0, x+k1/2.0) + s * W # noqa
if stages ==3: k3 = dt * f(t+dt , x+k2*2.0-k1) + s * W # noqa
if stages ==4: # noqa
k3 = dt * f(t+dt/2.0, x+k2/2.0) + s * W # noqa
k4 = dt * f(t+dt , x+k3) + s * W # noqa
if stages ==1: return x + k1 # noqa
elif stages ==2: return x + k2 # noqa
elif stages ==3: return x + (k1 + 4.0*k2 + k3)/6.0 # noqa
elif stages ==4: return x + (k1 + 2.0*(k2 + k3) + k4)/6.0 # noqa
else: raise NotImplementedError # noqa
else:
# deterministic integration
if stages >=1: k1 = dt * f(t , x) # noqa
if stages >=2: k2 = dt * f(t+dt/2.0, x+k1/2.0) # noqa
if stages ==3: k3 = dt * f(t+dt , x+k2*2.0-k1) # noqa
if stages ==4: # noqa
k3 = dt * f(t+dt/2.0, x+k2/2.0) # noqa
k4 = dt * f(t+dt , x+k3) # noqa
if stages ==1: return x + k1 # noqa
elif stages ==2: return x + k2 # noqa
elif stages ==3: return x + (k1 + 4.0*k2 + k3)/6.0 # noqa
elif stages ==4: return x + (k1 + 2.0*(k2 + k3) + k4)/6.0 # noqa
else: raise NotImplementedError # noqa
# fmt: on
| def rk4(f, x, t, dt, stages=4, s=0):
"""Runge-Kutta (explicit, non-adaptive) numerical (S)ODE solvers.
The rule has strong / weak convergence order 1.0 for generic SDEs and order 4.0
convergence for ODEs when stages=4. For stages=1, this becomes the Euler-Maruyama
    scheme for SDEs (s > 0.0) with strong / weak convergence order 1.0 for SDEs with
    additive noise, as defined below. See `bib.grudzien2020numerical`.
Parameters
----------
f : function
The time derivative of the dynamical system. Must be of the form `f(t, x)`
x : ndarray or float
State vector of the forcing term
t : float
Starting time of the integration
dt : float
Integration time step.
stages : int, optional
The number of stages of the RK method.
When `stages=1`, this becomes the Euler (-Maruyama) scheme.
Default: 4.
s : float
        The diffusion coefficient (std. dev) for models with additive noise.
Default: 0, yielding deterministic integration.
Returns
-------
ndarray
State vector at the new time, `t+dt`
"""
if s > 0:
# non-trivial diffusion, this defines the SDE integration with additive noise
# generate perturbation for Brownian motion
dims = np.shape(x)
if len(dims) > 1:
N_e, N_x , = dims
W = np.sqrt(dt) * np.random.standard_normal([N_e, N_x])
else:
N_x , = dims
W = np.sqrt(dt) * np.random.standard_normal(N_x)
if stages >=1: k1 = dt * f(t , x) + s * W # noqa
if stages >=2: k2 = dt * f(t+dt/2.0, x+k1/2.0) + s * W # noqa
if stages ==3: k3 = dt * f(t+dt , x+k2*2.0-k1) + s * W # noqa
if stages ==4: # noqa
k3 = dt * f(t+dt/2.0, x+k2/2.0) + s * W # noqa
k4 = dt * f(t+dt , x+k3) + s * W # noqa
if stages ==1: return x + k1 # noqa
elif stages ==2: return x + k2 # noqa
elif stages ==3: return x + (k1 + 4.0*k2 + k3)/6.0 # noqa
elif stages ==4: return x + (k1 + 2.0*(k2 + k3) + k4)/6.0 # noqa
else: raise NotImplementedError # noqa
else:
# deterministic integration
if stages >=1: k1 = dt * f(t , x) # noqa
if stages >=2: k2 = dt * f(t+dt/2.0, x+k1/2.0) # noqa
if stages ==3: k3 = dt * f(t+dt , x+k2*2.0-k1) # noqa
if stages ==4: # noqa
k3 = dt * f(t+dt/2.0, x+k2/2.0) # noqa
k4 = dt * f(t+dt , x+k3) # noqa
if stages ==1: return x + k1 # noqa
elif stages ==2: return x + k2 # noqa
elif stages ==3: return x + (k1 + 4.0*k2 + k3)/6.0 # noqa
elif stages ==4: return x + (k1 + 2.0*(k2 + k3) + k4)/6.0 # noqa
else: raise NotImplementedError # noqa
# fmt: on
|
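# A quick worked check of the rk4() stepper above on dx/dt = -x (exact solution exp(-t)).
# Assumes the function above is in scope and numpy is imported as np, as in the snippet.
import numpy as np

x, t, dt = np.array([1.0]), 0.0, 0.1
for _ in range(10):  # deterministic integration (s=0) from t=0 to t=1
    x = rk4(lambda t, x: -x, x, t, dt, stages=4)
    t += dt
print(x[0], np.exp(-1.0))  # the two values agree to better than 1e-6 at this step size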
15,725 | def gather_new_integration(determine_auth: bool) -> Info:
"""Gather info about new integration from user."""
fields = {
"name": {
"prompt": "What is the name of your integration?",
"validators": [CHECK_EMPTY],
},
"codeowner": {
"prompt": "What is your GitHub handle?",
"validators": [
CHECK_EMPTY,
[
'GitHub handles need to start with an "@"',
lambda value: value.startswith("@"),
],
],
},
"requirement": {
"prompt": "What PyPI package and version do you depend on? Leave blank for none.",
"validators": [
[
"Versions should be pinned using '=='.",
lambda value: not value or "==" in value,
]
],
},
"iot_class": {
"prompt": (
f"""How will your integration gather data?
Valid values are {', '.join(SUPPORTED_IOT_CLASSES)}
More info @ https://www.home-assistant.io/blog/2016/02/12/classifying-the-internet-of-things/#classifiers
"""
),
"validators": [
[
f"You need to pick one of {', '.join(SUPPORTED_IOT_CLASSES)}",
lambda value: value in SUPPORTED_IOT_CLASSES,
]
],
},
}
if determine_auth:
fields.update(
{
"authentication": {
"prompt": "Does Home Assistant need the user to authenticate to control the device/service? (yes/no)",
"default": "yes",
**YES_NO,
},
"discoverable": {
"prompt": "Is the device/service discoverable on the local network? (yes/no)",
"default": "no",
**YES_NO,
},
"oauth2": {
"prompt": "Can the user authenticate the device using OAuth2? (yes/no)",
"default": "no",
**YES_NO,
},
}
)
return _gather_info(fields)
| def gather_new_integration(determine_auth: bool) -> Info:
"""Gather info about new integration from user."""
fields = {
"name": {
"prompt": "What is the name of your integration?",
"validators": [CHECK_EMPTY],
},
"codeowner": {
"prompt": "What is your GitHub handle?",
"validators": [
CHECK_EMPTY,
[
'GitHub handles need to start with an "@"',
lambda value: value.startswith("@"),
],
],
},
"requirement": {
"prompt": "What PyPI package and version do you depend on? Leave blank for none.",
"validators": [
[
"Versions should be pinned using '=='.",
lambda value: not value or "==" in value,
]
],
},
"iot_class": {
"prompt": (
f"""How will your integration gather data?
Valid values are {', '.join(SUPPORTED_IOT_CLASSES)}
More info @ https://developers.home-assistant.io/docs/creating_integration_manifest#iot-class
"""
),
"validators": [
[
f"You need to pick one of {', '.join(SUPPORTED_IOT_CLASSES)}",
lambda value: value in SUPPORTED_IOT_CLASSES,
]
],
},
}
if determine_auth:
fields.update(
{
"authentication": {
"prompt": "Does Home Assistant need the user to authenticate to control the device/service? (yes/no)",
"default": "yes",
**YES_NO,
},
"discoverable": {
"prompt": "Is the device/service discoverable on the local network? (yes/no)",
"default": "no",
**YES_NO,
},
"oauth2": {
"prompt": "Can the user authenticate the device using OAuth2? (yes/no)",
"default": "no",
**YES_NO,
},
}
)
return _gather_info(fields)
|
30,953 | def create_user_iam(default_base_dn, default_page_size, args):
assert conn is not None
user_profile = args.get("user-profile")
user_profile_delta = args.get('user-profile-delta')
iam_user_profile = IAMUserProfile(user_profile=user_profile, user_profile_delta=user_profile_delta)
ad_user = iam_user_profile.map_object(mapper_name=OUTGOING_MAPPER, mapping_type='User Profile')
try:
sam_account_name = ad_user.get("samaccountname")
user_exists = check_if_user_exists_by_samaccountname(default_base_dn, default_page_size, sam_account_name)
if user_exists:
return return_results("User already exists")
user_dn = generate_dn_and_remove_from_user_profile(ad_user)
object_classes = ["top", "person", "organizationalPerson", "user"]
success = conn.add(user_dn, object_classes, ad_user)
if success:
iam_user_profile.set_result(success=True,
email=ad_user.get('email'),
username=ad_user.get('name'),
details=ad_user,
active=ad_user.get("userAccountControl"))
else:
iam_user_profile.set_result(success=False, error_message="Failed to create user")
return_results(iam_user_profile)
except Exception as e:
iam_user_profile.set_result(success=False, error_message=str(e))
return_results(iam_user_profile)
| def create_user_iam(default_base_dn, default_page_size, args):
assert conn is not None
user_profile = args.get("user-profile")
user_profile_delta = args.get('user-profile-delta')
iam_user_profile = IAMUserProfile(user_profile=user_profile, user_profile_delta=user_profile_delta)
ad_user = iam_user_profile.map_object(mapper_name=OUTGOING_MAPPER, mapping_type='User Profile')
try:
sam_account_name = ad_user.get("samaccountname")
user_exists = check_if_user_exists_by_samaccountname(default_base_dn, default_page_size, sam_account_name)
if user_exists:
return "User already exists"
user_dn = generate_dn_and_remove_from_user_profile(ad_user)
object_classes = ["top", "person", "organizationalPerson", "user"]
success = conn.add(user_dn, object_classes, ad_user)
if success:
iam_user_profile.set_result(success=True,
email=ad_user.get('email'),
username=ad_user.get('name'),
details=ad_user,
active=ad_user.get("userAccountControl"))
else:
iam_user_profile.set_result(success=False, error_message="Failed to create user")
return_results(iam_user_profile)
except Exception as e:
iam_user_profile.set_result(success=False, error_message=str(e))
return_results(iam_user_profile)
|
45,736 | def _linda_forecast(
precip,
precip_lagr_diff,
timesteps,
fct_gen,
precip_pert_gen,
vel_pert_gen,
n_ensemble_members,
seed,
measure_time,
print_info,
return_output,
callback,
):
"""Compute LINDA nowcast."""
# compute convolved difference fields
precip_lagr_diff = precip_lagr_diff.copy()
for i in range(precip_lagr_diff.shape[0]):
for _ in range(fct_gen["ari_order"] - i):
precip_lagr_diff[i] = _composite_convolution(
precip_lagr_diff[i],
fct_gen["kernels_1"],
fct_gen["interp_weights"],
)
# initialize the random generators
if precip_pert_gen is not None:
rs_precip_pert = []
np.random.seed(seed)
for i in range(n_ensemble_members):
rs = np.random.RandomState(seed)
rs_precip_pert.append(rs)
seed = rs.randint(0, high=1e9)
else:
rs_precip_pert = None
if vel_pert_gen is not None:
vps = []
np.random.seed(seed)
for i in range(n_ensemble_members):
rs = np.random.RandomState(seed)
vp = vel_pert_gen["init_func"](seed)
vps.append(
lambda t, vp=vp: vel_pert_gen["gen_func"](
vp, t * vel_pert_gen["timestep"]
)
)
seed = rs.randint(0, high=1e9)
else:
vps = None
state = {
"precip_fct": [precip[-1].copy() for i in range(n_ensemble_members)],
"precip_lagr_diff": [
precip_lagr_diff.copy() for i in range(n_ensemble_members)
],
"rs_precip_pert": rs_precip_pert,
}
params = {
"interp_weights": fct_gen["interp_weights"],
"kernels_1": fct_gen["kernels_1"],
"kernels_2": fct_gen["kernels_2"],
"mask_adv": fct_gen["mask_adv"],
"num_ens_members": n_ensemble_members,
"num_workers": fct_gen["num_workers"],
"num_ensemble_workers": min(n_ensemble_members, fct_gen["num_workers"]),
"precip_pert_gen": precip_pert_gen,
"psi": fct_gen["psi"],
}
precip_f = nowcast_main_loop(
precip[-1],
fct_gen["velocity"],
state,
timesteps,
fct_gen["extrap_method"],
_update,
extrap_kwargs=fct_gen["extrap_kwargs"],
vel_pert_gen=vps,
params=params,
callback=callback,
return_output=return_output,
num_workers=fct_gen["num_workers"],
measure_time=measure_time,
)
if measure_time:
precip_f, mainloop_time = precip_f
if return_output:
if not fct_gen["add_perturbations"]:
precip_f = precip_f[0]
if measure_time:
return precip_f, mainloop_time
else:
return precip_f
else:
return None
| def _linda_forecast(
precip,
precip_lagr_diff,
timesteps,
forecast_gen,
precip_pert_gen,
vel_pert_gen,
n_ensemble_members,
seed,
measure_time,
print_info,
return_output,
callback,
):
"""Compute LINDA nowcast."""
# compute convolved difference fields
precip_lagr_diff = precip_lagr_diff.copy()
for i in range(precip_lagr_diff.shape[0]):
        for _ in range(forecast_gen["ari_order"] - i):
            precip_lagr_diff[i] = _composite_convolution(
                precip_lagr_diff[i],
                forecast_gen["kernels_1"],
                forecast_gen["interp_weights"],
)
# initialize the random generators
if precip_pert_gen is not None:
rs_precip_pert = []
np.random.seed(seed)
for i in range(n_ensemble_members):
rs = np.random.RandomState(seed)
rs_precip_pert.append(rs)
seed = rs.randint(0, high=1e9)
else:
rs_precip_pert = None
if vel_pert_gen is not None:
vps = []
np.random.seed(seed)
for i in range(n_ensemble_members):
rs = np.random.RandomState(seed)
vp = vel_pert_gen["init_func"](seed)
vps.append(
lambda t, vp=vp: vel_pert_gen["gen_func"](
vp, t * vel_pert_gen["timestep"]
)
)
seed = rs.randint(0, high=1e9)
else:
vps = None
state = {
"precip_fct": [precip[-1].copy() for i in range(n_ensemble_members)],
"precip_lagr_diff": [
precip_lagr_diff.copy() for i in range(n_ensemble_members)
],
"rs_precip_pert": rs_precip_pert,
}
    params = {
        "interp_weights": forecast_gen["interp_weights"],
        "kernels_1": forecast_gen["kernels_1"],
        "kernels_2": forecast_gen["kernels_2"],
        "mask_adv": forecast_gen["mask_adv"],
        "num_ens_members": n_ensemble_members,
        "num_workers": forecast_gen["num_workers"],
        "num_ensemble_workers": min(n_ensemble_members, forecast_gen["num_workers"]),
        "precip_pert_gen": precip_pert_gen,
        "psi": forecast_gen["psi"],
}
precip_f = nowcast_main_loop(
precip[-1],
        forecast_gen["velocity"],
state,
timesteps,
        forecast_gen["extrap_method"],
_update,
        extrap_kwargs=forecast_gen["extrap_kwargs"],
vel_pert_gen=vps,
params=params,
callback=callback,
return_output=return_output,
num_workers=fct_gen["num_workers"],
measure_time=measure_time,
)
if measure_time:
precip_f, mainloop_time = precip_f
if return_output:
        if not forecast_gen["add_perturbations"]:
precip_f = precip_f[0]
if measure_time:
return precip_f, mainloop_time
else:
return precip_f
else:
return None
|
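# One detail worth noting in the row above is the `lambda t, vp=vp: ...` inside the ensemble
# loop: the default argument freezes each member's perturbation generator, whereas a plain
# closure would late-bind to the last loop value. A tiny standalone illustration:
late_bound = [lambda t: i for i in range(3)]
frozen = [lambda t, i=i: i for i in range(3)]
print([f(0.0) for f in late_bound])  # [2, 2, 2] -- every lambda sees the final i
print([f(0.0) for f in frozen])      # [0, 1, 2] -- each lambda keeps its own i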
56,815 | def group_dataset_datavalues(
dataset,
datavalues_list
) -> list:
"""
This function evaluates a few cases regarding the 'period' and 'orgUnit':
Considerations:
        The 'period' and 'orgUnit' must be specified on the same level as the 'completeDate',
thus the payload MUST be in the following format:
{
"completeDate": <completeDate>,
"orgUnit": <orgUnit>,
"period": <period>,
"dataValues": [...]
}
Since 'completeDate' is specified directly on the `dataset` dictionary, we look there.
Several cases should be considered:
        A) 'orgUnit' and 'period' are static, i.e. already on the same level as 'completeDate'.
- No further processing, simply add 'dataValues' to dataset and return dataset as is.
B) 'orgUnit' is static and 'period' is dynamic, i.e. 'period' is specified in `datavalues_list`.
- Go through 'datavalues_list' and group by 'period' so that a list of items can be constructed
such that 'period' sits on the same level as 'completeDate'
        C) 'period' is static and 'orgUnit' is dynamic, i.e. 'orgUnit' is specified in `datavalues_list`.
            - Same as B), except the grouping is by 'orgUnit' instead of 'period'.
        D) both 'period' and 'orgUnit' are dynamic.
- Same as B and C, except both 'orgUnit' and 'period' should be considered.
Return:
A list of grouped 'datavalues_sets', where each 'set' corresponds to the format
specified in "Considerations" above.
"""
if dataset.get('orgUnit') and dataset.get('period'):
dataset['dataValues'] = datavalues_list
return [dataset]
group_by_items = []
if dataset.get('orgUnit') and not dataset.get('period'):
# Need to group datavalues_list by period
group_by_items = ['period']
if dataset.get('period') and not dataset.get('orgUnit'):
# Need to group datavalues_list by orgUnit
group_by_items = ['orgUnit']
if not dataset.get('orgUnit') and not dataset.get('period'):
# Need to group datavalues_list by orgUnit and period
group_by_items = ['orgUnit', 'period']
return get_grouped_datavalues_sets(
group_by=group_by_items,
template_dataset=dataset,
data_list=datavalues_list
)
| def group_dataset_datavalues(
dataset,
datavalues_list
) -> list:
"""
This function evaluates a few cases regarding the 'period' and 'orgUnit':
Considerations:
        The 'period' and 'orgUnit' must be specified on the same level as the 'completeDate',
thus the payload MUST be in the following format:
{
"completeDate": <completeDate>,
"orgUnit": <orgUnit>,
"period": <period>,
"dataValues": [...]
}
Since 'completeDate' is specified directly on the `dataset` dictionary, we look there.
Several cases should be considered:
        A) 'orgUnit' and 'period' are static, i.e. already on the same level as 'completeDate'.
- No further processing, simply add 'dataValues' to dataset and return dataset as is.
B) 'orgUnit' is static and 'period' is dynamic, i.e. 'period' is specified in `datavalues_list`.
- Go through 'datavalues_list' and group by 'period' so that a list of items can be constructed
such that 'period' sits on the same level as 'completeDate'
        C) 'period' is static and 'orgUnit' is dynamic, i.e. 'orgUnit' is specified in `datavalues_list`.
            - Same as B), except the grouping is by 'orgUnit' instead of 'period'.
        D) both 'period' and 'orgUnit' are dynamic.
- Same as B and C, except both 'orgUnit' and 'period' should be considered.
Return:
A list of grouped 'datavalues_sets', where each 'set' corresponds to the format
specified in "Considerations" above.
"""
if dataset.get('orgUnit') and dataset.get('period'):
dataset['dataValues'] = datavalues_list
return [dataset]
group_by_items = []
if not dataset.get('period'):
group_by_items.append('period')
if not dataset.get('orgUnit'):
group_by_items.append('orgUnit')
return get_grouped_datavalues_sets(
group_by=group_by_items,
template_dataset=dataset,
data_list=datavalues_list
)
|
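# A minimal sketch of the grouping described in case B above ('orgUnit' fixed on the template,
# 'period' taken from the data values). The helper below is illustrative only and is not the
# get_grouped_datavalues_sets() that the function above delegates to.
from itertools import groupby

def group_values(template, values, group_by):
    key = lambda v: tuple(v[k] for k in group_by)
    grouped = []
    for group_key, items in groupby(sorted(values, key=key), key=key):
        entry = dict(template, **dict(zip(group_by, group_key)))
        entry['dataValues'] = [{k: v for k, v in item.items() if k not in group_by}
                               for item in items]
        grouped.append(entry)
    return grouped

template = {'completeDate': '2021-06-01', 'orgUnit': 'OU1'}
values = [{'period': '202105', 'value': 1}, {'period': '202106', 'value': 2}]
print(group_values(template, values, ['period']))
# -> two payloads, each with its own 'period' alongside 'completeDate' and 'orgUnit'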
4,165 | def combine_evoked(all_evoked, weights):
"""Merge evoked data by weighted addition or subtraction.
Data should have the same channels and the same time instants.
Subtraction can be performed by calling
``combine_evoked([evoked1, -evoked2], 'equal')``
.. warning:: If you pass negative weights, it
can give unexpected results. Use at your own risk.
Parameters
----------
all_evoked : list of Evoked
The evoked datasets.
weights : list of float | str
The weights to apply to the data of each evoked instance.
Can also be ``'nave'`` to weight according to evoked.nave,
or ``"equal"`` to use equal weighting (each weighted as ``1/N``).
Returns
-------
evoked : Evoked
The new evoked data.
Notes
-----
.. versionadded:: 0.9.0
"""
if isinstance(weights, str):
_check_option('weights', weights, ['nave', 'equal'])
if weights == 'nave':
weights = np.array([e.nave for e in all_evoked], float)
weights /= weights.sum()
else: # == 'equal'
weights = [1. / len(all_evoked)] * len(all_evoked)
weights = np.array(weights, float)
if weights.ndim != 1 or weights.size != len(all_evoked):
raise ValueError('weights must be the same size as all_evoked')
all_evoked = _check_evokeds_ch_names_times(all_evoked)
evoked = all_evoked[0].copy()
# use union of bad channels
bads = list(set(evoked.info['bads']).union(*(ev.info['bads']
for ev in all_evoked[1:])))
evoked.info['bads'] = bads
evoked.data = sum(w * e.data for w, e in zip(weights, all_evoked))
# We should set nave based on how variances change when summing Gaussian
# random variables. From:
#
# https://en.wikipedia.org/wiki/Weighted_arithmetic_mean
#
# We know that the variance of a weighted sample mean is:
#
# σ^2 = w_1^2 σ_1^2 + w_2^2 σ_2^2 + ... + w_n^2 σ_n^2
#
# We estimate the variance of each evoked instance as 1 / nave to get:
#
# σ^2 = w_1^2 / nave_1 + w_2^2 / nave_2 + ... + w_n^2 / nave_n
#
# And our resulting nave is the reciprocal of this:
evoked.nave = max(int(round(
1. / sum(w ** 2 / e.nave for w, e in zip(weights, all_evoked)))), 1)
evoked.comment = ' + '.join('%0.3f * %s' % (w, e.comment or 'unknown')
for w, e in zip(weights, all_evoked))
return evoked
| def combine_evoked(all_evoked, weights):
"""Merge evoked data by weighted addition or subtraction.
Data should have the same channels and the same time instants.
Subtraction can be performed by calling
``combine_evoked([evoked1, -evoked2], 'equal')``
.. warning:: If you pass negative weights, it
can give unexpected results. Use at your own risk.
Parameters
----------
all_evoked : list of Evoked
The evoked datasets.
weights : list of float | str
The weights to apply to the data of each evoked instance.
Can also be ``'nave'`` to weight according to evoked.nave,
or ``"equal"`` to use equal weighting (each weighted as ``1/N``).
Returns
-------
evoked : Evoked
The new evoked data.
Notes
-----
.. versionadded:: 0.9.0
"""
if isinstance(weights, str):
_check_option('weights', weights, ['nave', 'equal'])
if weights == 'nave':
weights = np.array([e.nave for e in all_evoked], float)
weights /= weights.sum()
else: # == 'equal'
weights = [1. / len(all_evoked)] * len(all_evoked)
weights = np.array(weights, float)
if weights.ndim != 1 or weights.size != len(all_evoked):
raise ValueError('weights must be the same size as all_evoked')
all_evoked = _check_evokeds_ch_names_times(all_evoked)
evoked = all_evoked[0].copy()
# use union of bad channels
bads = list(set(evoked.info['bads']).union(*(ev.info['bads']
for ev in all_evoked[1:])))
evoked.info['bads'] = bads
evoked.data = sum(w * e.data for w, e in zip(weights, all_evoked))
# We should set nave based on how variances change when summing Gaussian
# random variables. From:
#
# https://en.wikipedia.org/wiki/Weighted_arithmetic_mean
#
# We know that the variance of a weighted sample mean is:
#
# σ^2 = w_1^2 σ_1^2 + w_2^2 σ_2^2 + ... + w_n^2 σ_n^2
#
# We estimate the variance of each evoked instance as 1 / nave to get:
#
# σ^2 = w_1^2 / nave_1 + w_2^2 / nave_2 + ... + w_n^2 / nave_n
#
# And our resulting nave is the reciprocal of this:
evoked.nave = max(int(round(
1. / sum(w ** 2 / e.nave for w, e in zip(weights, all_evoked)))), 1)
evoked.comment = ' + '.join('%0.3f * %s' % (w, e.comment or 'unknown')
for w, e in zip(weights, all_evoked))
return evoked
|
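# A quick numeric check of the nave rule above, 1 / sum(w_i**2 / nave_i), for two hypothetical
# evoked instances with nave 10 and 30 (values assumed for illustration).
naves = [10, 30]
equal = [1 / len(naves)] * len(naves)        # 'equal' weighting
by_nave = [n / sum(naves) for n in naves]    # 'nave' weighting
for w in (equal, by_nave):
    print(round(1.0 / sum(wi ** 2 / n for wi, n in zip(w, naves))))
# equal weights -> 30; nave weights -> 40, i.e. the total number of averaged epochs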
30,715 | def list_accounts(client, args):
title = f'{INTEGRATION_NAME} - List of the Accounts'
raws = []
cyberark_ec = []
raw_response = client.get_accounts(offset=args['offset'], limit=args['limit'])['value']
if raw_response:
for item in raw_response:
raws.append(item)
cyberark_ec.append({
'AccountName': item['name'],
'UserName': item['userName'],
'PlatformID': item['platformId'],
'SafeName': item['safeName'],
'AccountID': item['id'],
'CreatedTime': item['createdTime']
})
if not raws:
return f'{INTEGRATION_NAME} - Could not find any Accounts'
context_entry = {
"CyberArk.Accounts": cyberark_ec
}
human_readable = tableToMarkdown(t=context_entry.get('CyberArk.Accounts'), name=title)
return [human_readable, context_entry, raws]
| def list_accounts(client, args):
title = f'{INTEGRATION_NAME} - List of the Accounts'
raws = []
cyberark_ec = []
raw_response = client.get_accounts(offset=args['offset'], limit=args['limit'])['value']
if raw_response:
for item in raw_response:
raws.append(item)
cyberark_ec.append({
'AccountName': item['name'],
'UserName': item['userName'],
'PlatformID': item['platformId'],
'SafeName': item['safeName'],
'AccountID': item['id'],
'CreatedTime': item['createdTime']
})
if not raws:
return (f'{INTEGRATION_NAME} - Could not find any Accounts', {}, {})
context_entry = {
"CyberArk.Accounts": cyberark_ec
}
human_readable = tableToMarkdown(t=context_entry.get('CyberArk.Accounts'), name=title)
return [human_readable, context_entry, raws]
|
13,590 | def rand_QB(A, target_rank=None, distribution='normal', oversampling=0, powerIterations=0):
"""
    Randomized QB decomposition.
See Algorithm 3.1 in [EMKB19]_.
Parameters
----------
A :
The |VectorArray| for which the randomized QB Decomposition is to be computed.
target_rank : int
The desired rank for the decomposition. If None rank = len(A).
distribution : str
        Distribution used for the random projection matrix Omega (`'normal'` or `'uniform'`).
    oversampling : int
        Oversampling parameter. Number of extra columns of the projection matrix.
powerIterations : int
Number of power Iterations.
Returns
-------
Q :
        |VectorArray| containing an approximately optimal basis for the image of the input matrix A.
        len(Q) = target_rank
    B :
        NumPy array. Projection of the input matrix onto the lower-dimensional subspace.
"""
assert isinstance(A, VectorArray)
assert target_rank is None or target_rank <= len(A)
assert distribution in ('normal', 'uniform')
if A.dim == 0 or len(A) == 0:
return A.space.zeros(), np.zeros((target_rank, len(A)))
rank = len(A) if target_rank is None else target_rank + oversampling
target_rank = len(A) if target_rank is None else target_rank
Omega = np.random.normal(0, 1, (rank, len(A))) if distribution == 'normal' else np.random.rand(rank, len(A))
Y = A.lincomb(Omega)[:target_rank]
# Power Iterations
if(powerIterations > 0):
for i in range(powerIterations):
Q = gram_schmidt(Y)[:target_rank]
Z, _ = spla.qr(A.inner(Q))
Y = A.lincomb(Z)[:target_rank]
Q = gram_schmidt(Y)[:target_rank]
B = Q.inner(A)
return Q, B
| def rand_QB(A, target_rank=None, distribution='normal', oversampling=0, powerIterations=0):
"""
    Randomized QB decomposition.
See Algorithm 3.1 in [EMKB19]_.
Parameters
----------
A :
The |VectorArray| for which the randomized QB Decomposition is to be computed.
target_rank : int
The desired rank for the decomposition. If None rank = len(A).
distribution : str
Distribution used for the random sample matrix (`'normal'` or `'uniform'`).
oversampling : int
        Oversampling parameter. Number of extra columns of the projection matrix.
powerIterations : int
Number of power Iterations.
Returns
-------
Q :
        |VectorArray| containing an approximately optimal basis for the image of the input matrix A.
        len(Q) = target_rank
    B :
        NumPy array. Projection of the input matrix onto the lower-dimensional subspace.
"""
assert isinstance(A, VectorArray)
assert target_rank is None or target_rank <= len(A)
assert distribution in ('normal', 'uniform')
if A.dim == 0 or len(A) == 0:
return A.space.zeros(), np.zeros((target_rank, len(A)))
rank = len(A) if target_rank is None else target_rank + oversampling
target_rank = len(A) if target_rank is None else target_rank
Omega = np.random.normal(0, 1, (rank, len(A))) if distribution == 'normal' else np.random.rand(rank, len(A))
Y = A.lincomb(Omega)[:target_rank]
# Power Iterations
if(powerIterations > 0):
for i in range(powerIterations):
Q = gram_schmidt(Y)[:target_rank]
Z, _ = spla.qr(A.inner(Q))
Y = A.lincomb(Z)[:target_rank]
Q = gram_schmidt(Y)[:target_rank]
B = Q.inner(A)
return Q, B
|
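# A NumPy-only sketch of the randomized QB idea above, using plain matrices instead of pyMOR
# |VectorArray|s; the sizes are chosen arbitrarily for illustration.
import numpy as np

rng = np.random.default_rng(0)
A = rng.standard_normal((100, 8)) @ rng.standard_normal((8, 60))  # rank-8 test matrix
Omega = rng.standard_normal((60, 10))                             # target rank + oversampling
Q, _ = np.linalg.qr(A @ Omega)                                    # orthonormal basis for the range
B = Q.T @ A                                                       # projection onto that basis
print(np.linalg.norm(A - Q @ B) / np.linalg.norm(A))              # ~1e-15: A is recovered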
40,145 | def create_data_graph_nodes_and_groups(data, parent_uid, root_uid, whitelist):
data_graph = {
'nodes': [],
'edges': [],
'groups': []
}
groups = []
for file in data:
mime = file['processed_analysis']['file_type']['mime']
if mime not in whitelist or root_uid not in file['virtual_file_path']:
continue
if mime not in groups:
groups.append(file['processed_analysis']['file_type']['mime'])
virtual_paths = file['virtual_file_path'][root_uid]
for vpath in virtual_paths:
path_components = split_virtual_path(vpath)
if len(path_components) < 2:
continue
name_component = path_components[-1]
parent_component = path_components[-2]
if parent_component != parent_uid:
continue
node = {
'label': name_component,
'id': vpath,
'entity': file['_id'],
'group': file['processed_analysis']['file_type']['mime'],
'full_file_type': file['processed_analysis']['file_type']['full']
}
data_graph['nodes'].append(node)
data_graph['groups'] = sorted(groups)
return data_graph
| def create_data_graph_nodes_and_groups(data, parent_uid, root_uid, whitelist):
data_graph = {
'nodes': [],
'edges': [],
'groups': []
}
groups = []
for file in data:
mime = file['processed_analysis']['file_type']['mime']
if mime not in whitelist or root_uid not in file['virtual_file_path']:
continue
if mime not in groups:
groups.append(mime)
virtual_paths = file['virtual_file_path'][root_uid]
for vpath in virtual_paths:
path_components = split_virtual_path(vpath)
if len(path_components) < 2:
continue
name_component = path_components[-1]
parent_component = path_components[-2]
if parent_component != parent_uid:
continue
node = {
'label': name_component,
'id': vpath,
'entity': file['_id'],
'group': file['processed_analysis']['file_type']['mime'],
'full_file_type': file['processed_analysis']['file_type']['full']
}
data_graph['nodes'].append(node)
data_graph['groups'] = sorted(groups)
return data_graph
|
30,854 | def format_sort(sort_str: str) -> list:
"""
Format a sort string from "field1:asc,field2:desc" to a list accepted by pymongo.sort()
"field1:asc,field2:desc" => [("field1",1),("field2",-1)]
Args:
sort_str: a sort detailed as a string
Returns:
list accepted by pymongo.sort()
"""
sort_fields = sort_str.split(',')
sort_list = list()
for field in sort_fields:
if ':' not in field:
raise ValueError("`sort` is not in the correct format.")
field, type = field.split(':')
if type not in SORT_TYPE_DICT.keys():
raise ValueError("`sort` is not in the correct format. Please make sure it's either 'asc' or 'desc'")
else:
sort_list.append((field, SORT_TYPE_DICT[type]))
return sort_list
| def format_sort(sort_str: str) -> list:
"""
Format a sort string from "field1:asc,field2:desc" to a list accepted by pymongo.sort()
"field1:asc,field2:desc" => [("field1",1),("field2",-1)]
Args:
sort_str: a sort detailed as a string
Returns:
list accepted by pymongo.sort()
"""
sort_fields = sort_str.split(',')
sort_list = list()
for field in sort_fields:
if ':' not in field:
raise ValueError("`sort` is not in the correct format.")
field, type = field.split(':')
if type not in SORT_TYPE_DICT:
raise ValueError("`sort` is not in the correct format. Please make sure it's either 'asc' or 'desc'")
else:
sort_list.append((field, SORT_TYPE_DICT[type]))
return sort_list
|
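# Assuming SORT_TYPE_DICT maps 'asc' to pymongo.ASCENDING (1) and 'desc' to pymongo.DESCENDING
# (-1) -- the constant is not shown in the row above -- the docstring transformation plays out as:
SORT_TYPE_DICT = {'asc': 1, 'desc': -1}
pairs = [f.split(':') for f in "field1:asc,field2:desc".split(',')]
print([(field, SORT_TYPE_DICT[order]) for field, order in pairs])  # [('field1', 1), ('field2', -1)]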
28,012 | def convert_reports(reports: Report, repo_dirs: List[str]) -> Dict:
"""Convert the given reports to codeclimate format.
This function will convert the given report to Code Climate format.
reports - list of reports type Report
repo_dir - Root directory of the sources, i.e. the directory where the
repository was cloned, which will be trimmed if set.
returns a list of reports converted to codeclimate format
"""
codeclimate_reports = []
for report in reports:
if repo_dirs:
report.trim_path_prefixes(repo_dirs)
codeclimate_reports.append(__to_codeclimate(report))
return codeclimate_reports
 | def convert_reports(reports: List[Report], repo_dirs: List[str]) -> List[Dict]:
"""Convert the given reports to codeclimate format.
This function will convert the given report to Code Climate format.
reports - list of reports type Report
repo_dir - Root directory of the sources, i.e. the directory where the
repository was cloned, which will be trimmed if set.
returns a list of reports converted to codeclimate format
"""
codeclimate_reports = []
for report in reports:
if repo_dirs:
report.trim_path_prefixes(repo_dirs)
codeclimate_reports.append(__to_codeclimate(report))
return codeclimate_reports
|
32,270 | def fetch_incidents(client: Client) -> list:
query_params = {}
incidents = []
last_run = demisto.getLastRun()
date_format = '%Y-%m-%d %H:%M:%S'
start_snow_time, _ = get_fetch_run_time_range(
last_run=last_run, first_fetch=client.fetch_time, look_back=client.look_back, date_format=date_format
)
snow_time_as_date = datetime.strptime(start_snow_time, date_format)
# if we didn't fetch anything, make sure the end time is the same as initial time
end_snow_time = start_snow_time
fetch_limit = last_run.get('limit') or client.sys_param_limit
query = ''
if client.sys_param_query:
query += f'{client.sys_param_query}^'
# get the tickets which occurred after the 'start_snow_time'
query += f'ORDERBY{client.timestamp_field}^{client.timestamp_field}>{start_snow_time}'
if query:
query_params['sysparm_query'] = query
query_params['sysparm_limit'] = fetch_limit # type: ignore[assignment]
demisto.info(f'Fetching ServiceNow incidents. with the query params: {str(query_params)}')
tickets_response = client.send_request(f'table/{client.ticket_type}', 'GET', params=query_params).get('result', [])
count = 0
severity_map = {'1': 3, '2': 2, '3': 1} # Map SNOW severity to Demisto severity for incident creation
for ticket in tickets_response:
ticket.update(get_mirroring())
if client.timestamp_field not in ticket:
raise ValueError(f"The timestamp field [{client.timestamp_field}] does not exist in the ticket")
if count > fetch_limit:
break
try:
if datetime.strptime(ticket[client.timestamp_field], date_format) < snow_time_as_date:
continue
parse_dict_ticket_fields(client, ticket)
except Exception:
pass
incidents.append({
'name': f"ServiceNow Incident {ticket.get(client.incident_name)}",
'labels': [
{'type': _type, 'value': value if isinstance(value, str) else json.dumps(value)}
for _type, value in ticket.items()
],
'details': json.dumps(ticket),
'severity': severity_map.get(ticket.get('severity', ''), 0),
'attachment': get_ticket_file_attachments(client=client, ticket=ticket),
'occurred': ticket.get(client.timestamp_field),
'rawJSON': json.dumps(ticket)
})
count += 1
end_snow_time = ticket.get(client.timestamp_field)
# remove duplicate incidents which were already fetched
incidents = filter_incidents_by_duplicates_and_limit(
incidents_res=incidents, last_run=last_run, fetch_limit=fetch_limit, id_field='name'
)
last_run = update_last_run_object(
last_run=last_run,
incidents=incidents,
fetch_limit=fetch_limit,
start_fetch_time=start_snow_time,
end_fetch_time=end_snow_time,
look_back=client.look_back,
created_time_field='occurred',
id_field='name',
date_format=date_format
)
demisto.info(f'last run at the end of the incidents fetching {last_run}')
for ticket in incidents:
# the occurred time requires to be in ISO format.
ticket['occurred'] = f"{datetime.strptime(ticket.get('occurred'), date_format).isoformat()}Z"
demisto.setLastRun(last_run)
return incidents
| def fetch_incidents(client: Client) -> list:
query_params = {}
incidents = []
last_run = demisto.getLastRun()
date_format = '%Y-%m-%d %H:%M:%S'
start_snow_time, _ = get_fetch_run_time_range(
last_run=last_run, first_fetch=client.fetch_time, look_back=client.look_back, date_format=date_format
)
snow_time_as_date = datetime.strptime(start_snow_time, date_format)
# if we didn't fetch anything, make sure the end time is the same as initial time
end_snow_time = start_snow_time
fetch_limit = last_run.get('limit') or client.sys_param_limit
query = ''
if client.sys_param_query:
query += f'{client.sys_param_query}^'
# get the tickets which occurred after the 'start_snow_time'
query += f'ORDERBY{client.timestamp_field}^{client.timestamp_field}>{start_snow_time}'
if query:
query_params['sysparm_query'] = query
query_params['sysparm_limit'] = fetch_limit # type: ignore[assignment]
demisto.info(f'Fetching ServiceNow incidents. with the query params: {str(query_params)}')
tickets_response = client.send_request(f'table/{client.ticket_type}', 'GET', params=query_params).get('result', [])
count = 0
severity_map = {'1': 3, '2': 2, '3': 1} # Map SNOW severity to Demisto severity for incident creation
for ticket in tickets_response:
ticket.update(get_mirroring())
if client.timestamp_field not in ticket:
raise ValueError(f"The timestamp field [{client.timestamp_field}] does not exist in the ticket")
if count > fetch_limit:
break
try:
if datetime.strptime(ticket[client.timestamp_field], date_format) < snow_time_as_date:
continue
parse_dict_ticket_fields(client, ticket)
except Exception:
pass
incidents.append({
'name': f"ServiceNow Incident {ticket.get(client.incident_name)}",
'labels': [
{'type': _type, 'value': value if isinstance(value, str) else json.dumps(value)}
for _type, value in ticket.items()
],
'details': json.dumps(ticket),
'severity': severity_map.get(ticket.get('severity', ''), 0),
'attachment': get_ticket_file_attachments(client=client, ticket=ticket),
'occurred': ticket.get(client.timestamp_field),
'rawJSON': json.dumps(ticket)
})
count += 1
end_snow_time = ticket.get(client.timestamp_field)
# remove duplicate incidents which were already fetched
incidents = filter_incidents_by_duplicates_and_limit(
incidents_res=incidents, last_run=last_run, fetch_limit=fetch_limit, id_field='name'
)
last_run = update_last_run_object(
last_run=last_run,
incidents=incidents,
fetch_limit=fetch_limit,
start_fetch_time=start_snow_time,
end_fetch_time=end_snow_time,
look_back=client.look_back,
created_time_field='occurred',
id_field='name',
date_format=date_format
)
demisto.debug(f'last run at the end of the incidents fetching {last_run}')
for ticket in incidents:
# the occurred time requires to be in ISO format.
ticket['occurred'] = f"{datetime.strptime(ticket.get('occurred'), date_format).isoformat()}Z"
demisto.setLastRun(last_run)
return incidents
|
11,310 | def _get_secret_key(response):
# type: (PipelineResponse) -> str
# expecting header containing path to secret key file
header = response.http_response.headers.get("Www-Authenticate")
if header is None:
raise CredentialUnavailableError(message="Did not receive a value from Www-Authenticate header")
secret_key = None
# expecting header with structure like 'Basic realm=<file path>'
key_file = header.split("=")[1]
with open(key_file, "r") as file:
try:
secret_key = file.read()
except Exception as error: # pylint:disable=broad-except
# user is expected to have obtained read permission prior to this being called
raise CredentialUnavailableError(message="Could not read file {} contents: {}".format(key_file, error))
return secret_key
| def _get_secret_key(response):
# type: (PipelineResponse) -> str
# expecting header containing path to secret key file
header = response.http_response.headers.get("Www-Authenticate")
if header is None:
raise CredentialUnavailableError(message="Did not receive a value from Www-Authenticate header")
secret_key = None
# expecting header with structure like 'Basic realm=<file path>'
key_file = header.split("=")[1]
with open(key_file, "r") as file:
try:
return file.read()
except Exception as error: # pylint:disable=broad-except
# user is expected to have obtained read permission prior to this being called
raise CredentialUnavailableError(message="Could not read file {} contents: {}".format(key_file, error))
return secret_key
|
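# The Www-Authenticate handling above boils down to a single split on '='; the realm path
# below is made up for illustration only.
header = "Basic realm=/var/opt/azcmagent/tokens/secret.key"
key_file = header.split("=")[1]
print(key_file)  # /var/opt/azcmagent/tokens/secret.key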
1,361 | def plot_dendrogram(model, **kwargs):
# Create linkage matrix and then plot the dendrogram
# create the counts of samples in each node
counts = np.zeros(model.children_.shape[0])
n_samples = len(model.labels_)
for i, merge in enumerate(model.children_):
current_count = 0
for j in [0, 1]:
if merge[j] < n_samples:
current_count += 1
else:
current_count += counts[merge[j] - n_samples]
counts[i] = current_count
linkage_matrix = np.column_stack([model.children_, model.distances_,
counts]).astype(float)
# Plot the corresponding dendrogram
dendrogram(linkage_matrix, **kwargs)
| def plot_dendrogram(model, **kwargs):
# Create linkage matrix and then plot the dendrogram
# create the counts of samples in each node
counts = np.zeros(model.children_.shape[0])
n_samples = len(model.labels_)
for i, merge in enumerate(model.children_):
current_count = 0
        for j in [0, 1]:
            child_idx = merge[j]
            if child_idx < n_samples:
                current_count += 1
            else:
                current_count += counts[child_idx - n_samples]
counts[i] = current_count
linkage_matrix = np.column_stack([model.children_, model.distances_,
counts]).astype(float)
# Plot the corresponding dendrogram
dendrogram(linkage_matrix, **kwargs)
|
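# A typical way to drive plot_dendrogram() above, mirroring the scikit-learn example it
# resembles: distance_threshold=0 with n_clusters=None builds the full tree and populates
# model.distances_. Assumes the function above is in scope; the data here is random.
import numpy as np
import matplotlib.pyplot as plt
from scipy.cluster.hierarchy import dendrogram
from sklearn.cluster import AgglomerativeClustering

X = np.random.default_rng(0).standard_normal((30, 2))
model = AgglomerativeClustering(distance_threshold=0, n_clusters=None).fit(X)
plot_dendrogram(model, truncate_mode="level", p=3)  # the function defined above
plt.show()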
39,712 | def _is_resolved(what):
return (what is None or isinstance(what, (dict, list)))
| def _is_resolved(what):
    return isinstance(what, (dict, list, type(None)))
|
12,995 | def set_total_prices(apps, schema_editor):
OrderLine = apps.get_model("order", "OrderLine")
lines = []
for line in OrderLine.objects.filter(total_price_gross_amount__isnull=True):
line.total_price_gross_amount = line.unit_price_gross_amount * line.quantity
line.total_price_net_amount = line.unit_price_net_amount * line.quantity
lines.append(line)
OrderLine.objects.bulk_update(
lines, ["total_price_gross_amount", "total_price_net_amount"], batch_size=1000
)
| def set_total_prices(apps, schema_editor):
OrderLine = apps.get_model("order", "OrderLine")
lines = []
for line in OrderLine.objects.filter(total_price_gross_amount__isnull=True).iterator():
line.total_price_gross_amount = line.unit_price_gross_amount * line.quantity
line.total_price_net_amount = line.unit_price_net_amount * line.quantity
lines.append(line)
OrderLine.objects.bulk_update(
lines, ["total_price_gross_amount", "total_price_net_amount"], batch_size=1000
)
|
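# For context, a data-migration helper like set_total_prices() above is normally wired up via
# RunPython; the app label and dependency below are placeholders, not the actual migration.
from django.db import migrations

class Migration(migrations.Migration):
    dependencies = [("order", "0001_initial")]  # placeholder dependency
    operations = [
        migrations.RunPython(set_total_prices, migrations.RunPython.noop),
    ]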
46,184 | def check_layout_layers(layout, layers):
"""
Check the layer widget order matches the layers order in the layout
Parameters
----------
layout : QLayout
Layout to test
layers : napar.components.LayersList
LayersList to compare to
Returns
----------
match : bool
Boolean if layout matches layers
"""
layers_layout = [
layout.itemAt(2 * i - 1).widget().layer
for i in range(len(layers), 0, -1)
]
return layers_layout == [l for l in layers]
| def check_layout_layers(layout, layers):
"""
Check the layer widget order matches the layers order in the layout
Parameters
----------
layout : QLayout
Layout to test
layers : napari.components.LayersList
LayersList to compare to
Returns
----------
match : bool
Boolean if layout matches layers
"""
layers_layout = [
layout.itemAt(2 * i - 1).widget().layer
for i in range(len(layers), 0, -1)
]
return layers_layout == [l for l in layers]
|
39,160 | def _get_packages():
exclude = [
"build*",
"test*",
"torchaudio.csrc*",
"third_party*",
"build_tools*",
]
exclude_prototype = False
branch_name = _run_cmd(['git', 'rev-parse', '--abbrev-ref', 'HEAD'])
is_on_tag = _run_cmd(['git', 'describe', '--tags', '--exact-match', '@'])
print('-- On branch:', branch_name)
print('-- On tag:', is_on_tag)
if branch_name is not None and branch_name.startswith('release/'):
exclude_prototype = True
if is_on_tag is not None and re.match(r'v[\d.]+(-rc\d+)?', is_on_tag):
exclude_prototype = True
if exclude_prototype:
print('Excluding torchaudio.prototype from the package.')
exclude.append("torchaudio.prototype")
return find_packages(exclude=exclude)
| def _get_packages():
exclude = [
"build*",
"test*",
"torchaudio.csrc*",
"third_party*",
"tools*",
]
exclude_prototype = False
branch_name = _run_cmd(['git', 'rev-parse', '--abbrev-ref', 'HEAD'])
is_on_tag = _run_cmd(['git', 'describe', '--tags', '--exact-match', '@'])
print('-- On branch:', branch_name)
print('-- On tag:', is_on_tag)
if branch_name is not None and branch_name.startswith('release/'):
exclude_prototype = True
if is_on_tag is not None and re.match(r'v[\d.]+(-rc\d+)?', is_on_tag):
exclude_prototype = True
if exclude_prototype:
print('Excluding torchaudio.prototype from the package.')
exclude.append("torchaudio.prototype")
return find_packages(exclude=exclude)
|
58,296 | def log_umount_blockers(mount: Path) -> None:
blockers: List[Tuple[str, Path]] = []
for d in Path("/proc").iterdir():
if not d.is_dir():
continue
try:
int(d.name)
except ValueError:
continue
if not d.joinpath("fd").exists():
continue
comm = d.joinpath("comm").read_text()
for p in d.joinpath("fd").iterdir():
p = p.resolve()
if not str(p).startswith(str(mount)):
continue
if path_startswith_any(p, ("/dev", "/sys", "/proc")):
continue
blockers += [(comm, p)]
if blockers:
MkosiPrinter.warn(f"Unmounting '{mount}' is blocked by:")
for comm, p in blockers:
MkosiPrinter.warn(f"- {comm} ({p})")
| def log_umount_blockers(mount: Path) -> None:
blockers: List[Tuple[str, Path]] = []
for d in Path("/proc").iterdir():
if not d.is_dir():
continue
if not d.name.isdigit():
continue
if not d.joinpath("fd").exists():
continue
comm = d.joinpath("comm").read_text()
for p in d.joinpath("fd").iterdir():
p = p.resolve()
if not str(p).startswith(str(mount)):
continue
if path_startswith_any(p, ("/dev", "/sys", "/proc")):
continue
blockers += [(comm, p)]
if blockers:
MkosiPrinter.warn(f"Unmounting '{mount}' is blocked by:")
for comm, p in blockers:
MkosiPrinter.warn(f"- {comm} ({p})")
|
3,548 | def _get_doc_content(project, version, doc):
storage_path = project.get_storage_path(
'json',
version_slug=version.slug,
include_file=False,
version_type=version.type,
)
file_path = build_media_storage.join(storage_path, f'{doc}.fjson')
try:
with build_media_storage.open(file_path) as file:
file_contents = file.read()
return json.loads(file_contents)
except Exception: # noqa
log.warning('Unable to read file: %s', file_path)
return None
| def _get_doc_content(project, version, doc):
storage_path = project.get_storage_path(
'json',
version_slug=version.slug,
include_file=False,
version_type=version.type,
)
file_path = build_media_storage.join(storage_path, f'{doc}.fjson')
try:
with build_media_storage.open(file_path) as file:
file_contents = file.read()
return json.loads(file_contents)
except Exception: # noqa
log.warning('Unable to read file. file_path=%s', file_path)
return None
|
31,397 | def main():
"""
PARSE AND VALIDATE INTEGRATION PARAMS
"""
params = demisto.params()
username = params.get('credentials').get('identifier')
password = params.get('credentials').get('password')
# get the service API url
base_url = params.get('server_url')
verify_certificate = not params.get('insecure', False)
proxy = params.get('proxy', False)
command = demisto.command()
LOG(f'Command being called is {command}')
try:
client = Client(
base_url=base_url,
auth=(username, password),
verify=verify_certificate,
proxy=proxy)
if command == 'test-module':
return_results(test_module(client))
elif command == 'cisco-stealthwatch-query-flows-initialize':
return_results(
cisco_stealthwatch_query_flows_initialize_command(client, **demisto.args()))
elif command == 'cisco-stealthwatch-query-flows-status':
return_results(cisco_stealthwatch_query_flows_status_command(client, **demisto.args()))
elif command == 'cisco-stealthwatch-query-flows-results':
return_results(cisco_stealthwatch_query_flows_results_command(client, **demisto.args()))
elif command == 'cisco-stealthwatch-list-tags':
return_results(cisco_stealthwatch_list_tags_command(client, **demisto.args()))
elif command == 'cisco-stealthwatch-get-tag':
return_results(cisco_stealthwatch_get_tag_command(client, **demisto.args()))
elif command == 'cisco-stealthwatch-list-tenants':
return_results(cisco_stealthwatch_list_tenants_command(client, **demisto.args()))
elif command == 'cisco-stealthwatch-get-tag-hourly-traffic-report':
return_results(
cisco_stealthwatch_get_tag_hourly_traffic_report_command(client, **demisto.args()))
elif command == 'cisco-stealthwatch-get-top-alarming-tags':
return_results(
cisco_stealthwatch_get_top_alarming_tags_command(client, **demisto.args()))
elif command == 'cisco-stealthwatch-list-security-events-initialize':
return_results(
cisco_stealthwatch_list_security_events_initialize_command(client,
**demisto.args()))
elif command == 'cisco-stealthwatch-list-security-events-status':
return_results(cisco_stealthwatch_list_security_events_status_command(client,
**demisto.args()))
elif command == 'cisco-stealthwatch-list-security-events-results':
return_results(
cisco_stealthwatch_list_security_events_results_command(client, **demisto.args()))
# Log exceptions
except Exception as error:
if 'Entity not found.' in str(error) or 'Not Found.' in str(error):
return_results("Entity not found: one or more of the IDs you've entered is illegal, "
"or was not found.")
else:
return_error(f'Failed to execute {demisto.command()} command. Error: {str(error)}')
| def main():
"""
PARSE AND VALIDATE INTEGRATION PARAMS
"""
params = demisto.params()
username = params.get('credentials').get('identifier')
password = params.get('credentials').get('password')
# get the service API url
base_url = params.get('server_url')
verify_certificate = not params.get('insecure', False)
proxy = params.get('proxy', False)
command = demisto.command()
demisto.info(f'Command being called is {command}')
try:
client = Client(
base_url=base_url,
auth=(username, password),
verify=verify_certificate,
proxy=proxy)
if command == 'test-module':
return_results(test_module(client))
elif command == 'cisco-stealthwatch-query-flows-initialize':
return_results(
cisco_stealthwatch_query_flows_initialize_command(client, **demisto.args()))
elif command == 'cisco-stealthwatch-query-flows-status':
return_results(cisco_stealthwatch_query_flows_status_command(client, **demisto.args()))
elif command == 'cisco-stealthwatch-query-flows-results':
return_results(cisco_stealthwatch_query_flows_results_command(client, **demisto.args()))
elif command == 'cisco-stealthwatch-list-tags':
return_results(cisco_stealthwatch_list_tags_command(client, **demisto.args()))
elif command == 'cisco-stealthwatch-get-tag':
return_results(cisco_stealthwatch_get_tag_command(client, **demisto.args()))
elif command == 'cisco-stealthwatch-list-tenants':
return_results(cisco_stealthwatch_list_tenants_command(client, **demisto.args()))
elif command == 'cisco-stealthwatch-get-tag-hourly-traffic-report':
return_results(
cisco_stealthwatch_get_tag_hourly_traffic_report_command(client, **demisto.args()))
elif command == 'cisco-stealthwatch-get-top-alarming-tags':
return_results(
cisco_stealthwatch_get_top_alarming_tags_command(client, **demisto.args()))
elif command == 'cisco-stealthwatch-list-security-events-initialize':
return_results(
cisco_stealthwatch_list_security_events_initialize_command(client,
**demisto.args()))
elif command == 'cisco-stealthwatch-list-security-events-status':
return_results(cisco_stealthwatch_list_security_events_status_command(client,
**demisto.args()))
elif command == 'cisco-stealthwatch-list-security-events-results':
return_results(
cisco_stealthwatch_list_security_events_results_command(client, **demisto.args()))
# Log exceptions
except Exception as error:
if 'Entity not found.' in str(error) or 'Not Found.' in str(error):
return_results("Entity not found: one or more of the IDs you've entered is illegal, "
"or was not found.")
else:
return_error(f'Failed to execute {demisto.command()} command. Error: {str(error)}')
|
31,839 | def checkpoint_delete_objects_batch_command(client: Client, object_type: str, name):
printable_result = {}
readable_output = ''
name = argToList(name)
del_list = []
for n in name:
tmp_dict = {'name': n}
del_list.append(tmp_dict)
result = current_result = client.delete_objects_batch(object_type, del_list)
if result:
printable_result = {'task-id': result.get('task-id')}
readable_output = tableToMarkdown('CheckPoint data for delete-objects-batch command:',
printable_result)
command_results = CommandResults(
outputs_prefix='CheckPoint.delete_objects_batch',
outputs_key_field='uid',
readable_output=readable_output,
outputs=printable_result,
raw_response=result
)
return command_results
| def checkpoint_delete_objects_batch_command(client: Client, object_type: str, name):
printable_result = {}
readable_output = ''
object_names = argToList(name)
objects_to_delete = [{'name': object_name} for object_name in object_names]
result = client.delete_objects_batch(object_type, objects_to_delete)
if result:
printable_result = {'task-id': result.get('task-id')}
readable_output = tableToMarkdown('CheckPoint data for delete-objects-batch command:',
printable_result)
command_results = CommandResults(
outputs_prefix='CheckPoint.delete_objects_batch',
outputs_key_field='uid',
readable_output=readable_output,
outputs=printable_result,
raw_response=result
)
return command_results
|
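A minimal sketch of how the batch payload could be built from the command's name argument. The helper below is a stand-in for the XSOAR argToList utility used above (which normalizes a comma-separated string or a list into a Python list); the payload shape of one {'name': ...} dict per object is taken from the snippet itself.
def build_delete_payload(name):
    # Normalize "obj1,obj2" or ["obj1", "obj2"] into a list of object names.
    object_names = name if isinstance(name, list) else [n.strip() for n in str(name).split(',') if n.strip()]
    # delete-objects-batch expects a list of {'name': ...} dicts.
    return [{'name': object_name} for object_name in object_names]

# build_delete_payload("host_1, host_2") -> [{'name': 'host_1'}, {'name': 'host_2'}]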
53,832 | def handle_exception(ex): # pylint: disable=too-many-return-statements
# For error code, follow guidelines at https://docs.python.org/2/library/sys.html#sys.exit,
from jmespath.exceptions import JMESPathTypeError
from msrestazure.azure_exceptions import CloudError
from msrest.exceptions import HttpOperationError, ValidationError, ClientRequestError
from azure.cli.core.azlogging import CommandLoggerContext
with CommandLoggerContext(logger):
if isinstance(ex, JMESPathTypeError):
logger.error("\nInvalid jmespath query supplied for `--query`:\n%s", ex)
logger.error("To learn more about --query, please visit: "
"https://docs.microsoft.com/cli/azure/query-azure-cli?view=azure-cli-latest")
return 1
if isinstance(ex, (CLIError, CloudError, AzureException)):
logger.error(ex.args[0])
try:
logger.error(ex.args[0].error.details[0])
except (AttributeError, IndexError):
pass
return ex.args[1] if len(ex.args) >= 2 else 1
if isinstance(ex, ValidationError):
logger.error('validation error: %s', ex)
return 1
if isinstance(ex, ClientRequestError):
msg = str(ex)
if 'SSLError' in msg:
logger.error("request failed: %s", SSLERROR_TEMPLATE.format(msg))
else:
logger.error("request failed: %s", ex)
return 1
if isinstance(ex, KeyboardInterrupt):
return 1
if isinstance(ex, HttpOperationError):
try:
response_dict = json.loads(ex.response.text)
error = response_dict['error']
# ARM should use ODATA v4. So should try this first.
# http://docs.oasis-open.org/odata/odata-json-format/v4.0/os/odata-json-format-v4.0-os.html#_Toc372793091
if isinstance(error, dict):
code = "{} - ".format(error.get('code', 'Unknown Code'))
message = error.get('message', ex)
logger.error("%s%s", code, message)
else:
logger.error(error)
except (ValueError, KeyError):
logger.error(ex)
return 1
logger.error("The command failed with an unexpected error. Here is the traceback:\n")
logger.exception(ex)
logger.warning("\nTo open an issue, please run: 'az feedback'")
return 1
| def handle_exception(ex): # pylint: disable=too-many-return-statements
# For error code, follow guidelines at https://docs.python.org/2/library/sys.html#sys.exit,
from jmespath.exceptions import JMESPathTypeError
from msrestazure.azure_exceptions import CloudError
from msrest.exceptions import HttpOperationError, ValidationError, ClientRequestError
from azure.cli.core.azlogging import CommandLoggerContext
with CommandLoggerContext(logger):
if isinstance(ex, JMESPathTypeError):
logger.error("\nInvalid jmespath query supplied for `--query`:\n%s", ex)
logger.error("To learn more about --query, please visit: "
"https://docs.microsoft.com/cli/azure/query-azure-cli?view=azure-cli-latest")
return 1
if isinstance(ex, (CLIError, CloudError, AzureException)):
logger.error(ex.args[0])
try:
for detail in ex.args[0].error.details:
logger.error(detail)
except (AttributeError, TypeError):
pass
return ex.args[1] if len(ex.args) >= 2 else 1
if isinstance(ex, ValidationError):
logger.error('validation error: %s', ex)
return 1
if isinstance(ex, ClientRequestError):
msg = str(ex)
if 'SSLError' in msg:
logger.error("request failed: %s", SSLERROR_TEMPLATE.format(msg))
else:
logger.error("request failed: %s", ex)
return 1
if isinstance(ex, KeyboardInterrupt):
return 1
if isinstance(ex, HttpOperationError):
try:
response_dict = json.loads(ex.response.text)
error = response_dict['error']
# ARM should use ODATA v4. So should try this first.
# http://docs.oasis-open.org/odata/odata-json-format/v4.0/os/odata-json-format-v4.0-os.html#_Toc372793091
if isinstance(error, dict):
code = "{} - ".format(error.get('code', 'Unknown Code'))
message = error.get('message', ex)
logger.error("%s%s", code, message)
else:
logger.error(error)
except (ValueError, KeyError):
logger.error(ex)
return 1
logger.error("The command failed with an unexpected error. Here is the traceback:\n")
logger.exception(ex)
logger.warning("\nTo open an issue, please run: 'az feedback'")
return 1
|
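A small standard-library sketch of the ODATA v4 branch above: parse the response body as JSON and format the error as "<code> - <message>". The response text here is hypothetical.
import json

response_text = '{"error": {"code": "ResourceNotFound", "message": "The resource was not found."}}'
error = json.loads(response_text)['error']
if isinstance(error, dict):
    # Mirrors the formatting used in handle_exception: "<code> - <message>".
    print("{} - {}".format(error.get('code', 'Unknown Code'), error.get('message', '')))
else:
    print(error)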
23,667 | def parse_psm3(fbuf, map_variables):
"""
Parse an NSRDB PSM3 weather file (formatted as SAM CSV). The NSRDB
is described in [1]_ and the SAM CSV format is described in [2]_.
.. versionchanged:: 0.9.0
The function now returns a tuple where the first element is a dataframe
and the second element is a dictionary containing metadata. Previous
versions of this function had the return values switched.
Parameters
----------
fbuf: file-like object
File-like object containing data to read.
map_variables: bool
When true, renames columns of the Dataframe to pvlib variable names
where applicable. See variable PSM3_VARIABLE_MAP.
Returns
-------
data : pandas.DataFrame
timeseries data from NREL PSM3
metadata : dict
metadata from NREL PSM3 about the record, see notes for fields
Notes
-----
The return is a tuple with two items. The first item is a dataframe with
the PSM3 timeseries data.
The second item is a dictionary with metadata from NREL PSM3 about the
record containing the following fields:
* Source
* Location ID
* City
* State
* Country
* Latitude
* Longitude
* Time Zone
* Elevation
* Local Time Zone
* Clearsky DHI Units
* Clearsky DNI Units
* Clearsky GHI Units
* Dew Point Units
* DHI Units
* DNI Units
* GHI Units
* Solar Zenith Angle Units
* Temperature Units
* Pressure Units
* Relative Humidity Units
* Precipitable Water Units
* Wind Direction Units
* Wind Speed
* Cloud Type -15
* Cloud Type 0
* Cloud Type 1
* Cloud Type 2
* Cloud Type 3
* Cloud Type 4
* Cloud Type 5
* Cloud Type 6
* Cloud Type 7
* Cloud Type 8
* Cloud Type 9
* Cloud Type 10
* Cloud Type 11
* Cloud Type 12
* Fill Flag 0
* Fill Flag 1
* Fill Flag 2
* Fill Flag 3
* Fill Flag 4
* Fill Flag 5
* Surface Albedo Units
* Version
Examples
--------
>>> # Read a local PSM3 file:
>>> with open(filename, 'r') as f: # doctest: +SKIP
... df, metadata = iotools.parse_psm3(f) # doctest: +SKIP
See Also
--------
pvlib.iotools.read_psm3, pvlib.iotools.get_psm3
References
----------
.. [1] `NREL National Solar Radiation Database (NSRDB)
<https://nsrdb.nrel.gov/>`_
.. [2] `Standard Time Series Data File Format
<https://rredc.nrel.gov/solar/old_data/nsrdb/2005-2012/wfcsv.pdf>`_
"""
# The first 2 lines of the response are headers with metadata
metadata_fields = fbuf.readline().split(',')
metadata_fields[-1] = metadata_fields[-1].strip() # strip trailing newline
metadata_values = fbuf.readline().split(',')
metadata_values[-1] = metadata_values[-1].strip() # strip trailing newline
metadata = dict(zip(metadata_fields, metadata_values))
# the response is all strings, so set some metadata types to numbers
metadata['Local Time Zone'] = int(metadata['Local Time Zone'])
metadata['Time Zone'] = int(metadata['Time Zone'])
metadata['Latitude'] = float(metadata['Latitude'])
metadata['Longitude'] = float(metadata['Longitude'])
metadata['Elevation'] = int(metadata['Elevation'])
# get the column names so we can set the dtypes
columns = fbuf.readline().split(',')
columns[-1] = columns[-1].strip() # strip trailing newline
# Since the header has so many columns, excel saves blank cols in the
# data below the header lines.
columns = [col for col in columns if col != '']
dtypes = dict.fromkeys(columns, float) # all floats except datevec
dtypes.update(Year=int, Month=int, Day=int, Hour=int, Minute=int)
dtypes['Cloud Type'] = int
dtypes['Fill Flag'] = int
data = pd.read_csv(
fbuf, header=None, names=columns, usecols=columns, dtype=dtypes,
delimiter=',', lineterminator='\n') # skip carriage returns \r
# the response 1st 5 columns are a date vector, convert to datetime
dtidx = pd.to_datetime(
data[['Year', 'Month', 'Day', 'Hour', 'Minute']])
# in USA all timezones are integers
tz = 'Etc/GMT%+d' % -metadata['Time Zone']
data.index = pd.DatetimeIndex(dtidx).tz_localize(tz)
if map_variables is None:
warnings.warn(
'PSM3 variable names will be renamed to pvlib conventions by '
'default starting in pvlib 0.11.0. Specify map_variables=True '
'to enable that behavior now, or specify map_variables=False '
'to hide this warning.', pvlibDeprecationWarning)
map_variables = False
if map_variables:
data = data.rename(columns=PSM3_VARIABLE_MAP)
metadata['latitude'] = metadata.pop('Latitude')
metadata['longitude'] = metadata.pop('Longitude')
metadata['elevation'] = metadata.pop('Elevation')
return data, metadata
| def parse_psm3(fbuf, map_variables=None):
"""
Parse an NSRDB PSM3 weather file (formatted as SAM CSV). The NSRDB
is described in [1]_ and the SAM CSV format is described in [2]_.
.. versionchanged:: 0.9.0
The function now returns a tuple where the first element is a dataframe
and the second element is a dictionary containing metadata. Previous
versions of this function had the return values switched.
Parameters
----------
fbuf: file-like object
File-like object containing data to read.
map_variables: bool
When true, renames columns of the Dataframe to pvlib variable names
where applicable. See variable PSM3_VARIABLE_MAP.
Returns
-------
data : pandas.DataFrame
timeseries data from NREL PSM3
metadata : dict
metadata from NREL PSM3 about the record, see notes for fields
Notes
-----
The return is a tuple with two items. The first item is a dataframe with
the PSM3 timeseries data.
The second item is a dictionary with metadata from NREL PSM3 about the
record containing the following fields:
* Source
* Location ID
* City
* State
* Country
* Latitude
* Longitude
* Time Zone
* Elevation
* Local Time Zone
* Clearsky DHI Units
* Clearsky DNI Units
* Clearsky GHI Units
* Dew Point Units
* DHI Units
* DNI Units
* GHI Units
* Solar Zenith Angle Units
* Temperature Units
* Pressure Units
* Relative Humidity Units
* Precipitable Water Units
* Wind Direction Units
* Wind Speed
* Cloud Type -15
* Cloud Type 0
* Cloud Type 1
* Cloud Type 2
* Cloud Type 3
* Cloud Type 4
* Cloud Type 5
* Cloud Type 6
* Cloud Type 7
* Cloud Type 8
* Cloud Type 9
* Cloud Type 10
* Cloud Type 11
* Cloud Type 12
* Fill Flag 0
* Fill Flag 1
* Fill Flag 2
* Fill Flag 3
* Fill Flag 4
* Fill Flag 5
* Surface Albedo Units
* Version
Examples
--------
>>> # Read a local PSM3 file:
>>> with open(filename, 'r') as f: # doctest: +SKIP
... df, metadata = iotools.parse_psm3(f) # doctest: +SKIP
See Also
--------
pvlib.iotools.read_psm3, pvlib.iotools.get_psm3
References
----------
.. [1] `NREL National Solar Radiation Database (NSRDB)
<https://nsrdb.nrel.gov/>`_
.. [2] `Standard Time Series Data File Format
<https://rredc.nrel.gov/solar/old_data/nsrdb/2005-2012/wfcsv.pdf>`_
"""
# The first 2 lines of the response are headers with metadata
metadata_fields = fbuf.readline().split(',')
metadata_fields[-1] = metadata_fields[-1].strip() # strip trailing newline
metadata_values = fbuf.readline().split(',')
metadata_values[-1] = metadata_values[-1].strip() # strip trailing newline
metadata = dict(zip(metadata_fields, metadata_values))
# the response is all strings, so set some metadata types to numbers
metadata['Local Time Zone'] = int(metadata['Local Time Zone'])
metadata['Time Zone'] = int(metadata['Time Zone'])
metadata['Latitude'] = float(metadata['Latitude'])
metadata['Longitude'] = float(metadata['Longitude'])
metadata['Elevation'] = int(metadata['Elevation'])
# get the column names so we can set the dtypes
columns = fbuf.readline().split(',')
columns[-1] = columns[-1].strip() # strip trailing newline
# Since the header has so many columns, excel saves blank cols in the
# data below the header lines.
columns = [col for col in columns if col != '']
dtypes = dict.fromkeys(columns, float) # all floats except datevec
dtypes.update(Year=int, Month=int, Day=int, Hour=int, Minute=int)
dtypes['Cloud Type'] = int
dtypes['Fill Flag'] = int
data = pd.read_csv(
fbuf, header=None, names=columns, usecols=columns, dtype=dtypes,
delimiter=',', lineterminator='\n') # skip carriage returns \r
# the response 1st 5 columns are a date vector, convert to datetime
dtidx = pd.to_datetime(
data[['Year', 'Month', 'Day', 'Hour', 'Minute']])
# in USA all timezones are integers
tz = 'Etc/GMT%+d' % -metadata['Time Zone']
data.index = pd.DatetimeIndex(dtidx).tz_localize(tz)
if map_variables is None:
warnings.warn(
'PSM3 variable names will be renamed to pvlib conventions by '
'default starting in pvlib 0.11.0. Specify map_variables=True '
'to enable that behavior now, or specify map_variables=False '
'to hide this warning.', pvlibDeprecationWarning)
map_variables = False
if map_variables:
data = data.rename(columns=PSM3_VARIABLE_MAP)
metadata['latitude'] = metadata.pop('Latitude')
metadata['longitude'] = metadata.pop('Longitude')
metadata['elevation'] = metadata.pop('Elevation')
return data, metadata
|
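A minimal sketch of the two-line metadata header handling only, using a made-up SAM CSV header; the numeric coercion and the 'Etc/GMT%+d' timezone string mirror the function above.
import io

fbuf = io.StringIO(
    "Source,Location ID,Latitude,Longitude,Time Zone,Elevation,Local Time Zone\n"
    "NSRDB,145809,39.74,-105.18,-7,1820,-7\n"
)
fields = fbuf.readline().strip().split(',')
values = fbuf.readline().strip().split(',')
metadata = dict(zip(fields, values))
# Coerce the numeric fields, as parse_psm3 does.
metadata['Time Zone'] = int(metadata['Time Zone'])
metadata['Local Time Zone'] = int(metadata['Local Time Zone'])
metadata['Latitude'] = float(metadata['Latitude'])
metadata['Longitude'] = float(metadata['Longitude'])
metadata['Elevation'] = int(metadata['Elevation'])
print(metadata['Latitude'], 'Etc/GMT%+d' % -metadata['Time Zone'])  # 39.74 Etc/GMT+7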
2,625 | def power_transform(X, method="yeo-johnson", *, standardize=True, copy=True):
"""
Family of parametric, monotonic transformations to make data more Gaussian-like.
Power transforms are a family of parametric, monotonic transformations
that are applied to make data more Gaussian-like. This is useful for
modeling issues related to heteroscedasticity (non-constant variance),
or other situations where normality is desired.
Currently, power_transform supports the Box-Cox transform and the
Yeo-Johnson transform. The optimal parameter for stabilizing variance and
minimizing skewness is estimated through maximum likelihood.
Box-Cox requires input data to be strictly positive, while Yeo-Johnson
supports both positive or negative data.
By default, zero-mean, unit-variance normalization is applied to the
transformed data.
Read more in the :ref:`User Guide <preprocessing_transformer>`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data to be transformed using a power transformation.
method : {'yeo-johnson', 'box-cox'}, default='yeo-johnson'
The power transform method. Available methods are:
- 'yeo-johnson' [1]_, works with positive and negative values
- 'box-cox' [2]_, only works with strictly positive values
.. versionchanged:: 0.23
The default value of the `method` parameter changed from
'box-cox' to 'yeo-johnson' in 0.23.
standardize : bool, default=True
Set to True to apply zero-mean, unit-variance normalization to the
transformed output.
copy : bool, default=True
Set to False to perform inplace computation during transformation.
Returns
-------
X_trans : ndarray of shape (n_samples, n_features)
The transformed data.
See Also
--------
PowerTransformer : Equivalent transformation with the
Transformer API (e.g. as part of a preprocessing
:class:`~sklearn.pipeline.Pipeline`).
quantile_transform : Maps data to a standard normal distribution with
the parameter `output_distribution='normal'`.
Notes
-----
NaNs are treated as missing values: disregarded in ``fit``, and maintained
in ``transform``.
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
References
----------
.. [1] I.K. Yeo and R.A. Johnson, "A new family of power transformations to
improve normality or symmetry." Biometrika, 87(4), pp.954-959,
(2000).
.. [2] G.E.P. Box and D.R. Cox, "An Analysis of Transformations", Journal
of the Royal Statistical Society B, 26, 211-252 (1964).
Examples
--------
>>> import numpy as np
>>> from sklearn.preprocessing import power_transform
>>> data = [[1, 2], [3, 2], [4, 5]]
>>> print(power_transform(data, method='box-cox'))
[[-1.332... -0.707...]
[ 0.256... -0.707...]
[ 1.076... 1.414...]]
.. warning:: Risk of data leak.
Do not use :func:`~sklearn.preprocessing.power_transform` unless you
know what you are doing. A common mistake is to apply it to the entire
data *before* splitting into training and test sets. This will bias the
model evaluation because information would have leaked from the test
set to the training set.
In general, we recommend using
:class:`~sklearn.preprocessing.PowerTransformer` within a
:ref:`Pipeline <pipeline>` in order to prevent most risks of data
leaking, e.g.: `pipe = make_pipeline(PowerTransformer(),
LogisticRegression())`.
"""
pt = PowerTransformer(method=method, standardize=standardize, copy=copy)
return pt.fit_transform(X)
| def power_transform(X, method="yeo-johnson", *, standardize=True, copy=True):
"""Parametric, monotonic transformation to make data more Gaussian-like.
Power transforms are a family of parametric, monotonic transformations
that are applied to make data more Gaussian-like. This is useful for
modeling issues related to heteroscedasticity (non-constant variance),
or other situations where normality is desired.
Currently, power_transform supports the Box-Cox transform and the
Yeo-Johnson transform. The optimal parameter for stabilizing variance and
minimizing skewness is estimated through maximum likelihood.
Box-Cox requires input data to be strictly positive, while Yeo-Johnson
supports both positive or negative data.
By default, zero-mean, unit-variance normalization is applied to the
transformed data.
Read more in the :ref:`User Guide <preprocessing_transformer>`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data to be transformed using a power transformation.
method : {'yeo-johnson', 'box-cox'}, default='yeo-johnson'
The power transform method. Available methods are:
- 'yeo-johnson' [1]_, works with positive and negative values
- 'box-cox' [2]_, only works with strictly positive values
.. versionchanged:: 0.23
The default value of the `method` parameter changed from
'box-cox' to 'yeo-johnson' in 0.23.
standardize : bool, default=True
Set to True to apply zero-mean, unit-variance normalization to the
transformed output.
copy : bool, default=True
Set to False to perform inplace computation during transformation.
Returns
-------
X_trans : ndarray of shape (n_samples, n_features)
The transformed data.
See Also
--------
PowerTransformer : Equivalent transformation with the
Transformer API (e.g. as part of a preprocessing
:class:`~sklearn.pipeline.Pipeline`).
quantile_transform : Maps data to a standard normal distribution with
the parameter `output_distribution='normal'`.
Notes
-----
NaNs are treated as missing values: disregarded in ``fit``, and maintained
in ``transform``.
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
References
----------
.. [1] I.K. Yeo and R.A. Johnson, "A new family of power transformations to
improve normality or symmetry." Biometrika, 87(4), pp.954-959,
(2000).
.. [2] G.E.P. Box and D.R. Cox, "An Analysis of Transformations", Journal
of the Royal Statistical Society B, 26, 211-252 (1964).
Examples
--------
>>> import numpy as np
>>> from sklearn.preprocessing import power_transform
>>> data = [[1, 2], [3, 2], [4, 5]]
>>> print(power_transform(data, method='box-cox'))
[[-1.332... -0.707...]
[ 0.256... -0.707...]
[ 1.076... 1.414...]]
.. warning:: Risk of data leak.
Do not use :func:`~sklearn.preprocessing.power_transform` unless you
know what you are doing. A common mistake is to apply it to the entire
data *before* splitting into training and test sets. This will bias the
model evaluation because information would have leaked from the test
set to the training set.
In general, we recommend using
:class:`~sklearn.preprocessing.PowerTransformer` within a
:ref:`Pipeline <pipeline>` in order to prevent most risks of data
leaking, e.g.: `pipe = make_pipeline(PowerTransformer(),
LogisticRegression())`.
"""
pt = PowerTransformer(method=method, standardize=standardize, copy=copy)
return pt.fit_transform(X)
|
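A quick usage sketch of the public API described in the docstring: Yeo-Johnson accepts negative values, while Box-Cox requires strictly positive input and is expected to reject this data.
import numpy as np
from sklearn.preprocessing import power_transform

data = np.array([[-1.0, 2.0], [3.0, 0.5], [4.0, 5.0]])  # contains a negative value
print(power_transform(data, method='yeo-johnson'))       # works with negative data
try:
    power_transform(data, method='box-cox')               # box-cox needs strictly positive input
except ValueError as exc:
    print('box-cox rejected the data:', exc)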
724 | def _get_handler(settings):
""" Return a log handler object according to settings """
filename = settings.get('LOG_FILE')
if filename:
encoding = settings.get('LOG_ENCODING')
if settings.get("LOG_ROTATING") is True:
max_bytes = settings.get('LOG_MAX_BYTES', 0)
log_backup_count = settings.get('LOG_BACKUP_COUNT', 0)
handler = RotatingFileHandler(filename, maxBytes=max_bytes,
backupCount=log_backup_count,
encoding=encoding)
else:
handler = logging.FileHandler(filename, encoding=encoding)
elif settings.getbool('LOG_ENABLED'):
handler = logging.StreamHandler()
else:
handler = logging.NullHandler()
formatter = logging.Formatter(
fmt=settings.get('LOG_FORMAT'),
datefmt=settings.get('LOG_DATEFORMAT')
)
handler.setFormatter(formatter)
handler.setLevel(settings.get('LOG_LEVEL'))
if settings.getbool('LOG_SHORT_NAMES'):
handler.addFilter(TopLevelFormatter(['scrapy']))
return handler
| def _get_handler(settings):
""" Return a log handler object according to settings """
filename = settings.get('LOG_FILE')
if filename:
encoding = settings.get('LOG_ENCODING')
if settings.getbool("LOG_ROTATING") is True:
max_bytes = settings.get('LOG_MAX_BYTES', 0)
log_backup_count = settings.get('LOG_BACKUP_COUNT', 0)
handler = RotatingFileHandler(filename, maxBytes=max_bytes,
backupCount=log_backup_count,
encoding=encoding)
else:
handler = logging.FileHandler(filename, encoding=encoding)
elif settings.getbool('LOG_ENABLED'):
handler = logging.StreamHandler()
else:
handler = logging.NullHandler()
formatter = logging.Formatter(
fmt=settings.get('LOG_FORMAT'),
datefmt=settings.get('LOG_DATEFORMAT')
)
handler.setFormatter(formatter)
handler.setLevel(settings.get('LOG_LEVEL'))
if settings.getbool('LOG_SHORT_NAMES'):
handler.addFilter(TopLevelFormatter(['scrapy']))
return handler
|
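A standard-library sketch of the rotating-vs-plain file handler choice made above, with a plain dict standing in for the Scrapy settings object; the key names are taken from the snippet.
import logging
from logging.handlers import RotatingFileHandler

settings = {  # stand-in for the Scrapy settings object
    'LOG_FILE': 'scrapy.log', 'LOG_ENCODING': 'utf-8', 'LOG_ROTATING': True,
    'LOG_MAX_BYTES': 1024 * 1024, 'LOG_BACKUP_COUNT': 3, 'LOG_LEVEL': 'INFO',
    'LOG_FORMAT': '%(asctime)s [%(name)s] %(levelname)s: %(message)s',
    'LOG_DATEFORMAT': '%Y-%m-%d %H:%M:%S',
}

if settings.get('LOG_ROTATING'):
    handler = RotatingFileHandler(settings['LOG_FILE'], maxBytes=settings['LOG_MAX_BYTES'],
                                  backupCount=settings['LOG_BACKUP_COUNT'],
                                  encoding=settings['LOG_ENCODING'])
else:
    handler = logging.FileHandler(settings['LOG_FILE'], encoding=settings['LOG_ENCODING'])
handler.setFormatter(logging.Formatter(fmt=settings['LOG_FORMAT'], datefmt=settings['LOG_DATEFORMAT']))
handler.setLevel(settings['LOG_LEVEL'])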
22,718 | def _prepare_environ(workspace):
new_environ = os.environ.copy()
new_environ['TMPDIR'] = workspace
# So, pytest is nice, and a little to for our usage.
# In order to help user to call seamlessly any piece of python code without requiring to
# install it as a full-fledged setuptools distribution for instance, it injects the current
# path into the PYTHONPATH environment variable. This allows the python interpreter to import
# as modules any python file available in current working directory.
# See https://docs.pytest.org/en/3.2.5/pythonpath.html for the explanation and description.
# However this behavior is not good in integration tests, in particular the nginx oldest ones.
# Indeed during these kind of tests certbot is installed as a transitive dependency to
# certbot-nginx. Here is the trick: this certbot version is not necessarily the same than
# the certbot codebase lying in current working directory. For instance in oldest tests
# certbot==0.36.0 may be installed while the codebase corresponds to certbot==0.37.0.dev0.
# If at this point PYTHONPATH is set up like pytest does, invoking certbot will import the
# modules from the codebase (0.37.0.dev0), not from the required/installed version (0.36.0).
# This will lead to funny and totally incomprehensible errors. To avoid that, we ensure that if
# PYTHONPATH is set, it does not contain the current working directory.
if new_environ.get('PYTHONPATH'):
# certbot_integration_tests.__file__ is:
# '/path/to/certbot/certbot-ci/certbot_integration_tests/__init__.pyc'
# ... and we want '/path/to/certbot'
certbot_root = os.path.dirname(os.path.dirname(os.path.dirname(certbot_integration_tests.__file__)))
python_paths = [path for path in new_environ['PYTHONPATH'].split(':') if path != certbot_root]
new_environ['PYTHONPATH'] = ':'.join(python_paths)
return new_environ
| def _prepare_environ(workspace):
new_environ = os.environ.copy()
new_environ['TMPDIR'] = workspace
# So, pytest is nice, and a little too nice for our usage.
# In order to help user to call seamlessly any piece of python code without requiring to
# install it as a full-fledged setuptools distribution for instance, it injects the current
# path into the PYTHONPATH environment variable. This allows the python interpreter to import
# as modules any python file available in current working directory.
# See https://docs.pytest.org/en/3.2.5/pythonpath.html for the explanation and description.
# However this behavior is not good in integration tests, in particular the nginx oldest ones.
# Indeed during these kind of tests certbot is installed as a transitive dependency to
# certbot-nginx. Here is the trick: this certbot version is not necessarily the same than
# the certbot codebase lying in current working directory. For instance in oldest tests
# certbot==0.36.0 may be installed while the codebase corresponds to certbot==0.37.0.dev0.
# If at this point PYTHONPATH is set up like pytest does, invoking certbot will import the
# modules from the codebase (0.37.0.dev0), not from the required/installed version (0.36.0).
# This will lead to funny and totally incomprehensible errors. To avoid that, we ensure that if
# PYTHONPATH is set, it does not contain the current working directory.
if new_environ.get('PYTHONPATH'):
# certbot_integration_tests.__file__ is:
# '/path/to/certbot/certbot-ci/certbot_integration_tests/__init__.pyc'
# ... and we want '/path/to/certbot'
certbot_root = os.path.dirname(os.path.dirname(os.path.dirname(certbot_integration_tests.__file__)))
python_paths = [path for path in new_environ['PYTHONPATH'].split(':') if path != certbot_root]
new_environ['PYTHONPATH'] = ':'.join(python_paths)
return new_environ
|
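A minimal sketch of the PYTHONPATH-stripping idea: copy the environment and drop one project root from the path list. The root path is hypothetical, and os.pathsep is used here for portability where the snippet above hard-codes ':'.
import os

new_environ = os.environ.copy()
certbot_root = '/path/to/certbot'  # hypothetical project root to exclude
if new_environ.get('PYTHONPATH'):
    python_paths = [p for p in new_environ['PYTHONPATH'].split(os.pathsep) if p != certbot_root]
    new_environ['PYTHONPATH'] = os.pathsep.join(python_paths)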
569 | def get_user_id(form_id):
key = f'xform-{form_id}-user_id'
user_id = cache.get(key)
if not user_id:
try:
user_id = FormAccessorSQL.get_form(form_id).metadata.userID
except XFormNotFound:
return None
cache.set(key, user_id, 12 * 60 * 60)
return user_id
| def get_user_id(form_id):
key = f'xform-{form_id}-user_id'
user_id = cache.get(key)
if not user_id:
try:
user_id = FormAccessorSQL.get_form(form_id).user_id
except XFormNotFound:
return None
cache.set(key, user_id, 12 * 60 * 60)
return user_id
|
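A generic cache-aside sketch of the pattern above (look up, compute on a miss, store with a 12-hour TTL). It assumes a Django-style cache object with get/set taking a timeout in seconds; unlike the snippet, it only skips caching when the computed value is None.
def get_cached_value(cache, key, compute, timeout=12 * 60 * 60):
    """Cache-aside helper: return the cached value, computing and storing it on a miss."""
    value = cache.get(key)
    if value is None:
        value = compute()
        if value is not None:
            cache.set(key, value, timeout)
    return value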
38,816 | def main(args):
"""
function for the initial repositories fetch and manual repositories updates
"""
if not args:
_show_usage()
sys.exit(0)
logdir = os.path.dirname(CONFIG["path.log.fetch"])
if not os.path.exists(logdir):
os.makedirs()
logging.basicConfig(
filename=CONFIG["path.log.fetch"],
level=logging.DEBUG,
format='%(asctime)s %(message)s')
if args[0] == 'fetch-all':
fetch_all()
elif args[0] == 'update':
update_by_name(sys.argv[1])
elif args[0] == 'update-all':
update_all()
else:
_show_usage()
sys.exit(0)
| def main(args):
"""
function for the initial repositories fetch and manual repositories updates
"""
if not args:
_show_usage()
sys.exit(0)
logdir = os.path.dirname(CONFIG["path.log.fetch"])
if not os.path.exists(logdir):
os.makedirs(logdir)
logging.basicConfig(
filename=CONFIG["path.log.fetch"],
level=logging.DEBUG,
format='%(asctime)s %(message)s')
if args[0] == 'fetch-all':
fetch_all()
elif args[0] == 'update':
update_by_name(sys.argv[1])
elif args[0] == 'update-all':
update_all()
else:
_show_usage()
sys.exit(0)
|
43,811 | def _inner_out_flow_constraint_hamiltonian(
graph: nx.DiGraph, node: int
) -> Tuple[List[float], List[qml.operation.Observable]]:
r"""Calculates the expanded inner portion of the Hamiltonian in :func:`out_flow_constraint`.
For a given :math:`i`, this function returns:
.. math::
d_{i}^{out}(d_{i}^{out} - 2)\mathbb{I}
- 2(d_{i}^{out}-1)\sum_{j,(i,j)\in E}\hat{Z}_{ij} +
( \sum_{j,(i,j)\in E}\hat{Z}_{ij}) )^{2}
Args:
graph (nx.DiGraph): the graph specifying possible edges
node (int): a fixed node
Returns:
Tuple[List[float], List[qml.operation.Observable]]: The list of coefficients and list of
observables of the inner part of the node-constraint Hamiltonian.
"""
coeffs = []
ops = []
edges_to_qubits = edges_to_wires(graph)
out_edges = graph.out_edges(node)
d = len(out_edges)
for edge in out_edges:
wire = (edges_to_qubits[edge],)
coeffs.append(1)
ops.append(qml.PauliZ(wire))
coeffs, ops = _square_hamiltonian_terms(coeffs, ops)
for edge in out_edges:
wire = (edges_to_qubits[edge],)
coeffs.append(-2 * (d - 1))
ops.append(qml.PauliZ(wire))
coeffs.append(d * (d - 2))
ops.append(qml.Identity(0))
coeffs, ops = _collect_duplicates(coeffs, ops)
hamiltonian = qml.Hamiltonian(coeffs, ops)
return hamiltonian
| def _inner_out_flow_constraint_hamiltonian(
graph: nx.DiGraph, node: int
) -> Tuple[List[float], List[qml.operation.Observable]]:
r"""Calculates the inner portion of the Hamiltonian in :func:`out_flow_constraint`.
For a given :math:`i`, this function returns:
.. math::
d_{i}^{out}(d_{i}^{out} - 2)\mathbb{I}
- 2(d_{i}^{out}-1)\sum_{j,(i,j)\in E}\hat{Z}_{ij} +
( \sum_{j,(i,j)\in E}\hat{Z}_{ij}) )^{2}
Args:
graph (nx.DiGraph): the graph specifying possible edges
node (int): a fixed node
Returns:
Tuple[List[float], List[qml.operation.Observable]]: The list of coefficients and list of
observables of the inner part of the node-constraint Hamiltonian.
"""
coeffs = []
ops = []
edges_to_qubits = edges_to_wires(graph)
out_edges = graph.out_edges(node)
d = len(out_edges)
for edge in out_edges:
wire = (edges_to_qubits[edge],)
coeffs.append(1)
ops.append(qml.PauliZ(wire))
coeffs, ops = _square_hamiltonian_terms(coeffs, ops)
for edge in out_edges:
wire = (edges_to_qubits[edge],)
coeffs.append(-2 * (d - 1))
ops.append(qml.PauliZ(wire))
coeffs.append(d * (d - 2))
ops.append(qml.Identity(0))
coeffs, ops = _collect_duplicates(coeffs, ops)
hamiltonian = qml.Hamiltonian(coeffs, ops)
return hamiltonian
|
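A small networkx sketch of the quantities the constraint is built from: the out-edges of a fixed node and the out-degree d that sets the Identity and PauliZ coefficients. The wire mapping and Hamiltonian assembly are left to the PennyLane helpers above.
import networkx as nx

graph = nx.DiGraph([(0, 1), (0, 2), (1, 2)])
node = 0
out_edges = list(graph.out_edges(node))   # [(0, 1), (0, 2)]
d = len(out_edges)                        # d_i^out = 2
identity_coeff = d * (d - 2)              # coefficient of the Identity term (0 when d == 2)
pauli_z_coeff = -2 * (d - 1)              # coefficient of each single PauliZ term (-2 here)
print(out_edges, identity_coeff, pauli_z_coeff)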
9,071 | def _receive_cap_nak(bot: SopelWrapper, trigger: Trigger) -> None:
was_completed = bot.cap_requests.is_complete
cap_ack = bot.capabilities.handle_nak(bot, trigger)
try:
result: Optional[
List[Tuple[bool, Optional[plugin.CapabilityNegotiation]]]
] = bot.cap_requests.deny(bot, cap_ack)
except config.ConfigurationError as error:
LOGGER.error(
'Configuration error on NAK capability "%s": %s',
', '.join(cap_ack),
error,
)
bot.write(('CAP', 'END')) # close negotiation now
bot.quit('Wrong configuration.')
return None
except Exception as error:
LOGGER.exception(
'Error on NAK capability "%s": %s',
', '.join(cap_ack),
error,
)
bot.write(('CAP', 'END')) # close negotiation now
bot.quit('Error negotiating capabilities.')
return None
if result is None:
# a plugin may have request the capability without using the proper
# interface: ignore
return None
_handle_cap_acknowledgement(bot, cap_ack, result, was_completed)
| def _receive_cap_nak(bot: SopelWrapper, trigger: Trigger) -> None:
was_completed = bot.cap_requests.is_complete
cap_ack = bot.capabilities.handle_nak(bot, trigger)
try:
result: Optional[
List[Tuple[bool, Optional[plugin.CapabilityNegotiation]]]
] = bot.cap_requests.deny(bot, cap_ack)
except config.ConfigurationError as error:
LOGGER.error(
'Configuration error on NAK capability "%s": %s',
', '.join(cap_ack),
error,
)
bot.write(('CAP', 'END')) # close negotiation now
bot.quit('Wrong configuration.')
return None
except Exception as error:
LOGGER.exception(
'Error on NAK capability "%s": %s',
', '.join(cap_ack),
error,
)
bot.write(('CAP', 'END')) # close negotiation now
bot.quit('Error negotiating capabilities.')
return None
if result is None:
# a plugin may have requested the capability without using the proper
# interface: ignore
return None
_handle_cap_acknowledgement(bot, cap_ack, result, was_completed)
|
32,770 | def _api_template_string(wrapped, instance, args, kwargs):
tracer = _get_tracer(instance)
with tracer.trace("responder.render_template"):
return wrapped(*args, **kwargs)
| def _api_template_string(wrapped, instance, args, kwargs):
tracer = _get_tracer(instance)
with tracer.trace("responder.render_template", span_type=SpanTypes.TEMPLATE):
return wrapped(*args, **kwargs)
|
3,050 | def test_type_error_complex_index_non_scalar_return():
# GH 31605
def fct(group):
return group[1].values.flatten()
df = pd.DataFrame([("A", 1), ("A", 2), ("A", 3), ("B", 4), ("B", 5), ("C", np.nan)])
result = df.groupby(0).apply(fct)
expected = pd.Series(
[[1.0, 2.0, 3.0], [4.0, 5.0], [np.nan]], index=pd.Index(list("ABC"), name=0)
)
tm.assert_series_equal(result, expected)
| def test_apply_function_returns_numpy_array():
# GH 31605
def fct(group):
return group[1].values.flatten()
df = pd.DataFrame([("A", 1), ("A", 2), ("A", 3), ("B", 4), ("B", 5), ("C", np.nan)])
result = df.groupby(0).apply(fct)
expected = pd.Series(
[[1.0, 2.0, 3.0], [4.0, 5.0], [np.nan]], index=pd.Index(list("ABC"), name=0)
)
tm.assert_series_equal(result, expected)
|
30,555 | def fetch_incidents(service):
last_run = demisto.getLastRun() and demisto.getLastRun()['time']
search_offset = demisto.getLastRun().get('offset', 0)
incidents = []
current_time_for_fetch = datetime.utcnow()
if demisto.get(demisto.params(), 'timezone'):
timezone = demisto.params()['timezone']
current_time_for_fetch = current_time_for_fetch + timedelta(minutes=int(timezone))
now = current_time_for_fetch.strftime(SPLUNK_TIME_FORMAT)
if demisto.get(demisto.params(), 'useSplunkTime'):
now = get_current_splunk_time(service)
current_time_for_fetch = datetime.strptime(now, SPLUNK_TIME_FORMAT)
if len(last_run) == 0:
fetch_time_in_minutes = parse_time_to_minutes()
current_time_for_fetch = current_time_for_fetch - timedelta(minutes=fetch_time_in_minutes)
last_run = current_time_for_fetch.strftime(SPLUNK_TIME_FORMAT)
earliest_fetch_time_fieldname = demisto.params().get("earliest_fetch_time_fieldname", "index_earliest")
latest_fetch_time_fieldname = demisto.params().get("latest_fetch_time_fieldname", "index_latest")
kwargs_oneshot = {earliest_fetch_time_fieldname: last_run,
latest_fetch_time_fieldname: now, "count": FETCH_LIMIT, 'offset': search_offset}
searchquery_oneshot = demisto.params()['fetchQuery']
if demisto.get(demisto.params(), 'extractFields'):
extractFields = demisto.params()['extractFields']
extra_raw_arr = extractFields.split(',')
for field in extra_raw_arr:
field_trimmed = field.strip()
searchquery_oneshot = searchquery_oneshot + ' | eval ' + field_trimmed + '=' + field_trimmed
oneshotsearch_results = service.jobs.oneshot(searchquery_oneshot, **kwargs_oneshot) # type: ignore
reader = results.ResultsReader(oneshotsearch_results)
for item in reader:
inc = notable_to_incident(item)
incidents.append(inc)
demisto.incidents(incidents)
if len(incidents) < FETCH_LIMIT:
demisto.setLastRun({'time': now, 'offset': 0})
else:
demisto.setLastRun({'time': last_run, 'offset': search_offset + FETCH_LIMIT})
| def fetch_incidents(service):
last_run = demisto.getLastRun() and demisto.getLastRun()['time']
search_offset = demisto.getLastRun().get('offset', 0)
incidents = []
current_time_for_fetch = datetime.utcnow()
if demisto.get(demisto.params(), 'timezone'):
timezone = demisto.params()['timezone']
current_time_for_fetch = current_time_for_fetch + timedelta(minutes=int(timezone))
now = current_time_for_fetch.strftime(SPLUNK_TIME_FORMAT)
if demisto.get(demisto.params(), 'useSplunkTime'):
now = get_current_splunk_time(service)
current_time_in_splunk = datetime.strptime(now, SPLUNK_TIME_FORMAT)
if len(last_run) == 0:
fetch_time_in_minutes = parse_time_to_minutes()
current_time_for_fetch = current_time_for_fetch - timedelta(minutes=fetch_time_in_minutes)
last_run = current_time_for_fetch.strftime(SPLUNK_TIME_FORMAT)
earliest_fetch_time_fieldname = demisto.params().get("earliest_fetch_time_fieldname", "index_earliest")
latest_fetch_time_fieldname = demisto.params().get("latest_fetch_time_fieldname", "index_latest")
kwargs_oneshot = {earliest_fetch_time_fieldname: last_run,
latest_fetch_time_fieldname: now, "count": FETCH_LIMIT, 'offset': search_offset}
searchquery_oneshot = demisto.params()['fetchQuery']
if demisto.get(demisto.params(), 'extractFields'):
extractFields = demisto.params()['extractFields']
extra_raw_arr = extractFields.split(',')
for field in extra_raw_arr:
field_trimmed = field.strip()
searchquery_oneshot = searchquery_oneshot + ' | eval ' + field_trimmed + '=' + field_trimmed
oneshotsearch_results = service.jobs.oneshot(searchquery_oneshot, **kwargs_oneshot) # type: ignore
reader = results.ResultsReader(oneshotsearch_results)
for item in reader:
inc = notable_to_incident(item)
incidents.append(inc)
demisto.incidents(incidents)
if len(incidents) < FETCH_LIMIT:
demisto.setLastRun({'time': now, 'offset': 0})
else:
demisto.setLastRun({'time': last_run, 'offset': search_offset + FETCH_LIMIT})
|
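A minimal sketch of the first-run time window built above: look back N minutes and format both bounds with the Splunk timestamp format. The exact format string and the field names are assumptions based on the defaults in the snippet.
from datetime import datetime, timedelta

SPLUNK_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S'   # assumed to match the constant used above
fetch_time_in_minutes = 10                 # e.g. the parsed first-fetch window

now_dt = datetime.utcnow()
last_run = (now_dt - timedelta(minutes=fetch_time_in_minutes)).strftime(SPLUNK_TIME_FORMAT)
now = now_dt.strftime(SPLUNK_TIME_FORMAT)
kwargs_oneshot = {'index_earliest': last_run, 'index_latest': now, 'count': 50, 'offset': 0}
print(kwargs_oneshot)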
31,973 | def convert_to_unicode(s, is_msg_header=True):
global ENCODINGS_TYPES
try:
res = '' # utf encoded result
if is_msg_header: # Mime encoded words used on message headers only
try:
word_mime_encoded = s and MIME_ENCODED_WORD.search(s)
if word_mime_encoded:
word_mime_decoded = mime_decode(word_mime_encoded)
if word_mime_decoded and not MIME_ENCODED_WORD.search(word_mime_decoded):
# ensure decoding was successful
return word_mime_decoded
except Exception as e:
# in case we failed to mine-decode, we continue and try to decode
demisto.debug('Failed decoding mime-encoded string: {}. Will try regular decoding.'.format(str(e)))
for decoded_s, encoding in decode_header(s): # return a list of pairs(decoded, charset)
if encoding:
try:
res += decoded_s.decode(encoding).encode('utf-8')
except UnicodeDecodeError:
demisto.debug('Failed to decode encoded_string')
replace_decoded = decoded_s.decode(encoding, errors='replace').encode('utf-8')
demisto.debug('Decoded string with replace usage {}'.format(replace_decoded))
res += replace_decoded
ENCODINGS_TYPES.add(encoding)
else:
demisto.debug('Could not find the encoding type of the string, Decoded by default with utf-8 ')
res += decoded_s.decode('utf-8', errors='replace').encode('utf-8')
return res.strip()
except Exception:
for file_data in ENCODINGS_TYPES:
try:
s = s.decode(file_data).encode('utf-8').strip()
break
except: # noqa: E722
pass
return s
| def convert_to_unicode(s, is_msg_header=True):
global ENCODINGS_TYPES
try:
res = '' # utf encoded result
if is_msg_header: # Mime encoded words used on message headers only
try:
word_mime_encoded = s and MIME_ENCODED_WORD.search(s)
if word_mime_encoded:
word_mime_decoded = mime_decode(word_mime_encoded)
if word_mime_decoded and not MIME_ENCODED_WORD.search(word_mime_decoded):
# ensure decoding was successful
return word_mime_decoded
except Exception as e:
# in case we failed to mime-decode, we continue and try to decode
demisto.debug('Failed decoding mime-encoded string: {}. Will try regular decoding.'.format(str(e)))
for decoded_s, encoding in decode_header(s): # return a list of pairs(decoded, charset)
if encoding:
try:
res += decoded_s.decode(encoding).encode('utf-8')
except UnicodeDecodeError:
demisto.debug('Failed to decode encoded_string')
replace_decoded = decoded_s.decode(encoding, errors='replace').encode('utf-8')
demisto.debug('Decoded string with replace usage {}'.format(replace_decoded))
res += replace_decoded
ENCODINGS_TYPES.add(encoding)
else:
demisto.debug('Could not find the encoding type of the string, decoding by default with utf-8')
res += decoded_s.decode('utf-8', errors='replace').encode('utf-8')
return res.strip()
except Exception:
for file_data in ENCODINGS_TYPES:
try:
s = s.decode(file_data).encode('utf-8').strip()
break
except: # noqa: E722
pass
return s
|
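A Python 3 sketch of decoding a MIME encoded-word header with the standard library's decode_header, which is the same primitive the function above loops over (the snippet itself targets Python 2, hence its .encode('utf-8') calls). The sample header is illustrative.
from email.header import decode_header

header = '=?utf-8?b?0J/RgNC40LLQtdGC?='   # UTF-8 encoded-word (a Cyrillic greeting)
parts = []
for decoded, charset in decode_header(header):   # list of (decoded, charset) pairs
    if isinstance(decoded, bytes):
        parts.append(decoded.decode(charset or 'utf-8', errors='replace'))
    else:
        parts.append(decoded)
print(''.join(parts).strip())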
20,560 | def main(argv=None):
parser = get_parser()
arguments = parser.parse_args(argv)
verbose = arguments.v
set_loglevel(verbose=verbose)
# See if there's a configuration file and import those options
if arguments.config is not None:
print('configuring')
with open(arguments.config, 'r') as conf:
_, ext = os.path.splitext(arguments.config)
if ext == '.json':
config = json.load(conf)
if ext == '.yml' or ext == '.yaml':
config = yaml.load(conf, Loader=yaml.Loader)
# Warn people if they're overriding their config file
if len(argv) > 2:
warnings.warn(UserWarning('Using the `-config|-c` flag with additional arguments is discouraged'))
# Check for unsupported arguments
orig_keys = set(vars(arguments).keys())
config_keys = set(config.keys())
if orig_keys != config_keys:
for k in config_keys.difference(orig_keys):
del config[k] # Remove the unknown key
warnings.warn(UserWarning(
'Unknown key "{}" found in your configuration file, ignoring.'.format(k)))
# Update the default to match the config
parser.set_defaults(**config)
# Reparse the arguments
arguments = parser.parse_args(argv)
if arguments.script is None:
parser.error("The -script argument must be provided, either via command-line or via the -config/-c argument.")
# Set up email notifications if desired
do_email = arguments.email_to is not None
if do_email:
email_to = arguments.email_to
if arguments.email_from is not None:
email_from = arguments.email_from
else:
email_from = arguments.email_to
smtp_host, smtp_port = arguments.email_host.split(":")
smtp_port = int(smtp_port)
email_pass = getpass('Please input your email password:\n')
def send_notification(subject, message):
send_email(email_to, email_from,
subject=subject,
message=message,
passwd=email_pass,
smtp_host=smtp_host,
smtp_port=smtp_port)
while True:
send_test = input('Would you like to send a test email to validate your settings? [Y/n]:\n')
if send_test.lower() in ['', 'y', 'n']:
break
else:
print('Please input y or n')
if send_test.lower() in ['', 'y']:
send_notification('sct_run_batch: test notification', 'Looks good')
# Set up output directories and create them if they don't already exist
path_output = os.path.abspath(os.path.expanduser(arguments.path_output))
path_results = os.path.join(path_output, 'results')
path_data_processed = os.path.join(path_output, 'data_processed')
path_log = os.path.join(path_output, 'log')
path_qc = os.path.join(path_output, 'qc')
path_segmanual = os.path.abspath(os.path.expanduser(arguments.path_segmanual))
script = os.path.abspath(os.path.expanduser(arguments.script))
path_data = os.path.abspath(os.path.expanduser(arguments.path_data))
for pth in [path_output, path_results, path_data_processed, path_log, path_qc]:
os.makedirs(pth, exist_ok=True)
# Check that the script can be found
if not os.path.exists(script):
raise FileNotFoundError('Couldn\'t find the script script at {}'.format(script))
# Setup overall log
batch_log = open(os.path.join(path_log, arguments.batch_log), 'w')
# Duplicate init_sct message to batch_log
print('\n--\nSpinal Cord Toolbox ({})\n'.format(__version__), file=batch_log, flush=True)
# Tee IO to batch_log and std(out/err)
orig_stdout = sys.stdout
orig_stderr = sys.stderr
sys.stdout = Tee(batch_log, orig_stdout)
sys.stderr = Tee(batch_log, orig_stderr)
def reset_streams():
sys.stdout = orig_stdout
sys.stderr = orig_stderr
# Display OS
print("INFO SYSTEM")
print("-----------")
platform_running = sys.platform
if platform_running.startswith('darwin'):
os_running = 'osx'
elif platform_running.startswith('linux'):
os_running = 'linux'
elif platform_running.startswith('win32'):
os_running = 'windows'
else:
os_running = platform_running
print('OS: ' + os_running + ' (' + platform.platform() + ')')
# Display number of CPU cores
print('CPU cores: Available: {} | Threads used by ITK Programs: {}'.format(multiprocessing.cpu_count(), arguments.itk_threads))
# Display RAM available
print("RAM: Total {} MB | Available {} MB | Used {} MB".format(
int(psutil.virtual_memory().total / 1024 / 1024),
int(psutil.virtual_memory().available / 1024 / 1024),
int(psutil.virtual_memory().used / 1024 / 1024),
))
# Log the current arguments (in yaml because it's cleaner)
print('\nINPUT ARGUMENTS')
print("---------------")
print(yaml.dump(vars(arguments)))
# Display script version info
print("SCRIPT")
print("------")
print("git commit: {}".format(__get_commit(path_to_git_folder=os.path.dirname(script))))
print("git origin: {}".format(__get_git_origin(path_to_git_folder=os.path.dirname(script))))
print("Copying script to output folder...")
if os.path.isdir(script):
print("Input folder is a directory (not a file). Skipping copy.")
else:
try:
# Copy the script and record the new location
script_copy = os.path.abspath(shutil.copy(script, arguments.path_output))
print("{} -> {}".format(script, script_copy))
script = script_copy
except shutil.SameFileError:
print("Input and output folder are the same. Skipping copy.")
pass
print("Setting execute permissions for script file {} ...".format(arguments.script))
script_stat = os.stat(script)
os.chmod(script, script_stat.st_mode | S_IEXEC)
# Display data version info
print("\nDATA")
print("----")
print("git commit: {}".format(__get_commit(path_to_git_folder=path_data)))
print("git origin: {}\n".format(__get_git_origin(path_to_git_folder=path_data)))
# Find subjects and process inclusion/exclusions
subject_dirs = []
subject_flat_dirs = [f for f in os.listdir(path_data) if f.startswith(arguments.subject_prefix)]
for isub in subject_flat_dirs:
# Only consider folders
if os.path.isdir(os.path.join(path_data, isub)):
session_dirs = [f for f in os.listdir(os.path.join(path_data, isub)) if f.startswith('ses-')]
if session_dirs and arguments.ignore_ses is not True:
# There is a 'ses-' subdirectory AND arguments.ignore_ses = False, so we concatenate: e.g. sub-XX/ses-YY
session_dirs.sort()
for isess in session_dirs:
subject_dirs.append(os.path.join(isub, isess))
else:
# Otherwise, consider only 'sub-' directories and don't include 'ses-' subdirectories: e.g. sub-XX
subject_dirs.append(isub)
# Handle inclusion lists
assert not ((arguments.include is not None) and (arguments.include_list is not None)),\
'Only one of `include` and `include-list` can be used'
if arguments.include is not None:
subject_dirs = [f for f in subject_dirs if re.search(arguments.include, f) is not None]
if arguments.include_list is not None:
# TODO decide if we should warn users if one of their inclusions isn't around
subject_dirs = [f for f in subject_dirs if f in arguments.include_list]
# Handle exclusions
assert not ((arguments.exclude is not None) and (arguments.exclude_list is not None)),\
'Only one of `exclude` and `exclude-list` can be used'
if arguments.exclude is not None:
subject_dirs = [f for f in subject_dirs if re.search(arguments.exclude, f) is None]
if arguments.exclude_list is not None:
subject_dirs = [f for f in subject_dirs if f not in arguments.exclude_list]
# Determine the number of jobs we can run simultaneously
if arguments.jobs < 1:
jobs = multiprocessing.cpu_count() + arguments.jobs
else:
jobs = arguments.jobs
print("RUNNING")
print("-------")
print("Processing {} subjects in parallel. (Worker processes used: {}).".format(len(subject_dirs), jobs))
# Run the jobs, recording start and end times
start = datetime.datetime.now()
# Trap errors to send an email if a script fails.
try:
with multiprocessing.Pool(jobs) as p:
run_single_dir = functools.partial(run_single,
script=script,
script_args=arguments.script_args,
path_segmanual=path_segmanual,
path_data=path_data,
path_data_processed=path_data_processed,
path_results=path_results,
path_log=path_log,
path_qc=path_qc,
itk_threads=arguments.itk_threads,
continue_on_error=arguments.continue_on_error)
results = list(p.imap(run_single_dir, subject_dirs))
except Exception as e:
if do_email:
message = ('Oh no there has been the following error in your pipeline:\n\n'
'{}'.format(e))
try:
# I consider the multiprocessing error more significant than a potential email error, this
# ensures that the multiprocessing error is signalled.
send_notification('sct_run_batch errored', message)
except Exception:
raise e
raise e
else:
raise e
end = datetime.datetime.now()
# Check for failed subjects
fails = [sd for (sd, ret) in zip(subject_dirs, results) if ret.returncode != 0]
if len(fails) == 0:
status_message = '\nHooray! your batch completed successfully :-)\n'
else:
status_message = ('\nYour batch completed but some subjects may have not completed '
'successfully, please consult the logs for:\n'
'{}\n'.format('\n'.join(fails)))
print(status_message)
# Display timing
duration = end - start
timing_message = ('Started: {} | Ended: {} | Duration: {}\n'.format(
start.strftime('%Hh%Mm%Ss'),
end.strftime('%Hh%Mm%Ss'),
(datetime.datetime.utcfromtimestamp(0) + duration).strftime('%Hh%Mm%Ss')))
print(timing_message)
if do_email:
send_notification('sct_run_batch: Run completed',
status_message + timing_message)
display_open(file=os.path.join(path_qc, "index.html"),
message="To open the Quality Control (QC) report in a web-browser")
if arguments.zip:
file_zip = 'sct_run_batch_{}'.format(time.strftime('%Y%m%d%H%M%S'))
path_tmp = os.path.join(tempfile.mkdtemp(), file_zip)
os.makedirs(os.path.join(path_tmp, file_zip))
for folder in [path_log, path_qc, path_results]:
shutil.copytree(folder, os.path.join(path_tmp, file_zip, os.path.split(folder)[-1]))
shutil.make_archive(os.path.join(path_output, file_zip), 'zip', path_tmp)
shutil.rmtree(path_tmp)
print("\nOutput zip archive: {}.zip".format(os.path.join(path_output, file_zip)))
reset_streams()
batch_log.close()
| def main(argv=None):
parser = get_parser()
arguments = parser.parse_args(argv)
verbose = arguments.v
set_loglevel(verbose=verbose)
# See if there's a configuration file and import those options
if arguments.config is not None:
print('configuring')
with open(arguments.config, 'r') as conf:
_, ext = os.path.splitext(arguments.config)
if ext == '.json':
config = json.load(conf)
if ext == '.yml' or ext == '.yaml':
config = yaml.load(conf, Loader=yaml.Loader)
# Warn people if they're overriding their config file
if len(argv) > 2:
warnings.warn(UserWarning('Using the `-config|-c` flag with additional arguments is discouraged'))
# Check for unsupported arguments
orig_keys = set(vars(arguments).keys())
config_keys = set(config.keys())
if orig_keys != config_keys:
for k in config_keys.difference(orig_keys):
del config[k] # Remove the unknown key
warnings.warn(UserWarning(
'Unknown key "{}" found in your configuration file, ignoring.'.format(k)))
# Update the default to match the config
parser.set_defaults(**config)
# Reparse the arguments
arguments = parser.parse_args(argv)
if arguments.script is None:
parser.error("The -script argument must be provided, either via command-line or via the -config/-c argument.")
# Set up email notifications if desired
do_email = arguments.email_to is not None
if do_email:
email_to = arguments.email_to
if arguments.email_from is not None:
email_from = arguments.email_from
else:
email_from = arguments.email_to
smtp_host, smtp_port = arguments.email_host.split(":")
smtp_port = int(smtp_port)
email_pass = getpass('Please input your email password:\n')
def send_notification(subject, message):
send_email(email_to, email_from,
subject=subject,
message=message,
passwd=email_pass,
smtp_host=smtp_host,
smtp_port=smtp_port)
while True:
send_test = input('Would you like to send a test email to validate your settings? [Y/n]:\n')
if send_test.lower() in ['', 'y', 'n']:
break
else:
print('Please input y or n')
if send_test.lower() in ['', 'y']:
send_notification('sct_run_batch: test notification', 'Looks good')
# Set up output directories and create them if they don't already exist
path_output = os.path.abspath(os.path.expanduser(arguments.path_output))
path_results = os.path.join(path_output, 'results')
path_data_processed = os.path.join(path_output, 'data_processed')
path_log = os.path.join(path_output, 'log')
path_qc = os.path.join(path_output, 'qc')
path_segmanual = os.path.abspath(os.path.expanduser(arguments.path_segmanual))
script = os.path.abspath(os.path.expanduser(arguments.script))
path_data = os.path.abspath(os.path.expanduser(arguments.path_data))
for pth in [path_output, path_results, path_data_processed, path_log, path_qc]:
os.makedirs(pth, exist_ok=True)
# Check that the script can be found
if not os.path.exists(script):
raise FileNotFoundError('Couldn\'t find the script script at {}'.format(script))
# Setup overall log
batch_log = open(os.path.join(path_log, arguments.batch_log), 'w')
# Duplicate init_sct message to batch_log
print('\n--\nSpinal Cord Toolbox ({})\n'.format(__version__), file=batch_log, flush=True)
# Tee IO to batch_log and std(out/err)
orig_stdout = sys.stdout
orig_stderr = sys.stderr
sys.stdout = Tee(batch_log, orig_stdout)
sys.stderr = Tee(batch_log, orig_stderr)
def reset_streams():
sys.stdout = orig_stdout
sys.stderr = orig_stderr
# Display OS
print("INFO SYSTEM")
print("-----------")
platform_running = sys.platform
if platform_running.startswith('darwin'):
os_running = 'osx'
elif platform_running.startswith('linux'):
os_running = 'linux'
elif platform_running.startswith('win32'):
os_running = 'windows'
else:
os_running = platform_running
print('OS: ' + os_running + ' (' + platform.platform() + ')')
# Display number of CPU cores
print('CPU cores: Available: {} | Threads used by ITK Programs: {}'.format(multiprocessing.cpu_count(), arguments.itk_threads))
# Display RAM available
print("RAM: Total {} MB | Available {} MB | Used {} MB".format(
int(psutil.virtual_memory().total / 1024 / 1024),
int(psutil.virtual_memory().available / 1024 / 1024),
int(psutil.virtual_memory().used / 1024 / 1024),
))
# Log the current arguments (in yaml because it's cleaner)
print('\nINPUT ARGUMENTS')
print("---------------")
print(yaml.dump(vars(arguments)))
# Display script version info
print("SCRIPT")
print("------")
print("git commit: {}".format(__get_commit(path_to_git_folder=os.path.dirname(script))))
print("git origin: {}".format(__get_git_origin(path_to_git_folder=os.path.dirname(script))))
print("Copying script to output folder...")
if os.path.isdir(script):
print("Input folder is a directory (not a file). Skipping copy.")
else:
try:
# Copy the script and record the new location
script_copy = os.path.abspath(shutil.copy(script, arguments.path_output))
print("{} -> {}".format(script, script_copy))
script = script_copy
except shutil.SameFileError:
print("Input and output folder are the same. Skipping copy.")
pass
print("Setting execute permissions for script file {} ...".format(arguments.script))
script_stat = os.stat(script)
os.chmod(script, script_stat.st_mode | S_IEXEC)
# Display data version info
print("\nDATA")
print("----")
print("git commit: {}".format(__get_commit(path_to_git_folder=path_data)))
print("git origin: {}\n".format(__get_git_origin(path_to_git_folder=path_data)))
# Find subjects and process inclusion/exclusions
subject_dirs = []
subject_flat_dirs = [f for f in os.listdir(path_data) if f.startswith(arguments.subject_prefix)]
for isub in subject_flat_dirs:
# Only consider folders
if os.path.isdir(os.path.join(path_data, isub)):
session_dirs = [f for f in os.listdir(os.path.join(path_data, isub)) if f.startswith('ses-')]
if session_dirs and not arguments.ignore_ses:
# There is a 'ses-' subdirectory AND arguments.ignore_ses = False, so we concatenate: e.g. sub-XX/ses-YY
session_dirs.sort()
for isess in session_dirs:
subject_dirs.append(os.path.join(isub, isess))
else:
# Otherwise, consider only 'sub-' directories and don't include 'ses-' subdirectories: e.g. sub-XX
subject_dirs.append(isub)
# Handle inclusion lists
assert not ((arguments.include is not None) and (arguments.include_list is not None)),\
'Only one of `include` and `include-list` can be used'
if arguments.include is not None:
subject_dirs = [f for f in subject_dirs if re.search(arguments.include, f) is not None]
if arguments.include_list is not None:
# TODO decide if we should warn users if one of their inclusions isn't around
subject_dirs = [f for f in subject_dirs if f in arguments.include_list]
# Handle exclusions
assert not ((arguments.exclude is not None) and (arguments.exclude_list is not None)),\
'Only one of `exclude` and `exclude-list` can be used'
if arguments.exclude is not None:
subject_dirs = [f for f in subject_dirs if re.search(arguments.exclude, f) is None]
if arguments.exclude_list is not None:
subject_dirs = [f for f in subject_dirs if f not in arguments.exclude_list]
# Determine the number of jobs we can run simultaneously
if arguments.jobs < 1:
jobs = multiprocessing.cpu_count() + arguments.jobs
else:
jobs = arguments.jobs
print("RUNNING")
print("-------")
print("Processing {} subjects in parallel. (Worker processes used: {}).".format(len(subject_dirs), jobs))
# Run the jobs, recording start and end times
start = datetime.datetime.now()
# Trap errors to send an email if a script fails.
try:
with multiprocessing.Pool(jobs) as p:
run_single_dir = functools.partial(run_single,
script=script,
script_args=arguments.script_args,
path_segmanual=path_segmanual,
path_data=path_data,
path_data_processed=path_data_processed,
path_results=path_results,
path_log=path_log,
path_qc=path_qc,
itk_threads=arguments.itk_threads,
continue_on_error=arguments.continue_on_error)
results = list(p.imap(run_single_dir, subject_dirs))
except Exception as e:
if do_email:
message = ('Oh no there has been the following error in your pipeline:\n\n'
'{}'.format(e))
try:
# I consider the multiprocessing error more significant than a potential email error, this
# ensures that the multiprocessing error is signalled.
send_notification('sct_run_batch errored', message)
except Exception:
raise e
raise e
else:
raise e
end = datetime.datetime.now()
# Check for failed subjects
fails = [sd for (sd, ret) in zip(subject_dirs, results) if ret.returncode != 0]
if len(fails) == 0:
status_message = '\nHooray! your batch completed successfully :-)\n'
else:
status_message = ('\nYour batch completed but some subjects may have not completed '
'successfully, please consult the logs for:\n'
'{}\n'.format('\n'.join(fails)))
print(status_message)
# Display timing
duration = end - start
timing_message = ('Started: {} | Ended: {} | Duration: {}\n'.format(
start.strftime('%Hh%Mm%Ss'),
end.strftime('%Hh%Mm%Ss'),
(datetime.datetime.utcfromtimestamp(0) + duration).strftime('%Hh%Mm%Ss')))
print(timing_message)
if do_email:
send_notification('sct_run_batch: Run completed',
status_message + timing_message)
display_open(file=os.path.join(path_qc, "index.html"),
message="To open the Quality Control (QC) report in a web-browser")
if arguments.zip:
file_zip = 'sct_run_batch_{}'.format(time.strftime('%Y%m%d%H%M%S'))
path_tmp = os.path.join(tempfile.mkdtemp(), file_zip)
os.makedirs(os.path.join(path_tmp, file_zip))
for folder in [path_log, path_qc, path_results]:
shutil.copytree(folder, os.path.join(path_tmp, file_zip, os.path.split(folder)[-1]))
shutil.make_archive(os.path.join(path_output, file_zip), 'zip', path_tmp)
shutil.rmtree(path_tmp)
print("\nOutput zip archive: {}.zip".format(os.path.join(path_output, file_zip)))
reset_streams()
batch_log.close()
|
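# A minimal, hypothetical sketch of the subject include/exclude filtering used in the
# batch-processing function above. The subject list and patterns are made up for
# illustration; only the standard-library `re` module is needed.
import re

subject_dirs = ['sub-01/ses-01', 'sub-01/ses-02', 'sub-02/ses-01', 'sub-03']
include_pattern = r'sub-0[12]'      # stands in for a regex passed via --include
exclude_list = ['sub-01/ses-02']    # stands in for a list passed via --exclude-list

filtered = [d for d in subject_dirs if re.search(include_pattern, d) is not None]
filtered = [d for d in filtered if d not in exclude_list]
print(filtered)  # ['sub-01/ses-01', 'sub-02/ses-01']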
55,418 | def run(
uri,
entry_point="main",
version=None,
parameters=None,
docker_args=None,
experiment_name=None,
experiment_id=None,
backend="local",
backend_config=None,
use_conda=None,
storage_dir=None,
synchronous=True,
run_id=None,
run_name=None,
env_manager=None,
tracking_uri=None, # Inserted at the bottom for backwards compatibility
):
"""
Run an MLflow project. The project can be local or stored at a Git URI.
MLflow provides built-in support for running projects locally or remotely on a Databricks or
Kubernetes cluster. You can also run projects against other targets by installing an appropriate
third-party plugin. See `Community Plugins <../plugins.html#community-plugins>`_ for more
information.
For information on using this method in chained workflows, see `Building Multistep Workflows
<../projects.html#building-multistep-workflows>`_.
:raises: :py:class:`mlflow.exceptions.ExecutionException` If a run launched in blocking mode
is unsuccessful.
:param uri: URI of project to run. A local filesystem path
or a Git repository URI (e.g. https://github.com/mlflow/mlflow-example)
pointing to a project directory containing an MLproject file.
:param entry_point: Entry point to run within the project. If no entry point with the specified
name is found, runs the project file ``entry_point`` as a script,
using "python" to run ``.py`` files and the default shell (specified by
environment variable ``$SHELL``) to run ``.sh`` files.
:param version: For Git-based projects, either a commit hash or a branch name.
:param parameters: Parameters (dictionary) for the entry point command.
:param docker_args: Arguments (dictionary) for the docker command.
:param experiment_name: Name of experiment under which to launch the run.
:param experiment_id: ID of experiment under which to launch the run.
:param backend: Execution backend for the run: MLflow provides built-in support for "local",
"databricks", and "kubernetes" (experimental) backends. If running against
Databricks, will run against a Databricks workspace determined as follows:
if a Databricks tracking URI of the form ``databricks://profile`` has been set
(e.g. by setting the MLFLOW_TRACKING_URI environment variable), will run
against the workspace specified by <profile>. Otherwise, runs against the
workspace specified by the default Databricks CLI profile.
:param backend_config: A dictionary, or a path to a JSON file (must end in '.json'), which will
be passed as config to the backend. The exact content which should be
provided is different for each execution backend and is documented
at https://www.mlflow.org/docs/latest/projects.html.
:param use_conda: This argument is deprecated. Use `env_manager='local'` instead.
If True (the default), create a new Conda environment for the run and
install project dependencies within that environment. Otherwise, run the
project in the current environment without installing any project
dependencies.
:param storage_dir: Used only if ``backend`` is "local". MLflow downloads artifacts from
distributed URIs passed to parameters of type ``path`` to subdirectories of
``storage_dir``.
:param synchronous: Whether to block while waiting for a run to complete. Defaults to True.
Note that if ``synchronous`` is False and ``backend`` is "local", this
method will return, but the current process will block when exiting until
the local run completes. If the current process is interrupted, any
asynchronous runs launched via this method will be terminated. If
``synchronous`` is True and the run fails, the current process will
error out as well.
:param run_id: Note: this argument is used internally by the MLflow project APIs and should
not be specified. If specified, the run ID will be used instead of
creating a new run.
:param run_name: The name to give the MLflow Run associated with the project execution.
If ``None``, the MLflow Run name is left unset.
:param env_manager: Specify an environment manager to create a new environment for the run and
install project dependencies within that environment. The following values
are supported:
- local: use the local environment
- conda: use conda
- virtualenv: use virtualenv (and pyenv for Python version management)
If unspecified, default to conda.
:param tracking_uri: The uri which the tracking is supposed to interact with.
If ``None``, the tracking will be against local mlflow server.
If environment variable ``MLFLOW_TRACKING_URI`` is set, it will be used by default.
:return: :py:class:`mlflow.projects.SubmittedRun` exposing information (e.g. run ID)
about the launched run.
.. code-block:: python
:caption: Example
import mlflow
project_uri = "https://github.com/mlflow/mlflow-example"
params = {"alpha": 0.5, "l1_ratio": 0.01}
# Run MLflow project and create a reproducible conda environment
# on a local host
mlflow.run(project_uri, parameters=params)
.. code-block:: text
:caption: Output
...
...
Elasticnet model (alpha=0.500000, l1_ratio=0.010000):
RMSE: 0.788347345611717
MAE: 0.6155576449938276
R2: 0.19729662005412607
... mlflow.projects: === Run (ID '6a5109febe5e4a549461e149590d0a7c') succeeded ===
"""
backend_config_dict = backend_config if backend_config is not None else {}
if (
backend_config
and type(backend_config) != dict
and os.path.splitext(backend_config)[-1] == ".json"
):
with open(backend_config, "r") as handle:
try:
backend_config_dict = json.load(handle)
except ValueError:
_logger.error(
"Error when attempting to load and parse JSON cluster spec from file %s",
backend_config,
)
raise
if use_conda is not None and env_manager is not None:
raise MlflowException.invalid_parameter_value(
"`use_conda` cannot be used with `env_manager`"
)
elif use_conda is not None:
warnings.warn(
"`use_conda` is deprecated and will be removed in a future release. "
"Use `env_manager=local` instead",
FutureWarning,
stacklevel=2,
)
env_manager = _EnvManager.CONDA if use_conda else _EnvManager.LOCAL
elif env_manager is not None:
_EnvManager.validate(env_manager)
if backend == "databricks":
mlflow.projects.databricks.before_run_validations(mlflow.get_tracking_uri(), backend_config)
else:
if backend == "local" and run_id is not None:
backend_config_dict[MLFLOW_LOCAL_BACKEND_RUN_ID_CONFIG] = run_id
if tracking_uri is not None:
if _resolve_tracking_uri(tracking_uri) is not None:
mlflow.set_tracking_uri(_resolve_tracking_uri(tracking_uri))
experiment_id = _resolve_experiment_id(
experiment_name=experiment_name, experiment_id=experiment_id
)
submitted_run_obj = _run(
uri=uri,
experiment_id=experiment_id,
entry_point=entry_point,
version=version,
parameters=parameters,
docker_args=docker_args,
backend_name=backend,
backend_config=backend_config_dict,
env_manager=env_manager,
storage_dir=storage_dir,
synchronous=synchronous,
run_name=run_name,
)
if synchronous:
_wait_for(submitted_run_obj)
return submitted_run_obj
| def run(
uri,
entry_point="main",
version=None,
parameters=None,
docker_args=None,
experiment_name=None,
experiment_id=None,
backend="local",
backend_config=None,
use_conda=None,
storage_dir=None,
synchronous=True,
run_id=None,
run_name=None,
env_manager=None,
tracking_uri=None, # Inserted at the bottom for backwards compatibility
):
"""
Run an MLflow project. The project can be local or stored at a Git URI.
MLflow provides built-in support for running projects locally or remotely on a Databricks or
Kubernetes cluster. You can also run projects against other targets by installing an appropriate
third-party plugin. See `Community Plugins <../plugins.html#community-plugins>`_ for more
information.
For information on using this method in chained workflows, see `Building Multistep Workflows
<../projects.html#building-multistep-workflows>`_.
:raises: :py:class:`mlflow.exceptions.ExecutionException` If a run launched in blocking mode
is unsuccessful.
:param uri: URI of project to run. A local filesystem path
or a Git repository URI (e.g. https://github.com/mlflow/mlflow-example)
pointing to a project directory containing an MLproject file.
:param entry_point: Entry point to run within the project. If no entry point with the specified
name is found, runs the project file ``entry_point`` as a script,
using "python" to run ``.py`` files and the default shell (specified by
environment variable ``$SHELL``) to run ``.sh`` files.
:param version: For Git-based projects, either a commit hash or a branch name.
:param parameters: Parameters (dictionary) for the entry point command.
:param docker_args: Arguments (dictionary) for the docker command.
:param experiment_name: Name of experiment under which to launch the run.
:param experiment_id: ID of experiment under which to launch the run.
:param backend: Execution backend for the run: MLflow provides built-in support for "local",
"databricks", and "kubernetes" (experimental) backends. If running against
Databricks, will run against a Databricks workspace determined as follows:
if a Databricks tracking URI of the form ``databricks://profile`` has been set
(e.g. by setting the MLFLOW_TRACKING_URI environment variable), will run
against the workspace specified by <profile>. Otherwise, runs against the
workspace specified by the default Databricks CLI profile.
:param backend_config: A dictionary, or a path to a JSON file (must end in '.json'), which will
be passed as config to the backend. The exact content which should be
provided is different for each execution backend and is documented
at https://www.mlflow.org/docs/latest/projects.html.
:param use_conda: This argument is deprecated. Use `env_manager='local'` instead.
If True (the default), create a new Conda environment for the run and
install project dependencies within that environment. Otherwise, run the
project in the current environment without installing any project
dependencies.
:param storage_dir: Used only if ``backend`` is "local". MLflow downloads artifacts from
distributed URIs passed to parameters of type ``path`` to subdirectories of
``storage_dir``.
:param synchronous: Whether to block while waiting for a run to complete. Defaults to True.
Note that if ``synchronous`` is False and ``backend`` is "local", this
method will return, but the current process will block when exiting until
the local run completes. If the current process is interrupted, any
asynchronous runs launched via this method will be terminated. If
``synchronous`` is True and the run fails, the current process will
error out as well.
:param run_id: Note: this argument is used internally by the MLflow project APIs and should
not be specified. If specified, the run ID will be used instead of
creating a new run.
:param run_name: The name to give the MLflow Run associated with the project execution.
If ``None``, the MLflow Run name is left unset.
:param env_manager: Specify an environment manager to create a new environment for the run and
install project dependencies within that environment. The following values
are supported:
- local: use the local environment
- conda: use conda
- virtualenv: use virtualenv (and pyenv for Python version management)
If unspecified, default to conda.
:param tracking_uri: The uri which the tracking is supposed to interact with.
If ``None``, the currently configured tracking URI reflected by
:py:func:`mlflow.get_tracking_uri()` is used by default.
:return: :py:class:`mlflow.projects.SubmittedRun` exposing information (e.g. run ID)
about the launched run.
.. code-block:: python
:caption: Example
import mlflow
project_uri = "https://github.com/mlflow/mlflow-example"
params = {"alpha": 0.5, "l1_ratio": 0.01}
# Run MLflow project and create a reproducible conda environment
# on a local host
mlflow.run(project_uri, parameters=params)
.. code-block:: text
:caption: Output
...
...
Elasticnet model (alpha=0.500000, l1_ratio=0.010000):
RMSE: 0.788347345611717
MAE: 0.6155576449938276
R2: 0.19729662005412607
... mlflow.projects: === Run (ID '6a5109febe5e4a549461e149590d0a7c') succeeded ===
"""
backend_config_dict = backend_config if backend_config is not None else {}
if (
backend_config
and type(backend_config) != dict
and os.path.splitext(backend_config)[-1] == ".json"
):
with open(backend_config, "r") as handle:
try:
backend_config_dict = json.load(handle)
except ValueError:
_logger.error(
"Error when attempting to load and parse JSON cluster spec from file %s",
backend_config,
)
raise
if use_conda is not None and env_manager is not None:
raise MlflowException.invalid_parameter_value(
"`use_conda` cannot be used with `env_manager`"
)
elif use_conda is not None:
warnings.warn(
"`use_conda` is deprecated and will be removed in a future release. "
"Use `env_manager=local` instead",
FutureWarning,
stacklevel=2,
)
env_manager = _EnvManager.CONDA if use_conda else _EnvManager.LOCAL
elif env_manager is not None:
_EnvManager.validate(env_manager)
if backend == "databricks":
mlflow.projects.databricks.before_run_validations(mlflow.get_tracking_uri(), backend_config)
else:
if backend == "local" and run_id is not None:
backend_config_dict[MLFLOW_LOCAL_BACKEND_RUN_ID_CONFIG] = run_id
if tracking_uri is not None:
if _resolve_tracking_uri(tracking_uri) is not None:
mlflow.set_tracking_uri(_resolve_tracking_uri(tracking_uri))
experiment_id = _resolve_experiment_id(
experiment_name=experiment_name, experiment_id=experiment_id
)
submitted_run_obj = _run(
uri=uri,
experiment_id=experiment_id,
entry_point=entry_point,
version=version,
parameters=parameters,
docker_args=docker_args,
backend_name=backend,
backend_config=backend_config_dict,
env_manager=env_manager,
storage_dir=storage_dir,
synchronous=synchronous,
run_name=run_name,
)
if synchronous:
_wait_for(submitted_run_obj)
return submitted_run_obj
|
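# A minimal sketch of how run() above treats backend_config: a dict is used as-is,
# while a string ending in ".json" is opened and parsed. The helper name and the
# config contents below are made up; no MLflow project is actually launched.
import json
import os
import tempfile

def load_backend_config(backend_config):
    # Mirrors the parsing at the top of run(): dict passthrough or JSON file load.
    backend_config_dict = backend_config if backend_config is not None else {}
    if (backend_config and not isinstance(backend_config, dict)
            and os.path.splitext(backend_config)[-1] == ".json"):
        with open(backend_config, "r") as handle:
            backend_config_dict = json.load(handle)
    return backend_config_dict

print(load_backend_config({"resources": {"cpu": 2}}))  # dict is passed through
with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as fh:
    json.dump({"resources": {"cpu": 2}}, fh)
print(load_backend_config(fh.name))                    # parsed from the JSON file
os.unlink(fh.name)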
43,229 | def test_batch():
assert list(Batch()) == []
assert Batch().is_empty()
assert not Batch(b={'c': {}}).is_empty()
assert Batch(b={'c': {}}).is_empty(recurse=True)
assert not Batch(a=Batch(), b=Batch(c=Batch())).is_empty()
assert Batch(a=Batch(), b=Batch(c=Batch())).is_empty(recurse=True)
assert not Batch(d=1).is_empty()
assert not Batch(a=np.float64(1.0)).is_empty()
assert len(Batch(a=[1, 2, 3], b={'c': {}})) == 3
assert not Batch(a=[1, 2, 3]).is_empty()
b = Batch()
b.update()
assert b.is_empty()
b.update(c=[3, 5])
assert np.allclose(b.c, [3, 5])
# mimic the behavior of dict.update, where kwargs can overwrite keys
b.update({'a': 2}, a=3)
assert b.a == 3 and 'a' in b
assert b.pop('a') == 3
assert 'a' not in b
with pytest.raises(AssertionError):
Batch({1: 2})
with pytest.raises(TypeError):
Batch(a=[np.zeros((2, 3)), np.zeros((3, 3))])
with pytest.raises(TypeError):
Batch(a=[np.zeros((3, 2)), np.zeros((3, 3))])
with pytest.raises(TypeError):
Batch(a=[torch.zeros((2, 3)), torch.zeros((3, 3))])
with pytest.raises(TypeError):
Batch(a=[torch.zeros((3, 3)), np.zeros((3, 3))])
with pytest.raises(TypeError):
Batch(a=[1, np.zeros((3, 3)), torch.zeros((3, 3))])
batch = Batch(a=[torch.ones(3), torch.ones(3)])
assert torch.allclose(batch.a, torch.ones(2, 3))
batch.cat_(batch)
assert torch.allclose(batch.a, torch.ones(4, 3))
Batch(a=[])
batch = Batch(obs=[0], np=np.zeros([3, 4]))
assert batch.obs == batch["obs"]
batch.obs = [1]
assert batch.obs == [1]
batch.cat_(batch)
assert np.allclose(batch.obs, [1, 1])
assert batch.np.shape == (6, 4)
assert np.allclose(batch[0].obs, batch[1].obs)
batch.obs = np.arange(5)
for i, b in enumerate(batch.split(1, shuffle=False)):
if i != 5:
assert b.obs == batch[i].obs
else:
with pytest.raises(AttributeError):
batch[i].obs
with pytest.raises(AttributeError):
b.obs
print(batch)
batch_dict = {'b': np.array([1.0]), 'c': 2.0, 'd': torch.Tensor([3.0])}
batch_item = Batch({'a': [batch_dict]})[0]
assert isinstance(batch_item.a.b, np.ndarray)
assert batch_item.a.b == batch_dict['b']
assert isinstance(batch_item.a.c, float)
assert batch_item.a.c == batch_dict['c']
assert isinstance(batch_item.a.d, torch.Tensor)
assert batch_item.a.d == batch_dict['d']
batch2 = Batch(a=[{
'b': np.float64(1.0),
'c': np.zeros(1),
'd': Batch(e=np.array(3.0))}])
assert len(batch2) == 1
assert Batch().shape == []
assert Batch(a=1).shape == []
assert Batch(a=set((1, 2, 1))).shape == []
assert batch2.shape[0] == 1
assert 'a' in batch2 and all([i in batch2.a for i in 'bcd'])
with pytest.raises(IndexError):
batch2[-2]
with pytest.raises(IndexError):
batch2[1]
assert batch2[0].shape == []
with pytest.raises(IndexError):
batch2[0][0]
with pytest.raises(TypeError):
len(batch2[0])
assert isinstance(batch2[0].a.c, np.ndarray)
assert isinstance(batch2[0].a.b, np.float64)
assert isinstance(batch2[0].a.d.e, np.float64)
batch2_from_list = Batch(list(batch2))
batch2_from_comp = Batch([e for e in batch2])
assert batch2_from_list.a.b == batch2.a.b
assert batch2_from_list.a.c == batch2.a.c
assert batch2_from_list.a.d.e == batch2.a.d.e
assert batch2_from_comp.a.b == batch2.a.b
assert batch2_from_comp.a.c == batch2.a.c
assert batch2_from_comp.a.d.e == batch2.a.d.e
for batch_slice in [
batch2[slice(0, 1)], batch2[:1], batch2[0:]]:
assert batch_slice.a.b == batch2.a.b
assert batch_slice.a.c == batch2.a.c
assert batch_slice.a.d.e == batch2.a.d.e
batch2.a.d.f = {}
batch2_sum = (batch2 + 1.0) * 2
assert batch2_sum.a.b == (batch2.a.b + 1.0) * 2
assert batch2_sum.a.c == (batch2.a.c + 1.0) * 2
assert batch2_sum.a.d.e == (batch2.a.d.e + 1.0) * 2
assert batch2_sum.a.d.f.is_empty()
with pytest.raises(TypeError):
batch2 += [1]
batch3 = Batch(a={
'c': np.zeros(1),
'd': Batch(e=np.array([0.0]), f=np.array([3.0]))})
batch3.a.d[0] = {'e': 4.0}
assert batch3.a.d.e[0] == 4.0
batch3.a.d[0] = Batch(f=5.0)
assert batch3.a.d.f[0] == 5.0
with pytest.raises(KeyError):
batch3.a.d[0] = Batch(f=5.0, g=0.0)
# auto convert
batch4 = Batch(a=np.array(['a', 'b']))
assert batch4.a.dtype == np.object # auto convert to np.object
batch4.update(a=np.array(['c', 'd']))
assert list(batch4.a) == ['c', 'd']
assert batch4.a.dtype == np.object # auto convert to np.object
batch5 = Batch(a=np.array([{'index': 0}]))
assert isinstance(batch5.a, Batch)
assert np.allclose(batch5.a.index, [0])
batch5.b = np.array([{'index': 1}])
assert isinstance(batch5.b, Batch)
assert np.allclose(batch5.b.index, [1])
# None is a valid object and can be stored in Batch
a = Batch.stack([Batch(a=None), Batch(b=None)])
assert a.a[0] is None and a.a[1] is None
assert a.b[0] is None and a.b[1] is None
| def test_batch():
assert list(Batch()) == []
assert Batch().is_empty()
assert not Batch(b={'c': {}}).is_empty()
assert Batch(b={'c': {}}).is_empty(recurse=True)
assert not Batch(a=Batch(), b=Batch(c=Batch())).is_empty()
assert Batch(a=Batch(), b=Batch(c=Batch())).is_empty(recurse=True)
assert not Batch(d=1).is_empty()
assert not Batch(a=np.float64(1.0)).is_empty()
assert len(Batch(a=[1, 2, 3], b={'c': {}})) == 3
assert not Batch(a=[1, 2, 3]).is_empty()
b = Batch()
b.update()
assert b.is_empty()
b.update(c=[3, 5])
assert np.allclose(b.c, [3, 5])
# mimic the behavior of dict.update, where kwargs can overwrite keys
b.update({'a': 2}, a=3)
assert 'a' in b and b.a == 3
assert b.pop('a') == 3
assert 'a' not in b
with pytest.raises(AssertionError):
Batch({1: 2})
with pytest.raises(TypeError):
Batch(a=[np.zeros((2, 3)), np.zeros((3, 3))])
with pytest.raises(TypeError):
Batch(a=[np.zeros((3, 2)), np.zeros((3, 3))])
with pytest.raises(TypeError):
Batch(a=[torch.zeros((2, 3)), torch.zeros((3, 3))])
with pytest.raises(TypeError):
Batch(a=[torch.zeros((3, 3)), np.zeros((3, 3))])
with pytest.raises(TypeError):
Batch(a=[1, np.zeros((3, 3)), torch.zeros((3, 3))])
batch = Batch(a=[torch.ones(3), torch.ones(3)])
assert torch.allclose(batch.a, torch.ones(2, 3))
batch.cat_(batch)
assert torch.allclose(batch.a, torch.ones(4, 3))
Batch(a=[])
batch = Batch(obs=[0], np=np.zeros([3, 4]))
assert batch.obs == batch["obs"]
batch.obs = [1]
assert batch.obs == [1]
batch.cat_(batch)
assert np.allclose(batch.obs, [1, 1])
assert batch.np.shape == (6, 4)
assert np.allclose(batch[0].obs, batch[1].obs)
batch.obs = np.arange(5)
for i, b in enumerate(batch.split(1, shuffle=False)):
if i != 5:
assert b.obs == batch[i].obs
else:
with pytest.raises(AttributeError):
batch[i].obs
with pytest.raises(AttributeError):
b.obs
print(batch)
batch_dict = {'b': np.array([1.0]), 'c': 2.0, 'd': torch.Tensor([3.0])}
batch_item = Batch({'a': [batch_dict]})[0]
assert isinstance(batch_item.a.b, np.ndarray)
assert batch_item.a.b == batch_dict['b']
assert isinstance(batch_item.a.c, float)
assert batch_item.a.c == batch_dict['c']
assert isinstance(batch_item.a.d, torch.Tensor)
assert batch_item.a.d == batch_dict['d']
batch2 = Batch(a=[{
'b': np.float64(1.0),
'c': np.zeros(1),
'd': Batch(e=np.array(3.0))}])
assert len(batch2) == 1
assert Batch().shape == []
assert Batch(a=1).shape == []
assert Batch(a=set((1, 2, 1))).shape == []
assert batch2.shape[0] == 1
assert 'a' in batch2 and all([i in batch2.a for i in 'bcd'])
with pytest.raises(IndexError):
batch2[-2]
with pytest.raises(IndexError):
batch2[1]
assert batch2[0].shape == []
with pytest.raises(IndexError):
batch2[0][0]
with pytest.raises(TypeError):
len(batch2[0])
assert isinstance(batch2[0].a.c, np.ndarray)
assert isinstance(batch2[0].a.b, np.float64)
assert isinstance(batch2[0].a.d.e, np.float64)
batch2_from_list = Batch(list(batch2))
batch2_from_comp = Batch([e for e in batch2])
assert batch2_from_list.a.b == batch2.a.b
assert batch2_from_list.a.c == batch2.a.c
assert batch2_from_list.a.d.e == batch2.a.d.e
assert batch2_from_comp.a.b == batch2.a.b
assert batch2_from_comp.a.c == batch2.a.c
assert batch2_from_comp.a.d.e == batch2.a.d.e
for batch_slice in [
batch2[slice(0, 1)], batch2[:1], batch2[0:]]:
assert batch_slice.a.b == batch2.a.b
assert batch_slice.a.c == batch2.a.c
assert batch_slice.a.d.e == batch2.a.d.e
batch2.a.d.f = {}
batch2_sum = (batch2 + 1.0) * 2
assert batch2_sum.a.b == (batch2.a.b + 1.0) * 2
assert batch2_sum.a.c == (batch2.a.c + 1.0) * 2
assert batch2_sum.a.d.e == (batch2.a.d.e + 1.0) * 2
assert batch2_sum.a.d.f.is_empty()
with pytest.raises(TypeError):
batch2 += [1]
batch3 = Batch(a={
'c': np.zeros(1),
'd': Batch(e=np.array([0.0]), f=np.array([3.0]))})
batch3.a.d[0] = {'e': 4.0}
assert batch3.a.d.e[0] == 4.0
batch3.a.d[0] = Batch(f=5.0)
assert batch3.a.d.f[0] == 5.0
with pytest.raises(KeyError):
batch3.a.d[0] = Batch(f=5.0, g=0.0)
# auto convert
batch4 = Batch(a=np.array(['a', 'b']))
assert batch4.a.dtype == np.object # auto convert to np.object
batch4.update(a=np.array(['c', 'd']))
assert list(batch4.a) == ['c', 'd']
assert batch4.a.dtype == np.object # auto convert to np.object
batch5 = Batch(a=np.array([{'index': 0}]))
assert isinstance(batch5.a, Batch)
assert np.allclose(batch5.a.index, [0])
batch5.b = np.array([{'index': 1}])
assert isinstance(batch5.b, Batch)
assert np.allclose(batch5.b.index, [1])
# None is a valid object and can be stored in Batch
a = Batch.stack([Batch(a=None), Batch(b=None)])
assert a.a[0] is None and a.a[1] is None
assert a.b[0] is None and a.b[1] is None
|
4,549 | def create_fake_bids_dataset(base_dir='', n_sub=10, n_ses=2,
tasks=['localizer', 'main'],
n_runs=[1, 3], with_derivatives=True,
with_confounds=True,
confounds_tag="desc-confounds_timeseries",
no_session=False):
"""Creates a fake bids dataset directory with dummy files.
Returns fake dataset directory name.
Parameters
----------
base_dir: string (Absolute path), optional
Absolute directory path in which to create the fake BIDS dataset dir.
Default: Current directory.
n_sub: int, optional
        Number of subjects to be simulated in the dataset.
Default: 10
n_ses: int, optional
Number of sessions to be simulated in the dataset.
Ignored if no_session=True.
Default: 2
n_runs: List[int], optional
Default: [1, 3]
with_derivatives: bool, optional
In the case derivatives are included, they come with two spaces and
descriptions. Spaces are 'MNI' and 'T1w'. Descriptions are 'preproc'
        and 'fmriprep'. Only space 'T1w' includes both descriptions.
Default: True
with_confounds: bool, optional
Default: True
confounds_tag: string (filename suffix), optional
If generating confounds, what path shoul they have? Defaults to
`desc-confounds_timeseries` as in `fmriprep` >= 20.2 but can be other
values (e.g. "desc-confounds_regressors" as in `fmriprep` < 20.2)
Default: "desc-confounds_timeseries"
no_session: bool, optional
Specifying no_sessions will only produce runs and files without the
optional session field. In this case n_ses will be ignored.
Default: False
Returns
-------
dataset directory name: string
'bids_dataset'
Creates
-------
Directory with dummy files
"""
bids_path = os.path.join(base_dir, 'bids_dataset')
os.makedirs(bids_path)
# Create surface bids dataset
open(os.path.join(bids_path, 'README.txt'), 'w')
vox = 4
created_sessions = ['ses-%02d' % label for label in range(1, n_ses + 1)]
if no_session:
created_sessions = ['']
for subject in ['sub-%02d' % label for label in range(1, n_sub + 1)]:
for session in created_sessions:
subses_dir = os.path.join(bids_path, subject, session)
if session == 'ses-01' or session == '':
anat_path = os.path.join(subses_dir, 'anat')
os.makedirs(anat_path)
anat_file = os.path.join(anat_path, subject + '_T1w.nii.gz')
open(anat_file, 'w')
func_path = os.path.join(subses_dir, 'func')
os.makedirs(func_path)
for task, n_run in zip(tasks, n_runs):
run_labels = [
'run-%02d' % label for label in range(1, n_run + 1)]
for run in run_labels:
fields = [subject, session, 'task-' + task]
if '' in fields:
fields.remove('')
file_id = '_'.join(fields)
if n_run > 1:
file_id += '_' + run
bold_path = os.path.join(func_path,
file_id + '_bold.nii.gz')
write_fake_bold_img(bold_path, [vox, vox, vox, 100])
events_path = os.path.join(func_path,
file_id + '_events.tsv')
basic_paradigm().to_csv(events_path,
sep='\t',
index=None)
param_path = os.path.join(func_path,
file_id + '_bold.json')
with open(param_path, 'w') as param_file:
json.dump({'RepetitionTime': 1.5}, param_file)
# Create derivatives files
if with_derivatives:
bids_path = os.path.join(base_dir, 'bids_dataset', 'derivatives')
os.makedirs(bids_path)
for subject in ['sub-%02d' % label for label in range(1, 11)]:
for session in created_sessions:
subses_dir = os.path.join(bids_path, subject, session)
func_path = os.path.join(subses_dir, 'func')
os.makedirs(func_path)
for task, n_run in zip(tasks, n_runs):
for run in ['run-%02d' % label
for label in range(1, n_run + 1)
]:
fields = [subject, session, 'task-' + task]
if '' in fields:
fields.remove('')
file_id = '_'.join(fields)
if n_run > 1:
file_id += '_' + run
preproc = (
file_id + '_space-MNI_desc-preproc_bold.nii.gz'
)
preproc_path = os.path.join(func_path, preproc)
write_fake_bold_img(preproc_path,
[vox, vox, vox, 100]
)
preproc = (
file_id + '_space-T1w_desc-preproc_bold.nii.gz'
)
preproc_path = os.path.join(func_path, preproc)
write_fake_bold_img(preproc_path,
[vox, vox, vox, 100]
)
preproc = (
file_id + '_space-T1w_desc-fmriprep_bold.nii.gz'
)
preproc_path = os.path.join(func_path, preproc)
write_fake_bold_img(preproc_path,
[vox, vox, vox, 100]
)
if with_confounds:
confounds_path = os.path.join(
func_path,
file_id + '_' + confounds_tag + '.tsv',
)
basic_confounds(100).to_csv(confounds_path,
sep='\t', index=None)
return 'bids_dataset'
| def create_fake_bids_dataset(base_dir='', n_sub=10, n_ses=2,
tasks=['localizer', 'main'],
n_runs=[1, 3], with_derivatives=True,
with_confounds=True,
confounds_tag="desc-confounds_timeseries",
no_session=False):
"""Creates a fake bids dataset directory with dummy files.
Returns fake dataset directory name.
Parameters
----------
base_dir: string (Absolute path), optional
Absolute directory path in which to create the fake BIDS dataset dir.
Default: Current directory.
n_sub: int, optional
        Number of subjects to be simulated in the dataset.
Default: 10
n_ses: int, optional
Number of sessions to be simulated in the dataset.
Ignored if no_session=True.
Default: 2
n_runs: List[int], optional
Default: [1, 3]
with_derivatives: bool, optional
In the case derivatives are included, they come with two spaces and
descriptions. Spaces are 'MNI' and 'T1w'. Descriptions are 'preproc'
        and 'fmriprep'. Only space 'T1w' includes both descriptions.
Default: True
with_confounds: bool, optional
Default: True
confounds_tag: string (filename suffix), optional
If generating confounds, what path should they have? Defaults to
`desc-confounds_timeseries` as in `fmriprep` >= 20.2 but can be other
values (e.g. "desc-confounds_regressors" as in `fmriprep` < 20.2)
Default: "desc-confounds_timeseries"
no_session: bool, optional
Specifying no_sessions will only produce runs and files without the
optional session field. In this case n_ses will be ignored.
Default: False
Returns
-------
dataset directory name: string
'bids_dataset'
Creates
-------
Directory with dummy files
"""
bids_path = os.path.join(base_dir, 'bids_dataset')
os.makedirs(bids_path)
# Create surface bids dataset
open(os.path.join(bids_path, 'README.txt'), 'w')
vox = 4
created_sessions = ['ses-%02d' % label for label in range(1, n_ses + 1)]
if no_session:
created_sessions = ['']
for subject in ['sub-%02d' % label for label in range(1, n_sub + 1)]:
for session in created_sessions:
subses_dir = os.path.join(bids_path, subject, session)
if session == 'ses-01' or session == '':
anat_path = os.path.join(subses_dir, 'anat')
os.makedirs(anat_path)
anat_file = os.path.join(anat_path, subject + '_T1w.nii.gz')
open(anat_file, 'w')
func_path = os.path.join(subses_dir, 'func')
os.makedirs(func_path)
for task, n_run in zip(tasks, n_runs):
run_labels = [
'run-%02d' % label for label in range(1, n_run + 1)]
for run in run_labels:
fields = [subject, session, 'task-' + task]
if '' in fields:
fields.remove('')
file_id = '_'.join(fields)
if n_run > 1:
file_id += '_' + run
bold_path = os.path.join(func_path,
file_id + '_bold.nii.gz')
write_fake_bold_img(bold_path, [vox, vox, vox, 100])
events_path = os.path.join(func_path,
file_id + '_events.tsv')
basic_paradigm().to_csv(events_path,
sep='\t',
index=None)
param_path = os.path.join(func_path,
file_id + '_bold.json')
with open(param_path, 'w') as param_file:
json.dump({'RepetitionTime': 1.5}, param_file)
# Create derivatives files
if with_derivatives:
bids_path = os.path.join(base_dir, 'bids_dataset', 'derivatives')
os.makedirs(bids_path)
for subject in ['sub-%02d' % label for label in range(1, 11)]:
for session in created_sessions:
subses_dir = os.path.join(bids_path, subject, session)
func_path = os.path.join(subses_dir, 'func')
os.makedirs(func_path)
for task, n_run in zip(tasks, n_runs):
for run in ['run-%02d' % label
for label in range(1, n_run + 1)
]:
fields = [subject, session, 'task-' + task]
if '' in fields:
fields.remove('')
file_id = '_'.join(fields)
if n_run > 1:
file_id += '_' + run
preproc = (
file_id + '_space-MNI_desc-preproc_bold.nii.gz'
)
preproc_path = os.path.join(func_path, preproc)
write_fake_bold_img(preproc_path,
[vox, vox, vox, 100]
)
preproc = (
file_id + '_space-T1w_desc-preproc_bold.nii.gz'
)
preproc_path = os.path.join(func_path, preproc)
write_fake_bold_img(preproc_path,
[vox, vox, vox, 100]
)
preproc = (
file_id + '_space-T1w_desc-fmriprep_bold.nii.gz'
)
preproc_path = os.path.join(func_path, preproc)
write_fake_bold_img(preproc_path,
[vox, vox, vox, 100]
)
if with_confounds:
confounds_path = os.path.join(
func_path,
file_id + '_' + confounds_tag + '.tsv',
)
basic_confounds(100).to_csv(confounds_path,
sep='\t', index=None)
return 'bids_dataset'
|
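# A hand-written sketch of the directory layout create_fake_bids_dataset() above
# produces with its default arguments (sub-01, ses-01, tasks 'localizer'/'main').
# Nothing is created on disk here; the paths just spell out the naming convention.
expected_paths = [
    "bids_dataset/README.txt",
    "bids_dataset/sub-01/ses-01/anat/sub-01_T1w.nii.gz",
    "bids_dataset/sub-01/ses-01/func/sub-01_ses-01_task-localizer_bold.nii.gz",
    "bids_dataset/sub-01/ses-01/func/sub-01_ses-01_task-main_run-01_bold.nii.gz",
    "bids_dataset/derivatives/sub-01/ses-01/func/"
    "sub-01_ses-01_task-main_run-01_space-MNI_desc-preproc_bold.nii.gz",
]
for p in expected_paths:
    print(p)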
45,203 | def file_open(file_path, mode="rb", kwargs=None):
if isinstance(file_path, str):
match = S3_ADDRESS_REGEX.search(file_path)
if match:
import s3fs as S3FS
from botocore.exceptions import NoCredentialsError
s3fs = S3FS.S3FileSystem(anon=False)
try:
return s3fs.open(file_path)
except NoCredentialsError:
s3fs = S3FS.S3FileSystem(anon=True)
return s3fs.open(file_path)
elif "compression" in kwargs:
if kwargs["compression"] == "gzip":
import gzip
return gzip.open(file_path, mode=mode)
return open(file_path, mode=mode)
| def file_open(file_path, mode="rb", kwargs=None):
if isinstance(file_path, str):
match = S3_ADDRESS_REGEX.search(file_path)
if match:
import s3fs as S3FS
from botocore.exceptions import NoCredentialsError
s3fs = S3FS.S3FileSystem(anon=False)
try:
return s3fs.open(file_path)
except NoCredentialsError:
s3fs = S3FS.S3FileSystem(anon=True)
return s3fs.open(file_path)
elif compression == "gzip":
if kwargs["compression"] == "gzip":
import gzip
return gzip.open(file_path, mode=mode)
return open(file_path, mode=mode)
|
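# A self-contained sketch of the local gzip branch of file_open() above. The S3 and
# regex handling is left out, so the equivalent branch is re-implemented here under
# a made-up name (open_maybe_gzip) to keep the snippet runnable on its own.
import gzip
import os
import tempfile

def open_maybe_gzip(file_path, mode="rb", kwargs=None):
    kwargs = kwargs or {}
    if kwargs.get("compression") == "gzip":
        return gzip.open(file_path, mode=mode)
    return open(file_path, mode=mode)

with tempfile.NamedTemporaryFile(suffix=".gz", delete=False) as tmp:
    path = tmp.name
with gzip.open(path, "wt") as fh:
    fh.write("hello")
with open_maybe_gzip(path, mode="rt", kwargs={"compression": "gzip"}) as fh:
    print(fh.read())  # hello
os.remove(path)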
56,415 | def get_other_source_component_file_query(session):
""" Get filter query for the auto-generated Others component.
If there are no user defined source components in the database this
function will return with None.
The returned query will look like this:
(Files NOT IN Component_1) AND (Files NOT IN Component_2) ... AND
(Files NOT IN Component_N)
"""
component_names = session.query(SourceComponent.name).all()
# If there are no user defined source components we don't have to filter.
if not component_names:
return None
def get_query(component_name: str):
""" Get file filter query for auto generated Other component. """
skip, include = get_component_values(session, component_name)
if skip and include:
include_q, skip_q = get_include_skip_queries(skip, include)
return File.id.notin_(include_q.except_(skip_q))
elif include:
return and_(*[File.filepath.notlike(conv(fp)) for fp in include])
elif skip:
return or_(*[File.filepath.like(conv(fp)) for fp in skip])
queries = [get_query(n) for (n, ) in component_names]
return and_(*queries)
| def get_other_source_component_file_query(session):
""" Get filter query for the auto-generated Others component.
If there are no user defined source components in the database this
function will return with None.
The returned query will look like this:
(Files NOT LIKE Component_1) AND (Files NOT LIKE Component_2) ... AND
(Files NOT LIKE Component_N)
"""
component_names = session.query(SourceComponent.name).all()
# If there are no user defined source components we don't have to filter.
if not component_names:
return None
def get_query(component_name: str):
""" Get file filter query for auto generated Other component. """
skip, include = get_component_values(session, component_name)
if skip and include:
include_q, skip_q = get_include_skip_queries(skip, include)
return File.id.notin_(include_q.except_(skip_q))
elif include:
return and_(*[File.filepath.notlike(conv(fp)) for fp in include])
elif skip:
return or_(*[File.filepath.like(conv(fp)) for fp in skip])
queries = [get_query(n) for (n, ) in component_names]
return and_(*queries)
|
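# A toy sketch of how the include patterns of user-defined components turn into the
# "Others" filter above with SQLAlchemy's and_/notlike. The model, engine and the
# '/src/%' pattern are invented for illustration and do not reflect the real schema.
from sqlalchemy import Column, Integer, String, and_, create_engine
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()

class File(Base):
    __tablename__ = "files"
    id = Column(Integer, primary_key=True)
    filepath = Column(String)

engine = create_engine("sqlite://")
Base.metadata.create_all(engine)
with Session(engine) as session:
    session.add_all([File(filepath="/src/a.c"), File(filepath="/3rdparty/b.c")])
    session.commit()
    include = ["/src/%"]  # include patterns of a (hypothetical) user component
    others_query = and_(*[File.filepath.notlike(fp) for fp in include])
    print([f.filepath for f in session.query(File).filter(others_query)])
    # ['/3rdparty/b.c'] -- every file not covered by the defined component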
31,656 | def main():
params = demisto.params()
args = demisto.args()
url = params.get('url')
verify_certificate = not params.get('insecure', False)
proxy = params.get('proxy', False)
headers = {}
mock_data = str(args.get('mock-data', ''))
if mock_data.lower() == "true":
headers['Mock-Data'] = "True"
headers['Authorization'] = f'Bearer {params["api_key"]}'
headers['Soar-Integration-Origin'] = "Cortex XSOAR"
command = demisto.command()
demisto.debug(f'Command being called is {command}')
try:
requests.packages.urllib3.disable_warnings()
client = Client(urljoin(url, ''), verify_certificate, proxy, headers=headers, auth=None)
commands = {
'abxcortexxsoar-check-the-status-of-an-action-requested-on-a-case':
check_the_status_of_an_action_requested_on_a_case_command,
'abxcortexxsoar-check-the-status-of-an-action-requested-on-a-threat':
check_the_status_of_an_action_requested_on_a_threat_command,
'abxcortexxsoar-get-a-list-of-abnormal-cases-identified-by-abnormal-security':
get_a_list_of_abnormal_cases_identified_by_abnormal_security_command,
'abxcortexxsoar-get-a-list-of-threats':
get_a_list_of_threats_command,
'abxcortexxsoar-get-details-of-a-threat':
get_details_of_a_threat_command,
'abxcortexxsoar-get-details-of-an-abnormal-case':
get_details_of_an_abnormal_case_command,
'abxcortexxsoar-get-the-latest-threat-intel-feed': get_the_latest_threat_intel_feed_command,
'abxcortexxsoar-manage-a-threat-identified-by-abnormal-security':
manage_a_threat_identified_by_abnormal_security_command,
'abxcortexxsoar-manage-an-abnormal-case':
manage_an_abnormal_case_command,
'abxcortexxsoar-submit-an-inquiry-to-request-a-report-on-misjudgement-by-abnormal-security':
submit_an_inquiry_to_request_a_report_on_misjudgement_by_abnormal_security_command,
}
if command == 'test-module':
headers['Mock-Data'] = "True"
test_client = Client(urljoin(url, ''), verify_certificate, proxy, headers=headers, auth=None)
test_module(test_client)
elif command in commands:
return_results(commands[command](client, args))
else:
raise NotImplementedError(f'{command} command is not implemented.')
except Exception as e:
return_error(str(e))
| def main():
params = demisto.params()
args = demisto.args()
url = params.get('url')
verify_certificate = not params.get('insecure', False)
proxy = params.get('proxy', False)
headers = {}
mock_data = str(args.get('mock-data', ''))
if mock_data.lower() == "true":
headers['Mock-Data'] = "True"
headers['Authorization'] = f'Bearer {params["api_key"]}'
headers['Soar-Integration-Origin'] = "Cortex XSOAR"
command = demisto.command()
demisto.debug(f'Command being called is {command}')
try:
requests.packages.urllib3.disable_warnings()
client = Client(urljoin(url, ''), verify_certificate, proxy, headers=headers, auth=None)
commands = {
'abxcortexxsoar-check-the-status-of-an-action-requested-on-a-case':
check_the_status_of_an_action_requested_on_a_case_command,
'abxcortexxsoar-check-the-status-of-an-action-requested-on-a-threat':
check_the_status_of_an_action_requested_on_a_threat_command,
'abxcortexxsoar-get-a-list-of-abnormal-cases-identified-by-abnormal-security':
get_a_list_of_abnormal_cases_identified_by_abnormal_security_command,
'abnormal-security-list-threats':
get_a_list_of_threats_command,
'abxcortexxsoar-get-details-of-a-threat':
get_details_of_a_threat_command,
'abxcortexxsoar-get-details-of-an-abnormal-case':
get_details_of_an_abnormal_case_command,
'abxcortexxsoar-get-the-latest-threat-intel-feed': get_the_latest_threat_intel_feed_command,
'abxcortexxsoar-manage-a-threat-identified-by-abnormal-security':
manage_a_threat_identified_by_abnormal_security_command,
'abxcortexxsoar-manage-an-abnormal-case':
manage_an_abnormal_case_command,
'abxcortexxsoar-submit-an-inquiry-to-request-a-report-on-misjudgement-by-abnormal-security':
submit_an_inquiry_to_request_a_report_on_misjudgement_by_abnormal_security_command,
}
if command == 'test-module':
headers['Mock-Data'] = "True"
test_client = Client(urljoin(url, ''), verify_certificate, proxy, headers=headers, auth=None)
test_module(test_client)
elif command in commands:
return_results(commands[command](client, args))
else:
raise NotImplementedError(f'{command} command is not implemented.')
except Exception as e:
return_error(str(e))
|
21,206 | def test_prex_builder_script_from_pex_path(tmpdir):
# type: (Any) -> None
pex_with_script = os.path.join(str(tmpdir), "script.pex")
with built_wheel(
name="my_project",
entry_points={"console_scripts": ["my_app = my_project.my_module:do_something"]},
) as my_whl:
pb = PEXBuilder()
pb.add_dist_location(my_whl)
pb.build(pex_with_script)
pex_file = os.path.join(str(tmpdir), "app.pex")
pb = PEXBuilder()
pb.info.pex_path = pex_with_script
pb.set_script("my_app")
pb.build(pex_file)
assert "hello world!\n" == subprocess.check_output(args=[pex_file]).decode("utf-8")
| def test_pex_builder_script_from_pex_path(tmpdir):
# type: (Any) -> None
pex_with_script = os.path.join(str(tmpdir), "script.pex")
with built_wheel(
name="my_project",
entry_points={"console_scripts": ["my_app = my_project.my_module:do_something"]},
) as my_whl:
pb = PEXBuilder()
pb.add_dist_location(my_whl)
pb.build(pex_with_script)
pex_file = os.path.join(str(tmpdir), "app.pex")
pb = PEXBuilder()
pb.info.pex_path = pex_with_script
pb.set_script("my_app")
pb.build(pex_file)
assert "hello world!\n" == subprocess.check_output(args=[pex_file]).decode("utf-8")
|
12,232 | def s3_server(xprocess):
"""
Mock a local S3 server using `minio`
This requires:
- pytest-xprocess: runs the background process
- minio: the executable must be in PATH
Note it will be given EMPTY! The test function needs
to populate it. You can use
`conda.testing.helpers.populate_s3_server` for that.
"""
# The 'name' below will be the name of the S3 bucket containing
# keys like `noarch/repodata.json`
NAME = "s3_server"
PORT = 9000
from xprocess import ProcessStarter
temp = tempfile.TemporaryDirectory()
(Path(temp.name) / NAME).mkdir()
print("Starting mock_s3_server")
class Starter(ProcessStarter):
pattern = "https://docs.min.io"
terminate_on_interrupt = True
timeout = 10
args = [
"minio",
"server",
f"--address=:{PORT}",
str(temp.name),
]
def startup_check(self, port=PORT):
s = socket.socket()
address = "localhost"
error = False
try:
s.connect((address, port))
except Exception as e:
print("something's wrong with %s:%d. Exception is %s" % (address, port, e))
error = True
finally:
s.close()
return not error
xprocess.ensure(NAME, Starter)
yield f"http://localhost:{PORT}/{NAME}"
xprocess.getinfo(NAME).terminate()
| def s3_server(xprocess):
"""
Mock a local S3 server using `minio`
This requires:
- pytest-xprocess: runs the background process
- minio: the executable must be in PATH
Note it will be given EMPTY! The test function needs
to populate it. You can use
`conda.testing.helpers.populate_s3_server` for that.
"""
# The 'name' below will be the name of the S3 bucket containing
# keys like `noarch/repodata.json`
NAME = "s3_server"
PORT = 9000
from xprocess import ProcessStarter
minio = Minio()
print("Starting mock_s3_server")
class Starter(ProcessStarter):
pattern = "https://docs.min.io"
terminate_on_interrupt = True
timeout = 10
args = [
"minio",
"server",
f"--address=:{PORT}",
str(temp.name),
]
def startup_check(self, port=PORT):
s = socket.socket()
address = "localhost"
error = False
try:
s.connect((address, port))
except Exception as e:
print("something's wrong with %s:%d. Exception is %s" % (address, port, e))
error = True
finally:
s.close()
return not error
xprocess.ensure(NAME, Starter)
yield f"http://localhost:{PORT}/{NAME}"
xprocess.getinfo(NAME).terminate()
|
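# A standalone sketch of the socket probe that startup_check() above performs. The
# host and port are placeholders; no minio process is started by this snippet, so
# the call below normally prints False.
import socket

def port_is_open(address="localhost", port=9000, timeout=1.0):
    s = socket.socket()
    s.settimeout(timeout)
    try:
        s.connect((address, port))
        return True
    except OSError:
        return False
    finally:
        s.close()

print(port_is_open())  # True only if something is already listening on :9000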
1,617 | def clone(estimator, safe=True):
"""Constructs a new estimator with the same parameters.
Clone does a deep copy of the model in an estimator
without actually copying attached data. It yields a new estimator
with the same parameters that has not been fit on any data.
Parameters
----------
estimator : estimator object, or list, tuple or set of objects
The estimator or group of estimators to be cloned
safe : boolean, optional
If safe is false, clone will fall back to a deep copy on objects
that are not estimators.
"""
estimator_type = type(estimator)
# XXX: not handling dictionaries
if estimator_type in (list, tuple, set, frozenset):
return estimator_type([clone(e, safe=safe) for e in estimator])
elif not hasattr(estimator, 'get_params'):
if not safe:
return copy.deepcopy(estimator)
else:
raise TypeError("Cannot clone object '%s' (type %s): "
"it does not seem to be a scikit-learn estimator "
"as it does not implement a 'get_params' methods."
% (repr(estimator), type(estimator)))
elif isinstance(estimator, type):
if not safe:
return copy.deepcopy(estimator)
else:
raise TypeError("Cannot clone object '%s' "
"it is a class rather than instance of a class "
% (repr(estimator)))
klass = estimator.__class__
new_object_params = estimator.get_params(deep=False)
for name, param in new_object_params.items():
new_object_params[name] = clone(param, safe=False)
new_object = klass(**new_object_params)
params_set = new_object.get_params(deep=False)
# quick sanity check of the parameters of the clone
for name in new_object_params:
param1 = new_object_params[name]
param2 = params_set[name]
if param1 is not param2:
raise RuntimeError('Cannot clone object %s, as the constructor '
'either does not set or modifies parameter %s' %
(estimator, name))
return new_object
| def clone(estimator, safe=True):
"""Constructs a new estimator with the same parameters.
Clone does a deep copy of the model in an estimator
without actually copying attached data. It yields a new estimator
with the same parameters that has not been fit on any data.
Parameters
----------
estimator : estimator object, or list, tuple or set of objects
The estimator or group of estimators to be cloned
safe : boolean, optional
If safe is false, clone will fall back to a deep copy on objects
that are not estimators.
"""
estimator_type = type(estimator)
# XXX: not handling dictionaries
if estimator_type in (list, tuple, set, frozenset):
return estimator_type([clone(e, safe=safe) for e in estimator])
elif not hasattr(estimator, 'get_params'):
if not safe:
return copy.deepcopy(estimator)
else:
raise TypeError("Cannot clone object '%s' (type %s): "
"it does not seem to be a scikit-learn estimator "
"as it does not implement a 'get_params' methods."
% (repr(estimator), type(estimator)))
elif isinstance(estimator, type):
if not safe:
return copy.deepcopy(estimator)
else:
raise TypeError("Cannot clone object '%s': "
"it is a class rather than instance of a class "
% (repr(estimator)))
klass = estimator.__class__
new_object_params = estimator.get_params(deep=False)
for name, param in new_object_params.items():
new_object_params[name] = clone(param, safe=False)
new_object = klass(**new_object_params)
params_set = new_object.get_params(deep=False)
# quick sanity check of the parameters of the clone
for name in new_object_params:
param1 = new_object_params[name]
param2 = params_set[name]
if param1 is not param2:
raise RuntimeError('Cannot clone object %s, as the constructor '
'either does not set or modifies parameter %s' %
(estimator, name))
return new_object
|
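# A short usage sketch for clone() above, assuming scikit-learn is installed (its
# public entry point for this helper is sklearn.base.clone). It shows that the
# constructor parameters are copied while the fitted state is not.
import numpy as np
from sklearn.base import clone
from sklearn.linear_model import LogisticRegression

est = LogisticRegression(C=0.5).fit(np.array([[0.0], [1.0]]), [0, 1])
fresh = clone(est)
print(fresh.get_params()["C"])   # 0.5 -- same constructor parameters
print(hasattr(fresh, "coef_"))   # False -- the clone has not been fit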
31,831 | def get_used_dockers_images() -> CommandResults:
md = None
active_docker_list_integration = {}
active_docker_list_automation = {}
result_dict: Dict[str, List[str]] = {}
active_integration_instances = demisto.internalHttpRequest(POST_COMMAND, "%s" % SETTING_INTEGRATION_SEARCH,
REQUEST_INTEGRATION_SEARCH_BODY)
demisto.debug(
f"called demisto.internalHttpRequest(\"{POST_COMMAND}\", \"{SETTING_INTEGRATION_SEARCH}\", "
f"\"{REQUEST_INTEGRATION_SEARCH_BODY}\")")
    demisto.debug(f'response code = {active_integration_instances["statusCode"]}')
if active_integration_instances and active_integration_instances['statusCode'] == 200:
active_docker_list_integration = extract_dockers_from_integration_search_result(
active_integration_instances['body'])
active_automation_instances = demisto.internalHttpRequest(POST_COMMAND, "%s" % AUTOMATION_SEARCH,
REQUEST_INTEGRATION_SEARCH_BODY)
demisto.debug(f"called demisto.internalHttpRequest(\"{POST_COMMAND}\", \"{AUTOMATION_SEARCH}\", "
f"\"{REQUEST_INTEGRATION_SEARCH_BODY}\")")
    demisto.debug(f'response code = {active_automation_instances["statusCode"]}')
if active_automation_instances and active_automation_instances['statusCode'] == 200:
active_docker_list_automation = extract_dockers_from_automation_search_result(
active_automation_instances['body'])
result_dict = merge_result(active_docker_list_integration, result_dict, MAX_PER_DOCKER)
result_dict = merge_result(active_docker_list_automation, result_dict, MAX_PER_DOCKER)
''' format the result for Markdown view'''
result_output = []
result_output = format_result_for_markdown(result_dict)
md = tableToMarkdown('Dockers Images In use:', result_output, )
return CommandResults(readable_output=md)
| def get_used_dockers_images() -> CommandResults:
md = None
active_docker_list_integration = {}
active_docker_list_automation = {}
result_dict: Dict[str, List[str]] = {}
active_integration_instances = demisto.internalHttpRequest(POST_COMMAND, "%s" % SETTING_INTEGRATION_SEARCH,
REQUEST_INTEGRATION_SEARCH_BODY)
demisto.debug(
f"called demisto.internalHttpRequest(\"{POST_COMMAND}\", \"{SETTING_INTEGRATION_SEARCH}\", "
f"\"{REQUEST_INTEGRATION_SEARCH_BODY}\")")
    demisto.debug(f'response code = {active_integration_instances["statusCode"]}')
if active_integration_instances and active_integration_instances['statusCode'] == 200:
active_docker_list_integration = extract_dockers_from_integration_search_result(
active_integration_instances['body'])
active_automation_instances = demisto.internalHttpRequest(POST_COMMAND, "%s" % AUTOMATION_SEARCH,
REQUEST_INTEGRATION_SEARCH_BODY)
demisto.debug(f"called demisto.internalHttpRequest(\"{POST_COMMAND}\", \"{AUTOMATION_SEARCH}\", "
f"\"{REQUEST_INTEGRATION_SEARCH_BODY}\")")
    demisto.debug(f'response code = {active_automation_instances["statusCode"]}')
if active_automation_instances and active_automation_instances['statusCode'] == 200:
active_docker_list_automation = extract_dockers_from_automation_search_result(
active_automation_instances['body'])
result_dict = merge_result(active_docker_list_integration, result_dict, MAX_PER_DOCKER)
result_dict = merge_result(active_docker_list_automation, result_dict, MAX_PER_DOCKER)
''' format the result for Markdown view'''
result_output = []
result_output = format_result_for_markdown(result_dict)
md = tableToMarkdown('Dockers Images In use:', result_output)
return CommandResults(readable_output=md)
|
8,660 | def main(argv=None):
try:
# Step One: Parse The Command Line
parser = build_parser()
opts = parser.parse_args(argv or None)
# Step Two: "Do not run as root" checks
try:
check_not_root()
except RuntimeError as err:
stderr('%s' % err)
return 1
# Step Three: No-config required options
if opts.version:
print_version()
return
if opts.wizard:
_wizard('all', opts.config)
return
if opts.mod_wizard:
_wizard('mod', opts.config)
return
if opts.list_configs:
print_config()
return
# Step Four: Get the configuration file and prepare to run
try:
config_module = get_configuration(opts)
except ConfigurationError as e:
stderr(e)
return 2
if config_module.core.not_configured:
stderr('Bot is not configured, can\'t start')
# exit with code 2 to prevent auto restart on fail by systemd
return 2
# Step Five: Manage logfile, stdout and stderr
logfile = os.path.os.path.join(config_module.core.logdir, 'stdio.log')
sys.stderr = tools.OutputRedirect(logfile, True, opts.quiet)
sys.stdout = tools.OutputRedirect(logfile, False, opts.quiet)
# Step Six: Handle --quit, --kill and saving the PID to file
pid_dir = config_module.core.pid_dir
pid_file_path = get_pid_filename(opts, pid_dir)
old_pid = get_running_pid(pid_file_path)
if old_pid is not None and tools.check_pid(old_pid):
if not opts.quit and not opts.kill:
stderr('There\'s already a Sopel instance running with this config file')
stderr('Try using the --quit or the --kill options')
return 1
elif opts.kill:
stderr('Killing the sopel')
os.kill(old_pid, signal.SIGKILL)
return
elif opts.quit:
stderr('Signaling Sopel to stop gracefully')
if hasattr(signal, 'SIGUSR1'):
os.kill(old_pid, signal.SIGUSR1)
else:
os.kill(old_pid, signal.SIGTERM)
return
elif opts.kill or opts.quit:
stderr('Sopel is not running!')
return 1
if opts.daemonize:
child_pid = os.fork()
            if child_pid != 0:
return
with open(pid_file_path, 'w') as pid_file:
pid_file.write(str(os.getpid()))
# Step Seven: Initialise And Run sopel
run(config_module, pid_file_path)
except KeyboardInterrupt:
print("\n\nInterrupted")
return 1
| def main(argv=None):
try:
# Step One: Parse The Command Line
parser = build_parser()
opts = parser.parse_args(argv or None)
# Step Two: "Do not run as root" checks
try:
check_not_root()
except RuntimeError as err:
stderr('%s' % err)
return 1
# Step Three: No-config required options
if opts.version:
print_version()
return
if opts.wizard:
_wizard('all', opts.config)
return
if opts.mod_wizard:
_wizard('mod', opts.config)
return
if opts.list_configs:
print_config()
return
# Step Four: Get the configuration file and prepare to run
try:
config_module = get_configuration(opts)
except ConfigurationError as e:
stderr(e)
return 2
if config_module.core.not_configured:
stderr('Bot is not configured, can\'t start')
# exit with code 2 to prevent auto restart on fail by systemd
return 2
# Step Five: Manage logfile, stdout and stderr
logfile = os.path.os.path.join(config_module.core.logdir, 'stdio.log')
sys.stderr = tools.OutputRedirect(logfile, True, opts.quiet)
sys.stdout = tools.OutputRedirect(logfile, False, opts.quiet)
# Step Six: Handle --quit, --kill and saving the PID to file
pid_dir = config_module.core.pid_dir
pid_file_path = get_pid_filename(opts, pid_dir)
old_pid = get_running_pid(pid_file_path)
if old_pid is not None and tools.check_pid(old_pid):
if not opts.quit and not opts.kill:
stderr('There\'s already a Sopel instance running with this config file')
stderr('Try using the --quit or the --kill options')
return 1
elif opts.kill:
stderr('Killing the sopel')
os.kill(old_pid, signal.SIGKILL)
return
elif opts.quit:
stderr('Signaling Sopel to stop gracefully')
if hasattr(signal, 'SIGUSR1'):
os.kill(old_pid, signal.SIGUSR1)
else:
os.kill(old_pid, signal.SIGTERM)
return
elif opts.kill or opts.quit:
stderr('Sopel is not running!')
return 1
if opts.daemonize:
child_pid = os.fork()
            if child_pid != 0:
return
with open(pid_file_path, 'w') as pid_file:
pid_file.write(str(os.getpid()))
# Step Seven: Initialize and run Sopel
run(config_module, pid_file_path)
except KeyboardInterrupt:
print("\n\nInterrupted")
return 1
|
8,427 | def test_create_with_spectral_coord():
spectral_coord = SpectralCoord(np.arange(5100, 5150)*u.AA, radial_velocity = u.Quantity(1000.0, "km/s"))
flux = np.random.randn(50)*u.Jy
spec = Spectrum1D(spectral_axis = spectral_coord, flux = flux)
assert spec.radial_velocity == u.Quantity(1000.0, "km/s")
assert isinstance(spec.spectral_axis, SpectralCoord)
assert spec.spectral_axis.size == 50
| def test_create_with_spectral_coord():
spectral_coord = SpectralCoord(np.arange(5100, 5150)*u.AA, radial_velocity = u.Quantity(1000.0, "km/s"))
flux = np.random.randn(50)*u.Jy
spec = Spectrum1D(spectral_axis = spectral_coord, flux=flux)
assert spec.radial_velocity == u.Quantity(1000.0, "km/s")
assert isinstance(spec.spectral_axis, SpectralCoord)
assert spec.spectral_axis.size == 50
|
31,990 | def cve_to_context(cve) -> Dict[str, str]:
"""Returning a cve structure with the following fields:
* ID: The cve ID.
* CVSS: The cve score scale/
* Published: The date the cve was published.
* Modified: The date the cve was modified.
* Description: the cve's description
Args:
cve: The cve response from CVE-Search web site
Returns:
The cve structure.
"""
cvss = cve.get('cvss')
return {
'ID': cve.get('id', ''),
'CVSS': cvss if cvss else 'N\\A',
'Published': cve.get('Published', '').rstrip('Z'),
'Modified': cve.get('Modified', '').rstrip('Z'),
'Description': cve.get('summary', '')
}
| def cve_to_context(cve) -> Dict[str, str]:
"""Returning a cve structure with the following fields:
* ID: The cve ID.
* CVSS: The cve score scale/
* Published: The date the cve was published.
* Modified: The date the cve was modified.
* Description: the cve's description
Args:
cve: The cve response from CVE-Search web site
Returns:
The cve structure.
"""
cvss = cve.get('cvss')
return {
'ID': cve.get('id', ''),
'CVSS': cvss or 'N\\A',
'Published': cve.get('Published', '').rstrip('Z'),
'Modified': cve.get('Modified', '').rstrip('Z'),
'Description': cve.get('summary', '')
}
|
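# A fabricated sample showing the mapping cve_to_context() above performs; the input
# dict is made up and is not a real CVE-Search response.
sample_cve = {
    "id": "CVE-2021-0001",
    "cvss": 7.5,
    "Published": "2021-01-01T00:00:00Z",
    "Modified": "2021-02-01T00:00:00Z",
    "summary": "Example summary.",
}
# Expected context entry (a missing 'cvss' would instead yield 'N\A'):
# {'ID': 'CVE-2021-0001', 'CVSS': 7.5, 'Published': '2021-01-01T00:00:00',
#  'Modified': '2021-02-01T00:00:00', 'Description': 'Example summary.'}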
31,814 | def item_purchase_command(client: Client, args: Dict[str, Any]) -> Union[CommandResults, None]:
try:
item_id = str(args.get('item_id'))
bot_id = ''
room_id = ''
# Get mentions list:
mentions_list_res = client.get_mention_list()
if isinstance(mentions_list_res, dict) and isinstance(mentions_list_res.get('data', ''), list) \
and len(mentions_list_res['data']) > 0 and isinstance(mentions_list_res['data'][0], dict) \
and 'id' in mentions_list_res['data'][0] and 'alias' in mentions_list_res['data'][0]:
mentions_list = mentions_list_res['data']
# Fetch some important item data.
item_res = client.get_item(item_id=item_id)
if isinstance(item_res, dict) and isinstance(item_res.get('data', ''), dict):
# Prevent execution on unsupported sub types.
if item_res['data'].get('sub_type', -1) not in SUPPORTED_SUB_TYPES_FOR_PURCHASE:
raise Exception("Sub type not supported for purchasing!")
# Extract bot ID and incident ID.
incident_id = item_res['data'].get('feed_property_id', '')
if not incident_id:
raise Exception("Item ID doesn't found!")
bot_id = item_res['data'].get('bot_id', '')
if not bot_id:
raise Exception("Bot ID doesn't found!")
# Check if chat room already exists.
incident_res = client.incident_get_items(incident_id=incident_id)
if isinstance(incident_res, dict) and isinstance(incident_res.get('data', ''), dict) and \
isinstance(incident_res['data'].get('chat', ''), dict) and \
isinstance(incident_res['data']['chat'].get('room', ''), dict):
room_id = incident_res['data']['chat']['room'].get('id', '')
# Send the action status.
action_res = client.action_on_item(item_id=item_id, action="request")
if isinstance(action_res, dict) and isinstance(action_res.get('data', ''), dict) \
and action_res['data'].get('value', ''):
# Send the chat request.
message = {
"text": "Hi <b>@KELA</b> , I would like to acquire further details about bot: " + bot_id,
"mentionsList": mentions_list
}
room = {"itemId": incident_id, "itemType": "FEED_PROPERTY"}
if room_id:
room['id'] = room_id
# Send message.
message_res = client.message_on_incident(message=message, room=room)
if isinstance(message_res, dict) and isinstance(message_res.get('data', ''), dict) \
and message_res['data'].get('roomId', ''):
# readable_output = 'Item marked for purchasing'
readable_output = 'Bot ID (' + bot_id + ') marked for purchasing'
else:
raise Exception("Action failed!")
else:
raise Exception("Action failed!")
else:
readable_output = f'No data found for item ID: {incident_id}'
else:
raise Exception("Mentions list doesn't found!")
return CommandResults(readable_output=readable_output)
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute command on {item_id}.\nError:\n {str(e)}')
return None
| def item_purchase_command(client: Client, args: Dict[str, Any]) -> Union[CommandResults, None]:
try:
item_id = str(args.get('item_id'))
bot_id = ''
room_id = ''
# Get mentions list:
mentions_list_res = client.get_mention_list()
if isinstance(mentions_list_res, dict) and isinstance(mentions_list_res.get('data', ''), list) \
and len(mentions_list_res['data']) > 0 and isinstance(mentions_list_res['data'][0], dict) \
and 'id' in mentions_list_res['data'][0] and 'alias' in mentions_list_res['data'][0]:
mentions_list = mentions_list_res['data']
# Fetch some important item data.
item_res = client.get_item(item_id=item_id)
if isinstance(item_res, dict) and isinstance(item_res.get('data', ''), dict):
# Prevent execution on unsupported sub types.
if item_res['data'].get('sub_type', -1) not in SUPPORTED_SUB_TYPES_FOR_PURCHASE:
raise Exception("RaDark Error: Sub type not supported for purchasing!")
# Extract bot ID and incident ID.
incident_id = item_res['data'].get('feed_property_id', '')
if not incident_id:
raise Exception("Item ID doesn't found!")
bot_id = item_res['data'].get('bot_id', '')
if not bot_id:
raise Exception("Bot ID doesn't found!")
# Check if chat room already exists.
incident_res = client.incident_get_items(incident_id=incident_id)
if isinstance(incident_res, dict) and isinstance(incident_res.get('data', ''), dict) and \
isinstance(incident_res['data'].get('chat', ''), dict) and \
isinstance(incident_res['data']['chat'].get('room', ''), dict):
room_id = incident_res['data']['chat']['room'].get('id', '')
# Send the action status.
action_res = client.action_on_item(item_id=item_id, action="request")
if isinstance(action_res, dict) and isinstance(action_res.get('data', ''), dict) \
and action_res['data'].get('value', ''):
# Send the chat request.
message = {
"text": "Hi <b>@KELA</b> , I would like to acquire further details about bot: " + bot_id,
"mentionsList": mentions_list
}
room = {"itemId": incident_id, "itemType": "FEED_PROPERTY"}
if room_id:
room['id'] = room_id
# Send message.
message_res = client.message_on_incident(message=message, room=room)
if isinstance(message_res, dict) and isinstance(message_res.get('data', ''), dict) \
and message_res['data'].get('roomId', ''):
# readable_output = 'Item marked for purchasing'
readable_output = 'Bot ID (' + bot_id + ') marked for purchasing'
else:
raise Exception("Action failed!")
else:
raise Exception("Action failed!")
else:
readable_output = f'No data found for item ID: {incident_id}'
else:
raise Exception("Mentions list doesn't found!")
return CommandResults(readable_output=readable_output)
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute command on {item_id}.\nError:\n {str(e)}')
return None
|
44,113 | def compare_measurements(meas1, meas2):
"""
Helper function to compare measurements
"""
assert meas1.return_type.name == meas2.return_type.name
obs1 = meas1.obs
obs2 = meas2.obs
assert np.array(obs1.name == obs2.name).all()
assert set(obs1.wires.tolist()) == set(obs2.wires.tolist())
| def compare_measurements(meas1, meas2):
"""
Helper function to compare measurements
"""
assert meas1.return_type.name == meas2.return_type.name
obs1 = meas1.obs
obs2 = meas2.obs
assert np.array(obs1.name == obs2.name).all()
assert set(obs1.wires) == set(obs2.wires)
|
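A hedged usage sketch for compare_measurements above, assuming a PennyLane version in which expectation-value measurements expose .return_type, .obs and .wires, as the helper relies on.

import pennylane as qml

# Two identical expectation-value measurements over the same observable/wires.
m1 = qml.expval(qml.PauliZ(0) @ qml.PauliX(1))
m2 = qml.expval(qml.PauliZ(0) @ qml.PauliX(1))
# compare_measurements(m1, m2)  # passes: same return type, observable names and wires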
29,894 | def cluster_pool_from_config(
app_config: config.RawConfig, prefix: str = "rediscluster.", **kwargs: Any
) -> rediscluster.ClusterConnectionPool:
"""Make a ClusterConnectionPool from a configuration dictionary.
The keys useful to :py:func:`cluster_pool_from_config` should be prefixed, e.g.
``rediscluster.url``, ``rediscluster.max_connections``, etc. The ``prefix`` argument
specifies the prefix used to filter keys. Each key is mapped to a
corresponding keyword argument on the :py:class:`redis.ConnectionPool`
constructor.
Supported keys:
* ``url`` (required): a URL like ``redis://localhost/0``.
* ``max_connections``: an integer maximum number of connections in the pool
* ``timeout``: how long to wait for sockets to connect. e.g.
``200 milliseconds`` (:py:func:`~baseplate.lib.config.Timespan`)
"""
assert prefix.endswith(".")
parser = config.SpecParser(
{
"startup_nodes": config.TupleOf(config.String),
"max_connections": config.Optional(config.Integer, default=None),
"timeout": config.Optional(config.Timespan, default=None),
}
)
options = parser.parse(prefix[:-1], app_config)
nodes = [
{"host": node[0], "port": node[1]}
for node in [startup_node.split(":") for startup_node in options.startup_nodes]
]
kwargs["startup_nodes"] = nodes
if options.max_connections is not None:
kwargs.setdefault("max_connections", options.max_connections)
if options.timeout is not None:
kwargs.setdefault("timeout", options.timeout.total_seconds())
return rediscluster.ClusterBlockingConnectionPool.from_url(options.url, **kwargs)
| def cluster_pool_from_config(
app_config: config.RawConfig, prefix: str = "rediscluster.", **kwargs: Any
) -> rediscluster.ClusterConnectionPool:
"""Make a ClusterConnectionPool from a configuration dictionary.
The keys useful to :py:func:`cluster_pool_from_config` should be prefixed, e.g.
``rediscluster.url``, ``rediscluster.max_connections``, etc. The ``prefix`` argument
specifies the prefix used to filter keys. Each key is mapped to a
corresponding keyword argument on the :py:class:`redis.ConnectionPool`
constructor.
Supported keys:
* ``url`` (required): a URL like ``redis://localhost/0``.
* ``max_connections``: an integer maximum number of connections in the pool
* ``timeout``: how long to wait for sockets to connect. e.g.
``200 milliseconds`` (:py:func:`~baseplate.lib.config.Timespan`)
"""
assert prefix.endswith(".")
parser = config.SpecParser(
{
"startup_nodes": config.TupleOf(config.Endpoint),
"max_connections": config.Optional(config.Integer, default=None),
"timeout": config.Optional(config.Timespan, default=None),
}
)
options = parser.parse(prefix[:-1], app_config)
nodes = [
{"host": node[0], "port": node[1]}
for node in [startup_node.split(":") for startup_node in options.startup_nodes]
]
kwargs["startup_nodes"] = nodes
if options.max_connections is not None:
kwargs.setdefault("max_connections", options.max_connections)
if options.timeout is not None:
kwargs.setdefault("timeout", options.timeout.total_seconds())
return rediscluster.ClusterBlockingConnectionPool.from_url(options.url, **kwargs)
|
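An illustrative configuration for cluster_pool_from_config above; the key names follow the docstring's `rediscluster.` prefix, while the values and the comma-separated startup-nodes format are assumptions about how baseplate's SpecParser and TupleOf handle raw strings.

app_config = {
    "rediscluster.startup_nodes": "redis-node-1:7000, redis-node-2:7000",
    "rediscluster.max_connections": "64",
    "rediscluster.timeout": "200 milliseconds",
}
# pool = cluster_pool_from_config(app_config)
# Note that the docstring advertises a ``url`` key, but the spec above only
# defines startup_nodes/max_connections/timeout, so the final from_url call
# appears to rely on an option the parser never produces.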
10,833 | def remove_unnecessary_nrt_usage(function, context, fndesc):
"""
Remove unnecessary NRT incref/decref in the given LLVM function.
It uses high-level type info to determine if the function does not need NRT.
Such a function does not:
- return array object;
- take arguments that need refcount except array;
- call function that return refcounted object.
In effect, the function will not capture or create references that extend
the lifetime of any refcounted objects beyond the lifetime of the
function.
The rewrite is performed in place.
If rewrite has happen, this function return True. Otherwise, return False.
"""
dmm = context.data_model_manager
if _legalize(function.module, dmm, fndesc):
_rewrite_function(function)
return True
else:
return False
| def remove_unnecessary_nrt_usage(function, context, fndesc):
"""
Remove unnecessary NRT incref/decref in the given LLVM function.
It uses high-level type info to determine if the function does not need NRT.
Such a function does not:
- return array object;
- take arguments that need refcount except array;
- call function that return refcounted object.
In effect, the function will not capture or create references that extend
the lifetime of any refcounted objects beyond the lifetime of the
function.
The rewrite is performed in place.
If rewrite has happened, this function returns True, otherwise, it returns False.
"""
dmm = context.data_model_manager
if _legalize(function.module, dmm, fndesc):
_rewrite_function(function)
return True
else:
return False
|
43,138 | def traverse_dir(
directory,
topdown=True,
ignore=None,
only=None,
recursive=True,
include_subdir=True,
):
"""
Recursively traverse all files and sub directories in a directory and
get a list of relative paths.
:param directory: Path to a directory that will be traversed.
:type directory: ``str``
:param topdown: Browse the directory in a top-down or bottom-up approach.
:type topdown: ``bool``
:param ignore: list of patterns to ignore by glob style filtering.
:type ignore: ``list``
:param only: list of patterns to exclusively consider by glob style
filtering.
:type only: ``list``
:param recursive: Traverse through all sub directories recursively.
:type recursive: ``bool``
:param include_subdir: Include all sub directories and files if True, or
exclude directories in the result.
:type include_subdir: ``bool``
:return: A list of relative file paths
:rtype: ``list`` of ``str``
"""
result = []
ignore = ignore or []
only = only or []
def should_ignore(filename):
"""Decide if a file should be ignored by its name."""
for pattern in ignore:
if fnmatch.fnmatch(filename, pattern):
return True
if only:
for pattern in only:
if fnmatch.fnmatch(filename, pattern):
return False
else:
return True
return False
for dirpath, dirnames, filenames in os.walk(
directory, topdown=topdown or recursive is False
):
if include_subdir:
for dname in dirnames:
if not should_ignore(dname):
dpath = os.path.join(dirpath, dname)
_, _, relpath = dpath.partition(directory)
while relpath.startswith(os.sep):
relpath = relpath[len(os.sep) :]
result.append(relpath)
for fname in filenames:
if not should_ignore(fname):
fpath = os.path.join(dirpath, fname)
_, _, relpath = fpath.partition(directory)
while relpath.startswith(os.sep):
relpath = relpath[len(os.sep) :]
result.append(relpath)
if recursive is False:
break
return result
| def traverse_dir(
directory,
topdown=True,
ignore=None,
only=None,
recursive=True,
include_subdir=True,
):
"""
Recursively traverse all files and sub directories in a directory and
get a list of relative paths.
:param directory: Path to a directory that will be traversed.
:type directory: ``str``
:param topdown: Browse the directory in a top-down or bottom-up approach.
:type topdown: ``bool``
:param ignore: list of patterns to ignore by glob style filtering.
:type ignore: ``list``
:param only: list of patterns to exclusively consider by glob style
filtering.
:type only: ``list``
:param recursive: Traverse through all sub directories recursively.
:type recursive: ``bool``
:param include_subdir: Include all sub directories and files if True, or
exclude directories in the result.
:type include_subdir: ``bool``
:return: A list of relative file paths
:rtype: ``list`` of ``str``
"""
result = []
ignore = ignore or []
only = only or []
def should_ignore(filename):
"""Decide if a file should be ignored by its name."""
for pattern in ignore:
if fnmatch.fnmatch(filename, pattern):
return True
if only:
for pattern in only:
if fnmatch.fnmatch(filename, pattern):
return False
else:
return True
return False
for dirpath, dirnames, filenames in os.walk(
directory, topdown=topdown or recursive is False
):
if include_subdir:
for dname in dirnames:
if not should_ignore(dname):
dpath = os.path.join(dirpath, dname)
_, _, relpath = dpath.partition(directory)
while relpath.startswith(os.sep):
relpath = relpath[len(os.sep) :]
result.append(relpath)
for fname in filenames:
if not should_ignore(fname):
fpath = os.path.join(dirpath, fname)
relpath = os.path.relpath(os.path.join(dirpath, fname), directory)
while relpath.startswith(os.sep):
relpath = relpath[len(os.sep) :]
result.append(relpath)
if recursive is False:
break
return result
|
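A hedged usage sketch for traverse_dir above; the directory name and the glob patterns are invented for illustration.

# List Python and config files under "project/" (path invented), excluding
# compiled and log files, and leaving directories out of the result:
paths = traverse_dir(
    "project",
    ignore=["*.pyc", "*.log"],
    only=["*.py", "*.cfg"],
    include_subdir=False,
)
# -> e.g. ["setup.py", "pkg/module.py", "setup.cfg"], relative to "project"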
12,775 | def test_raid(verbosity):
"""checks all MD arrays on local machine, returns status code"""
raid_devices = find_arrays(verbosity)
status = OK
message = ""
arrays_not_ok = 0
number_arrays = len(raid_devices)
for array in raid_devices:
if verbosity >= 2:
print('Now testing raid device "%s"' % array)
detailed_output = os.popen("%s --detail %s 2>&1" %
(BIN, array)).readlines()
if verbosity >= 3:
for line in detailed_output:
print(line,)
state = "unknown"
for line in detailed_output:
if "State :" in line:
state = line.split(":")[-1][1:-1]
state = state.rstrip()
re_clean = re.compile('^clean|active(,.*)?$')
if re_clean.match(state) and state != "active":
arrays_not_ok += 1
raidlevel = detailed_output[3].split()[-1]
shortname = array.split("/")[-1].upper()
if state == "dirty":
# This happens when the array is under heavy usage but it's \
# normal and the array recovers within seconds
continue
elif "recovering" in state:
extra_info = None
for line in detailed_output:
if "Rebuild Status" in line:
extra_info = line.rstrip()
message += 'Array "%s" is in state ' % shortname
if extra_info:
message += '"%s" (%s) - %s' \
% (state, raidlevel, extra_info)
else:
message += '"%s" (%s)' % (state, raidlevel)
message += ", "
if status == OK:
status = WARNING
elif state == "unknown":
message += 'State of Raid Array "%s" is unknown, ' % shortname
if status == OK:
status = UNKNOWN
else:
message += 'Array %s is in state "%s" (%s), ' \
% (shortname, state, raidlevel)
status = CRITICAL
message = message.rstrip(", ")
if status == OK:
message += "All arrays OK"
else:
if arrays_not_ok == 1:
message = "1 array not ok - " + message
else:
message = "%s arrays not ok - " % arrays_not_ok + message
if number_arrays == 1:
message += " [1 array checked]"
else:
message += " [%s arrays checked]" % number_arrays
return status, message
| def test_raid(verbosity):
"""checks all MD arrays on local machine, returns status code"""
raid_devices = find_arrays(verbosity)
status = OK
message = ""
arrays_not_ok = 0
number_arrays = len(raid_devices)
for array in raid_devices:
if verbosity >= 2:
print('Now testing raid device "%s"' % array)
detailed_output = os.popen("%s --detail %s 2>&1" %
(BIN, array)).readlines()
if verbosity >= 3:
for line in detailed_output:
print(line,)
state = "unknown"
for line in detailed_output:
if "State :" in line:
state = line.split(":")[-1][1:-1]
state = state.rstrip()
re_clean = re.compile('^clean|active(,.*)?$')
if re_clean.match(state) and state != "clean" and state != "active":
arrays_not_ok += 1
raidlevel = detailed_output[3].split()[-1]
shortname = array.split("/")[-1].upper()
if state == "dirty":
# This happens when the array is under heavy usage but it's \
# normal and the array recovers within seconds
continue
elif "recovering" in state:
extra_info = None
for line in detailed_output:
if "Rebuild Status" in line:
extra_info = line.rstrip()
message += 'Array "%s" is in state ' % shortname
if extra_info:
message += '"%s" (%s) - %s' \
% (state, raidlevel, extra_info)
else:
message += '"%s" (%s)' % (state, raidlevel)
message += ", "
if status == OK:
status = WARNING
elif state == "unknown":
message += 'State of Raid Array "%s" is unknown, ' % shortname
if status == OK:
status = UNKNOWN
else:
message += 'Array %s is in state "%s" (%s), ' \
% (shortname, state, raidlevel)
status = CRITICAL
message = message.rstrip(", ")
if status == OK:
message += "All arrays OK"
else:
if arrays_not_ok == 1:
message = "1 array not ok - " + message
else:
message = "%s arrays not ok - " % arrays_not_ok + message
if number_arrays == 1:
message += " [1 array checked]"
else:
message += " [%s arrays checked]" % number_arrays
return status, message
|
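The behavioural change in this pair is the added `state != "clean"` guard, so arrays reporting plain "clean" are no longer counted as failures. Below is a small illustrative sketch (the states are typical mdadm outputs, listed here only for illustration) of how the regex plus guard classifies states.

import re

re_clean = re.compile('^clean|active(,.*)?$')
for state in ("clean", "active", "clean, degraded", "active, recovering"):
    flagged = bool(re_clean.match(state)) and state not in ("clean", "active")
    print(state, "->", "not ok" if flagged else "ok")
# clean -> ok, active -> ok,
# "clean, degraded" -> not ok, "active, recovering" -> not ok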
34,047 | def test_no_eager_gc_in_equal_splitting_lazy_dataset(ray_start_regular_shared):
ds = (
ray.data.range(100, parallelism=10).map_batches(lambda x: x).experimental_lazy()
)
for batch in ds.iter_batches():
pass
assert ds._lazy
assert not ds._used_from_dataset_pipeline
# Splitting 10 blocks into 3 groups, so there must be a block that will be
# split in order to equalize the outputs. However, we should not GC the
# input block even if ds is lazy (we can do that only if ds is used in a
# DatasetPipeline, which is not the case here).
dss = ds.split(3, equal=True)
for split in dss:
split.show()
# This iteration ensures that the blocks of ds still exist.
for batch in ds.iter_batches():
pass
| def test_no_eager_gc_in_equal_splitting_lazy_dataset(ray_start_regular_shared):
ds = (
ray.data.range(100, parallelism=10).map_batches(lambda x: x).experimental_lazy()
)
for batch in ds.iter_batches():
pass
assert ds._lazy
assert not ds._used_from_dataset_pipeline
# Splitting 10 blocks into 3 groups, so there must be a block that will be
# split in order to equalize the outputs. However, we should not GC the
# input block even if ds is lazy (we can do that only if ds is used in a
# DatasetPipeline, which is not the case here).
dss = ds.split(3, equal=True)
for split in dss:
split.show()
# This iteration ensures that the blocks of ds still exist.
assert len(list(ds.take_all())) == 100
|
41,038 | def _get_parser():
"""
Parses command line inputs for tedana
Returns
-------
parser.parse_args() : argparse dict
"""
parser = argparse.ArgumentParser()
# Argument parser follows template provided by RalphyZ
# https://stackoverflow.com/a/43456577
optional = parser._action_groups.pop()
required = parser.add_argument_group('required arguments')
required.add_argument('-d',
dest='data',
nargs='+',
metavar='FILE',
type=lambda x: is_valid_file(parser, x),
help=('Multi-echo dataset for analysis. May be a '
'single file with spatially concatenated data '
'or a set of echo-specific files, in the same '
'order as the TEs are listed in the -e '
'argument.'),
required=True)
required.add_argument('-e',
dest='tes',
nargs='+',
metavar='TE',
type=float,
help='Echo times (in ms). E.g., 15.0 39.0 63.0',
required=True)
optional.add_argument('--mask',
dest='mask',
metavar='FILE',
type=lambda x: is_valid_file(parser, x),
help=("Binary mask of voxels to include in TE "
"Dependent ANAlysis. Must be in the same "
"space as `data`. If an explicit mask is not "
"provided, then Nilearn's compute_epi_mask "
"function will be used to derive a mask "
"from the first echo's data."),
default=None)
optional.add_argument('--mix',
dest='mixm',
metavar='FILE',
type=lambda x: is_valid_file(parser, x),
help=('File containing mixing matrix. If not '
'provided, ME-PCA & ME-ICA is done.'),
default=None)
optional.add_argument('--ctab',
dest='ctab',
metavar='FILE',
type=lambda x: is_valid_file(parser, x),
help=('File containing a component table from which '
'to extract pre-computed classifications.'),
default=None)
optional.add_argument('--manacc',
dest='manacc',
help=('Comma separated list of manually '
'accepted components'),
default=None)
optional.add_argument('--sourceTEs',
dest='source_tes',
type=str,
help=('Source TEs for models. E.g., 0 for all, '
'-1 for opt. com., and 1,2 for just TEs 1 and '
'2. Default=-1.'),
default=-1)
optional.add_argument('--combmode',
dest='combmode',
action='store',
choices=['t2s'],
help=('Combination scheme for TEs: '
't2s (Posse 1999, default)'),
default='t2s')
optional.add_argument('--verbose',
dest='verbose',
action='store_true',
help='Generate intermediate and additional files.',
default=False)
optional.add_argument('--tedort',
dest='tedort',
action='store_true',
help=('Orthogonalize rejected components w.r.t. '
'accepted components prior to denoising.'),
default=False)
optional.add_argument('--gscontrol',
dest='gscontrol',
required=False,
action='store',
nargs='+',
help=('Perform additional denoising to remove '
'spatially diffuse noise. Default is None. '
'This argument can be single value or a space '
'delimited list'),
choices=['t1c', 'gsr'],
default=None)
optional.add_argument('--tedpca',
dest='tedpca',
help='Method with which to select components in TEDPCA',
choices=['mle', 'kundu', 'kundu-stabilize'],
default='mle')
optional.add_argument('--out-dir',
dest='out_dir',
type=str,
help='Output directory.',
default='.')
optional.add_argument('--seed',
dest='fixed_seed',
type=int,
help=('Value random initialization of ICA algorithm. '
'Set to an integer value for reproducible ICA results. '
'Set to -1 for varying results across ICA calls. '
'Default=42.'),
default=42)
optional.add_argument('--png',
dest='png',
action='store_true',
help=('Creates a figures folder with static component '
'maps, timecourse plots and other diagnostic '
'images'),
default=False)
optional.add_argument('--png-cmap',
dest='png_cmap',
type=str,
help=('Colormap for figures'),
default='coolwarm')
optional.add_argument('--maxit',
dest='maxit',
type=int,
help=('Maximum number of iterations for ICA.'),
default=500)
optional.add_argument('--maxrestart',
dest='maxrestart',
type=int,
help=('Maximum number of attempts for ICA. If ICA '
'fails to converge, the fixed seed will be '
'updated and ICA will be run again. If '
'convergence is achieved before maxrestart '
'attempts, ICA will finish early.'),
default=10)
optional.add_argument('--debug',
dest='debug',
help=argparse.SUPPRESS,
action='store_true',
default=False)
optional.add_argument('--quiet',
dest='quiet',
help=argparse.SUPPRESS,
action='store_true',
default=False)
parser._action_groups.append(optional)
return parser
| def _get_parser():
"""
Parses command line inputs for tedana
Returns
-------
parser.parse_args() : argparse dict
"""
parser = argparse.ArgumentParser()
# Argument parser follows template provided by RalphyZ
# https://stackoverflow.com/a/43456577
optional = parser._action_groups.pop()
required = parser.add_argument_group('required arguments')
required.add_argument('-d',
dest='data',
nargs='+',
metavar='FILE',
type=lambda x: is_valid_file(parser, x),
help=('Multi-echo dataset for analysis. May be a '
'single file with spatially concatenated data '
'or a set of echo-specific files, in the same '
'order as the TEs are listed in the -e '
'argument.'),
required=True)
required.add_argument('-e',
dest='tes',
nargs='+',
metavar='TE',
type=float,
help='Echo times (in ms). E.g., 15.0 39.0 63.0',
required=True)
optional.add_argument('--mask',
dest='mask',
metavar='FILE',
type=lambda x: is_valid_file(parser, x),
help=("Binary mask of voxels to include in TE "
"Dependent ANAlysis. Must be in the same "
"space as `data`. If an explicit mask is not "
"provided, then Nilearn's compute_epi_mask "
"function will be used to derive a mask "
"from the first echo's data."),
default=None)
optional.add_argument('--mix',
dest='mixm',
metavar='FILE',
type=lambda x: is_valid_file(parser, x),
help=('File containing mixing matrix. If not '
'provided, ME-PCA & ME-ICA is done.'),
default=None)
optional.add_argument('--ctab',
dest='ctab',
metavar='FILE',
type=lambda x: is_valid_file(parser, x),
help=('File containing a component table from which '
'to extract pre-computed classifications.'),
default=None)
optional.add_argument('--manacc',
dest='manacc',
help=('Comma separated list of manually '
'accepted components'),
default=None)
optional.add_argument('--sourceTEs',
dest='source_tes',
type=str,
help=('Source TEs for models. E.g., 0 for all, '
'-1 for opt. com., and 1,2 for just TEs 1 and '
'2. Default=-1.'),
default=-1)
optional.add_argument('--combmode',
dest='combmode',
action='store',
choices=['t2s'],
help=('Combination scheme for TEs: '
't2s (Posse 1999, default)'),
default='t2s')
optional.add_argument('--verbose',
dest='verbose',
action='store_true',
help='Generate intermediate and additional files.',
default=False)
optional.add_argument('--tedort',
dest='tedort',
action='store_true',
help=('Orthogonalize rejected components w.r.t. '
'accepted components prior to denoising.'),
default=False)
optional.add_argument('--gscontrol',
dest='gscontrol',
required=False,
action='store',
nargs='+',
help=('Perform additional denoising to remove '
'spatially diffuse noise. Default is None. '
'This argument can be single value or a space '
'delimited list'),
choices=['t1c', 'gsr'],
default=None)
optional.add_argument('--tedpca',
dest='tedpca',
help='Method with which to select components in TEDPCA',
choices=['mle', 'kundu', 'kundu-stabilize'],
default='mle')
optional.add_argument('--out-dir',
dest='out_dir',
type=str,
help='Output directory.',
default='.')
optional.add_argument('--seed',
dest='fixed_seed',
type=int,
help=('Value used for random initialization of ICA algorithm. '
'Set to an integer value for reproducible ICA results. '
'Set to -1 for varying results across ICA calls. '
'Default=42.'),
default=42)
optional.add_argument('--png',
dest='png',
action='store_true',
help=('Creates a figures folder with static component '
'maps, timecourse plots and other diagnostic '
'images'),
default=False)
optional.add_argument('--png-cmap',
dest='png_cmap',
type=str,
help=('Colormap for figures'),
default='coolwarm')
optional.add_argument('--maxit',
dest='maxit',
type=int,
help=('Maximum number of iterations for ICA.'),
default=500)
optional.add_argument('--maxrestart',
dest='maxrestart',
type=int,
help=('Maximum number of attempts for ICA. If ICA '
'fails to converge, the fixed seed will be '
'updated and ICA will be run again. If '
'convergence is achieved before maxrestart '
'attempts, ICA will finish early.'),
default=10)
optional.add_argument('--debug',
dest='debug',
help=argparse.SUPPRESS,
action='store_true',
default=False)
optional.add_argument('--quiet',
dest='quiet',
help=argparse.SUPPRESS,
action='store_true',
default=False)
parser._action_groups.append(optional)
return parser
|
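A hedged sketch of driving the tedana parser above; the echo file names are invented, and because `-d` runs each path through is_valid_file the files must exist, so the parse call is left commented.

parser = _get_parser()
# opts = parser.parse_args([
#     '-d', 'echo1.nii.gz', 'echo2.nii.gz', 'echo3.nii.gz',
#     '-e', '15.0', '39.0', '63.0',
#     '--tedpca', 'kundu',
#     '--out-dir', 'tedana_out',
# ])
# opts.tes        -> [15.0, 39.0, 63.0]
# opts.fixed_seed -> 42 (the default seed)
parser.print_help()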
27,453 | def lintify(meta, recipe_dir=None, conda_forge=False):
lints = []
hints = []
major_sections = list(meta.keys())
# If the recipe_dir exists (no guarantee within this function), we can
# find the meta.yaml within it.
meta_fname = os.path.join(recipe_dir or "", "meta.yaml")
sources_section = get_section(meta, "source", lints)
build_section = get_section(meta, "build", lints)
requirements_section = get_section(meta, "requirements", lints)
test_section = get_section(meta, "test", lints)
about_section = get_section(meta, "about", lints)
extra_section = get_section(meta, "extra", lints)
package_section = get_section(meta, "package", lints)
outputs_section = get_section(meta, "outputs", lints)
recipe_dirname = os.path.basename(recipe_dir) if recipe_dir else "recipe"
is_staged_recipes = recipe_dirname != "recipe"
# 0: Top level keys should be expected
unexpected_sections = []
for section in major_sections:
if section not in EXPECTED_SECTION_ORDER:
lints.append(
"The top level meta key {} is unexpected".format(section)
)
unexpected_sections.append(section)
for section in unexpected_sections:
major_sections.remove(section)
# 1: Top level meta.yaml keys should have a specific order.
lint_section_order(major_sections, lints)
# 2: The about section should have a home, license and summary.
lint_about_contents(about_section, lints)
# 3a: The recipe should have some maintainers.
if not extra_section.get("recipe-maintainers", []):
lints.append(
"The recipe could do with some maintainers listed in "
"the `extra/recipe-maintainers` section."
)
# 3b: Maintainers should be a list
if not (
isinstance(extra_section.get("recipe-maintainers", []), Sequence)
and not isinstance(
extra_section.get("recipe-maintainers", []), str_type
)
):
lints.append("Recipe maintainers should be a json list.")
# 4: The recipe should have some tests.
if not any(key in TEST_KEYS for key in test_section):
a_test_file_exists = recipe_dir is not None and any(
os.path.exists(os.path.join(recipe_dir, test_file))
for test_file in TEST_FILES
)
if not a_test_file_exists:
has_outputs_test = False
no_test_hints = []
if outputs_section:
for out in outputs_section:
test_out = get_section(out, "test", lints)
if any(key in TEST_KEYS for key in test_out):
has_outputs_test = True
else:
no_test_hints.append(
"It looks like the '{}' output doesn't "
"have any tests.".format(out.get("name", "???"))
)
if has_outputs_test:
hints.extend(no_test_hints)
else:
lints.append("The recipe must have some tests.")
# 5: License cannot be 'unknown.'
license = about_section.get("license", "").lower()
if "unknown" == license.strip():
lints.append("The recipe license cannot be unknown.")
# 6: Selectors should be in a tidy form.
if recipe_dir is not None and os.path.exists(meta_fname):
bad_selectors = []
bad_lines = []
# Good selectors look like ".*\s\s#\s[...]"
good_selectors_pat = re.compile(r"(.+?)\s{2,}#\s\[(.+)\](?(2).*)$")
with io.open(meta_fname, "rt") as fh:
for selector_line, line_number in selector_lines(fh):
if not good_selectors_pat.match(selector_line):
bad_selectors.append(selector_line)
bad_lines.append(line_number)
if bad_selectors:
lints.append(
"Selectors are suggested to take a "
"``<two spaces>#<one space>[<expression>]`` form."
" See lines {}".format(bad_lines)
)
# 7: The build section should have a build number.
if build_section.get("number", None) is None:
lints.append("The recipe must have a `build/number` section.")
# 8: The build section should be before the run section in requirements.
seen_requirements = [
k for k in requirements_section if k in REQUIREMENTS_ORDER
]
requirements_order_sorted = sorted(
seen_requirements, key=REQUIREMENTS_ORDER.index
)
if seen_requirements != requirements_order_sorted:
lints.append(
"The `requirements/` sections should be defined "
"in the following order: "
+ ", ".join(REQUIREMENTS_ORDER)
+ "; instead saw: "
+ ", ".join(seen_requirements)
+ "."
)
# 9: Files downloaded should have a hash.
for source_section in sources_section:
if "url" in source_section and not (
{"sha1", "sha256", "md5"} & set(source_section.keys())
):
lints.append(
"When defining a source/url please add a sha256, sha1 "
"or md5 checksum (sha256 preferably)."
)
# 10: License should not include the word 'license'.
license = about_section.get("license", "").lower()
if (
"license" in license.lower()
and "unlicense" not in license.lower()
and "licenseref" not in license.lower()
and "-license" not in license.lower()
):
lints.append(
"The recipe `license` should not include the word " '"License".'
)
# 11: There should be one empty line at the end of the file.
if recipe_dir is not None and os.path.exists(meta_fname):
with io.open(meta_fname, "r") as f:
lines = f.read().split("\n")
# Count the number of empty lines from the end of the file
empty_lines = itertools.takewhile(lambda x: x == "", reversed(lines))
end_empty_lines_count = len(list(empty_lines))
if end_empty_lines_count > 1:
lints.append(
"There are {} too many lines. "
"There should be one empty line at the end of the "
"file.".format(end_empty_lines_count - 1)
)
elif end_empty_lines_count < 1:
lints.append(
"There are too few lines. There should be one empty "
"line at the end of the file."
)
# 12: License family must be valid (conda-build checks for that)
try:
ensure_valid_license_family(meta)
except RuntimeError as e:
lints.append(str(e))
# 12a: License family must be valid (conda-build checks for that)
license_family = about_section.get("license_family", license).lower()
license_file = about_section.get("license_file", "")
if license_file == "" and any(
f for f in NEEDED_FAMILIES if f in license_family
):
lints.append("license_file entry is missing, but is required.")
# 13: Check that the recipe name is valid
recipe_name = package_section.get("name", "").strip()
if re.match("^[a-z0-9_\-.]+$", recipe_name) is None:
lints.append(
"Recipe name has invalid characters. only lowercase alpha, numeric, "
"underscores, hyphens and dots allowed"
)
# 14: Run conda-forge specific lints
if conda_forge:
run_conda_forge_specific(meta, recipe_dir, lints, hints)
# 15: Check if we are using legacy patterns
build_reqs = requirements_section.get("build", None)
if build_reqs and ("numpy x.x" in build_reqs):
lints.append(
"Using pinned numpy packages is a deprecated pattern. Consider "
"using the method outlined "
"[here](https://conda-forge.org/docs/maintainer/knowledge_base.html#linking-numpy)."
)
# 16: Subheaders should be in the allowed subheadings
for section in major_sections:
expected_subsections = FIELDS.get(section, [])
if not expected_subsections:
continue
for subsection in get_section(meta, section, lints):
if (
section != "source"
and section != "outputs"
and subsection not in expected_subsections
):
lints.append(
"The {} section contained an unexpected "
"subsection name. {} is not a valid subsection"
" name.".format(section, subsection)
)
elif section == "source" or section == "outputs":
for source_subsection in subsection:
if source_subsection not in expected_subsections:
lints.append(
"The {} section contained an unexpected "
"subsection name. {} is not a valid subsection"
" name.".format(section, source_subsection)
)
# 17: noarch doesn't work with selectors for runtime dependencies
if build_section.get("noarch") is not None and os.path.exists(meta_fname):
with io.open(meta_fname, "rt") as fh:
in_runreqs = False
for line in fh:
line_s = line.strip()
if line_s == "host:" or line_s == "run:":
in_runreqs = True
runreqs_spacing = line[: -len(line.lstrip())]
continue
if line_s.startswith("skip:") and is_selector_line(line):
lints.append(
"`noarch` packages can't have selectors. If "
"the selectors are necessary, please remove "
"`noarch: {}`.".format(build_section["noarch"])
)
break
if in_runreqs:
if runreqs_spacing == line[: -len(line.lstrip())]:
in_runreqs = False
continue
if is_selector_line(line):
lints.append(
"`noarch` packages can't have selectors. If "
"the selectors are necessary, please remove "
"`noarch: {}`.".format(build_section["noarch"])
)
break
# 19: check version
if package_section.get("version") is not None:
ver = str(package_section.get("version"))
try:
conda_build.conda_interface.VersionOrder(ver)
except:
lints.append(
"Package version {} doesn't match conda spec".format(ver)
)
# 20: Jinja2 variable definitions should be nice.
if recipe_dir is not None and os.path.exists(meta_fname):
bad_jinja = []
bad_lines = []
# Good Jinja2 variable definitions look like "{% set .+ = .+ %}"
good_jinja_pat = re.compile(r"\s*\{%\s(set)\s[^\s]+\s=\s[^\s]+\s%\}")
with io.open(meta_fname, "rt") as fh:
for jinja_line, line_number in jinja_lines(fh):
if not good_jinja_pat.match(jinja_line):
bad_jinja.append(jinja_line)
bad_lines.append(line_number)
if bad_jinja:
lints.append(
"Jinja2 variable definitions are suggested to "
"take a ``{{%<one space>set<one space>"
"<variable name><one space>=<one space>"
"<expression><one space>%}}`` form. See lines "
"{}".format(bad_lines)
)
# 21: Legacy usage of compilers
if build_reqs and ("toolchain" in build_reqs):
lints.append(
"Using toolchain directly in this manner is deprecated. Consider "
"using the compilers outlined "
"[here](https://conda-forge.org/docs/maintainer/knowledge_base.html#compilers)."
)
# 22: Single space in pinned requirements
for section, requirements in requirements_section.items():
for requirement in requirements or []:
req, _, _ = requirement.partition("#")
if "{{" in req:
continue
parts = req.split()
if len(parts) > 2 and parts[1] in [
"!=",
"=",
"==",
">",
"<",
"<=",
">=",
]:
# check for too many spaces
lints.append(
(
"``requirements: {section}: {requirement}`` should not "
"contain a space between relational operator and the version, i.e. "
"``{name} {pin}``"
).format(
section=section,
requirement=requirement,
name=parts[0],
pin="".join(parts[1:]),
)
)
continue
# check that there is a space if there is a pin
bad_char_idx = [(parts[0].find(c), c) for c in "><="]
bad_char_idx = [bci for bci in bad_char_idx if bci[0] >= 0]
if bad_char_idx:
bad_char_idx.sort()
i = bad_char_idx[0][0]
lints.append(
(
"``requirements: {section}: {requirement}`` must "
"contain a space between the name and the pin, i.e. "
"``{name} {pin}``"
).format(
section=section,
requirement=requirement,
name=parts[0][:i],
pin=parts[0][i:] + "".join(parts[1:]),
)
)
continue
# 23: non noarch builds shouldn't use version constraints on python and r-base
check_languages = ["python", "r-base"]
host_reqs = requirements_section.get("host") or []
run_reqs = requirements_section.get("run") or []
for language in check_languages:
if build_section.get("noarch") is None and not outputs_section:
filtered_host_reqs = [
req
for req in host_reqs
if req.partition(" ")[0] == str(language)
]
filtered_run_reqs = [
req
for req in run_reqs
if req.partition(" ")[0] == str(language)
]
if filtered_host_reqs and not filtered_run_reqs:
lints.append(
"If {0} is a host requirement, it should be a run requirement.".format(
str(language)
)
)
for reqs in [filtered_host_reqs, filtered_run_reqs]:
if str(language) in reqs:
continue
for req in reqs:
constraint = req.split(" ", 1)[1]
if constraint.startswith(">") or constraint.startswith(
"<"
):
lints.append(
"Non noarch packages should have {0} requirement without any version constraints.".format(
str(language)
)
)
# 24: jinja2 variable references should be {{<one space>var<one space>}}
if recipe_dir is not None and os.path.exists(meta_fname):
bad_vars = []
bad_lines = []
with io.open(meta_fname, "rt") as fh:
for i, line in enumerate(fh.readlines()):
for m in JINJA_VAR_PAT.finditer(line):
if m.group(1) is not None:
var = m.group(1)
if var != " %s " % var.strip():
bad_vars.append(m.group(1).strip())
bad_lines.append(i+1)
if bad_vars:
lints.append(
"Jinja2 variable references are suggested to "
"take a ``{{<one space><variable name><one space>}}``"
" form. See lines %s." % (bad_lines,)
)
# hints
# 1: suggest pip
if "script" in build_section:
scripts = build_section["script"]
if isinstance(scripts, str):
scripts = [scripts]
for script in scripts:
if "python setup.py install" in script:
hints.append(
"Whenever possible python packages should use pip. "
"See https://conda-forge.org/docs/maintainer/adding_pkgs.html#use-pip"
)
# 2: suggest python noarch (skip on feedstocks)
if (
build_section.get("noarch") is None
and build_reqs
and not any(["_compiler_stub" in b for b in build_reqs])
and ("pip" in build_reqs)
and (is_staged_recipes or not conda_forge)
):
with io.open(meta_fname, "rt") as fh:
in_runreqs = False
no_arch_possible = True
for line in fh:
line_s = line.strip()
if line_s == "host:" or line_s == "run:":
in_runreqs = True
runreqs_spacing = line[: -len(line.lstrip())]
continue
if line_s.startswith("skip:") and is_selector_line(line):
no_arch_possible = False
break
if in_runreqs:
if runreqs_spacing == line[: -len(line.lstrip())]:
in_runreqs = False
continue
if is_selector_line(line):
no_arch_possible = False
break
if no_arch_possible:
hints.append(
"Whenever possible python packages should use noarch. "
"See https://conda-forge.org/docs/maintainer/knowledge_base.html#noarch-builds"
)
# 3: suggest fixing all recipe/*.sh shellcheck findings
shellcheck_enabled = False
shell_scripts = []
if recipe_dir:
shell_scripts = glob(os.path.join(recipe_dir, "*.sh"))
# support feedstocks and staged-recipes
forge_yaml = glob(
os.path.join(recipe_dir, "..", "conda-forge.yml")
) or glob(os.path.join(recipe_dir, "..", "..", "conda-forge.yml"),)
if shell_scripts and forge_yaml:
with open(forge_yaml[0], "r") as fh:
code = get_yaml().load(fh)
shellcheck_enabled = code.get("shellcheck", {}).get(
"enabled", shellcheck_enabled
)
if shellcheck_enabled and shutil.which("shellcheck") and shell_scripts:
MAX_SHELLCHECK_LINES = 50
cmd = [
"shellcheck",
"--enable=all",
"--shell=bash",
# SC2154: var is referenced but not assigned,
# see https://github.com/koalaman/shellcheck/wiki/SC2154
"--exclude=SC2154",
]
p = subprocess.Popen(
cmd + shell_scripts,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
env={
"PATH": os.getenv("PATH")
}, # exclude other env variables to protect against token leakage
)
sc_stdout, _ = p.communicate()
if p.returncode == 1:
# All files successfully scanned with some issues.
findings = (
sc_stdout.decode(sys.stdout.encoding)
.replace("\r\n", "\n")
.splitlines()
)
hints.append(
"Whenever possible fix all shellcheck findings ('"
+ " ".join(cmd)
+ " recipe/*.sh -f diff | git apply' helps)"
)
hints.extend(findings[:50])
if len(findings) > MAX_SHELLCHECK_LINES:
hints.append(
"Output restricted, there are '%s' more lines."
% (len(findings) - MAX_SHELLCHECK_LINES)
)
elif p.returncode != 0:
# Something went wrong.
hints.append(
"There have been errors while scanning with shellcheck."
)
# 4: Check for SPDX
import license_expression
license = about_section.get("license", "")
licensing = license_expression.Licensing()
parsed_exceptions = []
try:
parsed_licenses = []
parsed_licenses_with_exception = licensing.license_symbols(
license.strip(), decompose=False
)
for l in parsed_licenses_with_exception:
if isinstance(l, license_expression.LicenseWithExceptionSymbol):
parsed_licenses.append(l.license_symbol.key)
parsed_exceptions.append(l.exception_symbol.key)
else:
parsed_licenses.append(l.key)
except license_expression.ExpressionError:
parsed_licenses = [license]
licenseref_regex = re.compile("^LicenseRef[a-zA-Z0-9\-.]*$")
filtered_licenses = []
for license in parsed_licenses:
if not licenseref_regex.match(license):
filtered_licenses.append(license)
with open(
os.path.join(os.path.dirname(__file__), "licenses.txt"), "r"
) as f:
expected_licenses = f.readlines()
expected_licenses = set([l.strip() for l in expected_licenses])
with open(
os.path.join(os.path.dirname(__file__), "license_exceptions.txt"), "r"
) as f:
expected_exceptions = f.readlines()
expected_exceptions = set([l.strip() for l in expected_exceptions])
if set(filtered_licenses) - expected_licenses:
hints.append(
"License is not an SPDX identifier (or a custom LicenseRef) nor an SPDX license expression."
)
if set(parsed_exceptions) - expected_exceptions:
hints.append("License exception is not an SPDX exception.")
return lints, hints
| def lintify(meta, recipe_dir=None, conda_forge=False):
lints = []
hints = []
major_sections = list(meta.keys())
# If the recipe_dir exists (no guarantee within this function), we can
# find the meta.yaml within it.
meta_fname = os.path.join(recipe_dir or "", "meta.yaml")
sources_section = get_section(meta, "source", lints)
build_section = get_section(meta, "build", lints)
requirements_section = get_section(meta, "requirements", lints)
test_section = get_section(meta, "test", lints)
about_section = get_section(meta, "about", lints)
extra_section = get_section(meta, "extra", lints)
package_section = get_section(meta, "package", lints)
outputs_section = get_section(meta, "outputs", lints)
recipe_dirname = os.path.basename(recipe_dir) if recipe_dir else "recipe"
is_staged_recipes = recipe_dirname != "recipe"
# 0: Top level keys should be expected
unexpected_sections = []
for section in major_sections:
if section not in EXPECTED_SECTION_ORDER:
lints.append(
"The top level meta key {} is unexpected".format(section)
)
unexpected_sections.append(section)
for section in unexpected_sections:
major_sections.remove(section)
# 1: Top level meta.yaml keys should have a specific order.
lint_section_order(major_sections, lints)
# 2: The about section should have a home, license and summary.
lint_about_contents(about_section, lints)
# 3a: The recipe should have some maintainers.
if not extra_section.get("recipe-maintainers", []):
lints.append(
"The recipe could do with some maintainers listed in "
"the `extra/recipe-maintainers` section."
)
# 3b: Maintainers should be a list
if not (
isinstance(extra_section.get("recipe-maintainers", []), Sequence)
and not isinstance(
extra_section.get("recipe-maintainers", []), str_type
)
):
lints.append("Recipe maintainers should be a json list.")
# 4: The recipe should have some tests.
if not any(key in TEST_KEYS for key in test_section):
a_test_file_exists = recipe_dir is not None and any(
os.path.exists(os.path.join(recipe_dir, test_file))
for test_file in TEST_FILES
)
if not a_test_file_exists:
has_outputs_test = False
no_test_hints = []
if outputs_section:
for out in outputs_section:
test_out = get_section(out, "test", lints)
if any(key in TEST_KEYS for key in test_out):
has_outputs_test = True
else:
no_test_hints.append(
"It looks like the '{}' output doesn't "
"have any tests.".format(out.get("name", "???"))
)
if has_outputs_test:
hints.extend(no_test_hints)
else:
lints.append("The recipe must have some tests.")
# 5: License cannot be 'unknown.'
license = about_section.get("license", "").lower()
if "unknown" == license.strip():
lints.append("The recipe license cannot be unknown.")
# 6: Selectors should be in a tidy form.
if recipe_dir is not None and os.path.exists(meta_fname):
bad_selectors = []
bad_lines = []
# Good selectors look like ".*\s\s#\s[...]"
good_selectors_pat = re.compile(r"(.+?)\s{2,}#\s\[(.+)\](?(2).*)$")
with io.open(meta_fname, "rt") as fh:
for selector_line, line_number in selector_lines(fh):
if not good_selectors_pat.match(selector_line):
bad_selectors.append(selector_line)
bad_lines.append(line_number)
if bad_selectors:
lints.append(
"Selectors are suggested to take a "
"``<two spaces>#<one space>[<expression>]`` form."
" See lines {}".format(bad_lines)
)
# 7: The build section should have a build number.
if build_section.get("number", None) is None:
lints.append("The recipe must have a `build/number` section.")
# 8: The build section should be before the run section in requirements.
seen_requirements = [
k for k in requirements_section if k in REQUIREMENTS_ORDER
]
requirements_order_sorted = sorted(
seen_requirements, key=REQUIREMENTS_ORDER.index
)
if seen_requirements != requirements_order_sorted:
lints.append(
"The `requirements/` sections should be defined "
"in the following order: "
+ ", ".join(REQUIREMENTS_ORDER)
+ "; instead saw: "
+ ", ".join(seen_requirements)
+ "."
)
# 9: Files downloaded should have a hash.
for source_section in sources_section:
if "url" in source_section and not (
{"sha1", "sha256", "md5"} & set(source_section.keys())
):
lints.append(
"When defining a source/url please add a sha256, sha1 "
"or md5 checksum (sha256 preferably)."
)
# 10: License should not include the word 'license'.
license = about_section.get("license", "").lower()
if (
"license" in license.lower()
and "unlicense" not in license.lower()
and "licenseref" not in license.lower()
and "-license" not in license.lower()
):
lints.append(
"The recipe `license` should not include the word " '"License".'
)
# 11: There should be one empty line at the end of the file.
if recipe_dir is not None and os.path.exists(meta_fname):
with io.open(meta_fname, "r") as f:
lines = f.read().split("\n")
# Count the number of empty lines from the end of the file
empty_lines = itertools.takewhile(lambda x: x == "", reversed(lines))
end_empty_lines_count = len(list(empty_lines))
if end_empty_lines_count > 1:
lints.append(
"There are {} too many lines. "
"There should be one empty line at the end of the "
"file.".format(end_empty_lines_count - 1)
)
elif end_empty_lines_count < 1:
lints.append(
"There are too few lines. There should be one empty "
"line at the end of the file."
)
# 12: License family must be valid (conda-build checks for that)
try:
ensure_valid_license_family(meta)
except RuntimeError as e:
lints.append(str(e))
# 12a: License family must be valid (conda-build checks for that)
license_family = about_section.get("license_family", license).lower()
license_file = about_section.get("license_file", "")
if license_file == "" and any(
f for f in NEEDED_FAMILIES if f in license_family
):
lints.append("license_file entry is missing, but is required.")
# 13: Check that the recipe name is valid
recipe_name = package_section.get("name", "").strip()
if re.match("^[a-z0-9_\-.]+$", recipe_name) is None:
lints.append(
"Recipe name has invalid characters. only lowercase alpha, numeric, "
"underscores, hyphens and dots allowed"
)
# 14: Run conda-forge specific lints
if conda_forge:
run_conda_forge_specific(meta, recipe_dir, lints, hints)
# 15: Check if we are using legacy patterns
build_reqs = requirements_section.get("build", None)
if build_reqs and ("numpy x.x" in build_reqs):
lints.append(
"Using pinned numpy packages is a deprecated pattern. Consider "
"using the method outlined "
"[here](https://conda-forge.org/docs/maintainer/knowledge_base.html#linking-numpy)."
)
# 16: Subheaders should be in the allowed subheadings
for section in major_sections:
expected_subsections = FIELDS.get(section, [])
if not expected_subsections:
continue
for subsection in get_section(meta, section, lints):
if (
section != "source"
and section != "outputs"
and subsection not in expected_subsections
):
lints.append(
"The {} section contained an unexpected "
"subsection name. {} is not a valid subsection"
" name.".format(section, subsection)
)
elif section == "source" or section == "outputs":
for source_subsection in subsection:
if source_subsection not in expected_subsections:
lints.append(
"The {} section contained an unexpected "
"subsection name. {} is not a valid subsection"
" name.".format(section, source_subsection)
)
# 17: noarch doesn't work with selectors for runtime dependencies
if build_section.get("noarch") is not None and os.path.exists(meta_fname):
with io.open(meta_fname, "rt") as fh:
in_runreqs = False
for line in fh:
line_s = line.strip()
if line_s == "host:" or line_s == "run:":
in_runreqs = True
runreqs_spacing = line[: -len(line.lstrip())]
continue
if line_s.startswith("skip:") and is_selector_line(line):
lints.append(
"`noarch` packages can't have selectors. If "
"the selectors are necessary, please remove "
"`noarch: {}`.".format(build_section["noarch"])
)
break
if in_runreqs:
if runreqs_spacing == line[: -len(line.lstrip())]:
in_runreqs = False
continue
if is_selector_line(line):
lints.append(
"`noarch` packages can't have selectors. If "
"the selectors are necessary, please remove "
"`noarch: {}`.".format(build_section["noarch"])
)
break
# 19: check version
if package_section.get("version") is not None:
ver = str(package_section.get("version"))
try:
conda_build.conda_interface.VersionOrder(ver)
except:
lints.append(
"Package version {} doesn't match conda spec".format(ver)
)
# 20: Jinja2 variable definitions should be nice.
if recipe_dir is not None and os.path.exists(meta_fname):
bad_jinja = []
bad_lines = []
# Good Jinja2 variable definitions look like "{% set .+ = .+ %}"
good_jinja_pat = re.compile(r"\s*\{%\s(set)\s[^\s]+\s=\s[^\s]+\s%\}")
with io.open(meta_fname, "rt") as fh:
for jinja_line, line_number in jinja_lines(fh):
if not good_jinja_pat.match(jinja_line):
bad_jinja.append(jinja_line)
bad_lines.append(line_number)
if bad_jinja:
lints.append(
"Jinja2 variable definitions are suggested to "
"take a ``{{%<one space>set<one space>"
"<variable name><one space>=<one space>"
"<expression><one space>%}}`` form. See lines "
"{}".format(bad_lines)
)
# 21: Legacy usage of compilers
if build_reqs and ("toolchain" in build_reqs):
lints.append(
"Using toolchain directly in this manner is deprecated. Consider "
"using the compilers outlined "
"[here](https://conda-forge.org/docs/maintainer/knowledge_base.html#compilers)."
)
# 22: Single space in pinned requirements
for section, requirements in requirements_section.items():
for requirement in requirements or []:
req, _, _ = requirement.partition("#")
if "{{" in req:
continue
parts = req.split()
if len(parts) > 2 and parts[1] in [
"!=",
"=",
"==",
">",
"<",
"<=",
">=",
]:
# check for too many spaces
lints.append(
(
"``requirements: {section}: {requirement}`` should not "
"contain a space between relational operator and the version, i.e. "
"``{name} {pin}``"
).format(
section=section,
requirement=requirement,
name=parts[0],
pin="".join(parts[1:]),
)
)
continue
# check that there is a space if there is a pin
bad_char_idx = [(parts[0].find(c), c) for c in "><="]
bad_char_idx = [bci for bci in bad_char_idx if bci[0] >= 0]
if bad_char_idx:
bad_char_idx.sort()
i = bad_char_idx[0][0]
lints.append(
(
"``requirements: {section}: {requirement}`` must "
"contain a space between the name and the pin, i.e. "
"``{name} {pin}``"
).format(
section=section,
requirement=requirement,
name=parts[0][:i],
pin=parts[0][i:] + "".join(parts[1:]),
)
)
continue
# 23: non noarch builds shouldn't use version constraints on python and r-base
check_languages = ["python", "r-base"]
host_reqs = requirements_section.get("host") or []
run_reqs = requirements_section.get("run") or []
for language in check_languages:
if build_section.get("noarch") is None and not outputs_section:
filtered_host_reqs = [
req
for req in host_reqs
if req.partition(" ")[0] == str(language)
]
filtered_run_reqs = [
req
for req in run_reqs
if req.partition(" ")[0] == str(language)
]
if filtered_host_reqs and not filtered_run_reqs:
lints.append(
"If {0} is a host requirement, it should be a run requirement.".format(
str(language)
)
)
for reqs in [filtered_host_reqs, filtered_run_reqs]:
if str(language) in reqs:
continue
for req in reqs:
constraint = req.split(" ", 1)[1]
if constraint.startswith(">") or constraint.startswith(
"<"
):
lints.append(
"Non noarch packages should have {0} requirement without any version constraints.".format(
str(language)
)
)
# 24: jinja2 variable references should be {{<one space>var<one space>}}
if recipe_dir is not None and os.path.exists(meta_fname):
bad_vars = []
bad_lines = []
with io.open(meta_fname, "rt") as fh:
for i, line in enumerate(fh.readlines()):
for m in JINJA_VAR_PAT.finditer(line):
if m.group(1) is not None:
var = m.group(1)
if var != " %s " % var.strip():
bad_vars.append(m.group(1).strip())
bad_lines.append(i+1)
if bad_vars:
hints.append(
"Jinja2 variable references are suggested to "
"take a ``{{<one space><variable name><one space>}}``"
" form. See lines %s." % (bad_lines,)
)
# hints
# 1: suggest pip
if "script" in build_section:
scripts = build_section["script"]
if isinstance(scripts, str):
scripts = [scripts]
for script in scripts:
if "python setup.py install" in script:
hints.append(
"Whenever possible python packages should use pip. "
"See https://conda-forge.org/docs/maintainer/adding_pkgs.html#use-pip"
)
# 2: suggest python noarch (skip on feedstocks)
if (
build_section.get("noarch") is None
and build_reqs
and not any(["_compiler_stub" in b for b in build_reqs])
and ("pip" in build_reqs)
and (is_staged_recipes or not conda_forge)
):
with io.open(meta_fname, "rt") as fh:
in_runreqs = False
no_arch_possible = True
for line in fh:
line_s = line.strip()
if line_s == "host:" or line_s == "run:":
in_runreqs = True
runreqs_spacing = line[: -len(line.lstrip())]
continue
if line_s.startswith("skip:") and is_selector_line(line):
no_arch_possible = False
break
if in_runreqs:
if runreqs_spacing == line[: -len(line.lstrip())]:
in_runreqs = False
continue
if is_selector_line(line):
no_arch_possible = False
break
if no_arch_possible:
hints.append(
"Whenever possible python packages should use noarch. "
"See https://conda-forge.org/docs/maintainer/knowledge_base.html#noarch-builds"
)
# 3: suggest fixing all recipe/*.sh shellcheck findings
shellcheck_enabled = False
shell_scripts = []
if recipe_dir:
shell_scripts = glob(os.path.join(recipe_dir, "*.sh"))
# support feedstocks and staged-recipes
forge_yaml = glob(
os.path.join(recipe_dir, "..", "conda-forge.yml")
) or glob(os.path.join(recipe_dir, "..", "..", "conda-forge.yml"),)
if shell_scripts and forge_yaml:
with open(forge_yaml[0], "r") as fh:
code = get_yaml().load(fh)
shellcheck_enabled = code.get("shellcheck", {}).get(
"enabled", shellcheck_enabled
)
if shellcheck_enabled and shutil.which("shellcheck") and shell_scripts:
MAX_SHELLCHECK_LINES = 50
cmd = [
"shellcheck",
"--enable=all",
"--shell=bash",
# SC2154: var is referenced but not assigned,
# see https://github.com/koalaman/shellcheck/wiki/SC2154
"--exclude=SC2154",
]
p = subprocess.Popen(
cmd + shell_scripts,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
env={
"PATH": os.getenv("PATH")
}, # exclude other env variables to protect against token leakage
)
sc_stdout, _ = p.communicate()
if p.returncode == 1:
# All files successfully scanned with some issues.
findings = (
sc_stdout.decode(sys.stdout.encoding)
.replace("\r\n", "\n")
.splitlines()
)
hints.append(
"Whenever possible fix all shellcheck findings ('"
+ " ".join(cmd)
+ " recipe/*.sh -f diff | git apply' helps)"
)
hints.extend(findings[:50])
if len(findings) > MAX_SHELLCHECK_LINES:
hints.append(
"Output restricted, there are '%s' more lines."
% (len(findings) - MAX_SHELLCHECK_LINES)
)
elif p.returncode != 0:
# Something went wrong.
hints.append(
"There have been errors while scanning with shellcheck."
)
# 4: Check for SPDX
import license_expression
license = about_section.get("license", "")
licensing = license_expression.Licensing()
parsed_exceptions = []
try:
parsed_licenses = []
parsed_licenses_with_exception = licensing.license_symbols(
license.strip(), decompose=False
)
for l in parsed_licenses_with_exception:
if isinstance(l, license_expression.LicenseWithExceptionSymbol):
parsed_licenses.append(l.license_symbol.key)
parsed_exceptions.append(l.exception_symbol.key)
else:
parsed_licenses.append(l.key)
except license_expression.ExpressionError:
parsed_licenses = [license]
licenseref_regex = re.compile("^LicenseRef[a-zA-Z0-9\-.]*$")
filtered_licenses = []
for license in parsed_licenses:
if not licenseref_regex.match(license):
filtered_licenses.append(license)
with open(
os.path.join(os.path.dirname(__file__), "licenses.txt"), "r"
) as f:
expected_licenses = f.readlines()
expected_licenses = set([l.strip() for l in expected_licenses])
with open(
os.path.join(os.path.dirname(__file__), "license_exceptions.txt"), "r"
) as f:
expected_exceptions = f.readlines()
expected_exceptions = set([l.strip() for l in expected_exceptions])
if set(filtered_licenses) - expected_licenses:
hints.append(
"License is not an SPDX identifier (or a custom LicenseRef) nor an SPDX license expression."
)
if set(parsed_exceptions) - expected_exceptions:
hints.append("License exception is not an SPDX exception.")
return lints, hints
|
30,616 | def test_module():
"""
Performs basic get request to get item samples
"""
request_api_token()
r = requests.get(url=ARC_URL + '/watchlists', headers=CLIENT_HEADERS, verify=VERIFY_CERT)
try:
_ = r.json() if r.text else {}
if not r.ok:
demisto.results('Cannot connect to ARC')
demisto.results('ok')
except TypeError as ex:
demisto.debug(str(ex))
| def test_module():
"""
Performs basic get request to get item samples
"""
request_api_token()
r = requests.get(url=ARC_URL + '/watchlists', headers=CLIENT_HEADERS, verify=VERIFY_CERT)
try:
_ = r.json() if r.text else {}
if not r.ok:
demisto.results('Cannot connect to ARC')
demisto.results('ok')
except TypeError as ex:
demisto.result(str(ex))
|
17,885 | def test_all_attribute():
"""Verify all trait types are added to to `traitlets.__all__`"""
names = dir(traitlets)
for name in names:
value = getattr(traitlets, name)
if not name.startswith("_") and isinstance(value, type) and issubclass(value, TraitType):
if name not in traitlets.__all__:
raise ValueError(f"{name} not in __all__")
for name in traitlets.__all__:
if name not in names:
raise ValueError(f"{name} should be removed from __all__")
| def test_all_attribute():
"""Verify all trait types are added to `traitlets.__all__`"""
names = dir(traitlets)
for name in names:
value = getattr(traitlets, name)
if not name.startswith("_") and isinstance(value, type) and issubclass(value, TraitType):
if name not in traitlets.__all__:
raise ValueError(f"{name} not in __all__")
for name in traitlets.__all__:
if name not in names:
raise ValueError(f"{name} should be removed from __all__")
|
34,608 | def validate_component_keys(pipeline: List["Component"]) -> None:
"""Validates that all keys for a component are valid.
Raises:
InvalidConfigError: If any component has a key specified that is not used
by the component class, it is likely a mistake in the pipeline
Args:
pipeline: The list of components in the piopeline
"""
from rasa.nlu import registry
for component in pipeline:
component_name = component.get("name")
component_class = registry.get_component_class(component_name)
allowed_keys = set(component_class.defaults.keys())
provided_keys = set(component.keys())
provided_keys.remove("name")
listseperator = "\n- "
for key in provided_keys:
if key not in allowed_keys:
rasa.shared.utils.io.raise_warning(
f"You have provided an invalid key `{key}` for component `{component_name}` in your pipeline. "
f"Valid options for `{component_name}` are:\n- "
f"{listseperator.join(allowed_keys)}"
)
| def validate_component_keys(pipeline: List["Component"]) -> None:
"""Validates that all keys for a component are valid.
Raises:
InvalidConfigError: If any component has a key specified that is not used
by the component class, it is likely a mistake in the pipeline
Args:
pipeline: The list of components in the piopeline
"""
from rasa.nlu import registry
for component in pipeline:
component_name = component.get("name")
component_class = registry.get_component_class(component_name)
allowed_keys = set(component_class.defaults.keys())
provided_keys = set(component.keys())
provided_keys.remove("name")
list_separator = "\n- "
for key in provided_keys:
if key not in allowed_keys:
rasa.shared.utils.io.raise_warning(
f"You have provided an invalid key `{key}` for component `{component_name}` in your pipeline. "
f"Valid options for `{component_name}` are:\n- "
f"{listseperator.join(allowed_keys)}"
)
|
28,041 | def __reload_config(args):
"""
Sends the CodeChecker server process a SIGHUP signal, causing it to
reread it's configuration files.
"""
for i in instance_manager.get_instances():
if i['hostname'] != socket.gethostname():
continue
# A RELOAD only reloads the server associated with the given workspace
# and view-port.
if 'reload' in args and \
not (i['port'] == args.view_port and
os.path.abspath(i['workspace']) ==
os.path.abspath(args.config_directory)):
continue
try:
if not sys.platform == "win32":
os.kill(i['pid'], signal.SIGHUP)
except Exception:
LOG.error("Couldn't reload configuration file for process PID #%s",
str(i['pid']))
raise
| def __reload_config(args):
"""
Sends the CodeChecker server process a SIGHUP signal, causing it to
reread it's configuration files.
"""
for i in instance_manager.get_instances():
if i['hostname'] != socket.gethostname():
continue
# A RELOAD only reloads the server associated with the given workspace
# and view-port.
if 'reload' in args and \
not (i['port'] == args.view_port and
os.path.abspath(i['workspace']) ==
os.path.abspath(args.config_directory)):
continue
try:
if sys.platform != "win32":
os.kill(i['pid'], signal.SIGHUP)
except Exception:
LOG.error("Couldn't reload configuration file for process PID #%s",
str(i['pid']))
raise
|
21,214 | def get_app(git_url, branch=None, bench_path='.', skip_assets=False, verbose=False, postprocess=True, overwrite=False):
if not os.path.exists(git_url):
if git_url.startswith('git@'):
pass
elif not check_url(git_url, raise_err=False):
orgs = ['frappe', 'erpnext']
for org in orgs:
url = 'https://api.github.com/repos/{org}/{app}'.format(org=org, app=git_url)
res = requests.get(url)
if res.ok:
data = res.json()
if 'name' in data:
if git_url == data['name']:
git_url = 'https://github.com/{org}/{app}'.format(org=org, app=git_url)
break
else:
bench.utils.log("App {app} not found".format(app=git_url), level=2)
sys.exit(1)
# Gets repo name from URL
repo_name = git_url.rsplit('/', 1)[1].rsplit('.', 1)[0]
shallow_clone = '--depth 1' if check_git_for_shallow_clone() else ''
branch = '--branch {branch}'.format(branch=branch) if branch else ''
else:
repo_name = git_url.split(os.sep)[-1]
shallow_clone = ''
branch = '--branch {branch}'.format(branch=branch) if branch else ''
if os.path.isdir(os.path.join(bench_path, 'apps', repo_name)):
# application directory already exists
# prompt user to overwrite it
if overwrite or click.confirm('''A directory for the application "{0}" already exists.
Do you want to continue and overwrite it?'''.format(repo_name)):
shutil.rmtree(os.path.join(bench_path, 'apps', repo_name))
elif click.confirm('''Do you want to reinstall the existing application?''', abort=True):
app_name = get_app_name(bench_path, repo_name)
install_app(app=app_name, bench_path=bench_path, verbose=verbose, skip_assets=skip_assets)
sys.exit()
logger.info('Getting app {0}'.format(repo_name))
exec_cmd("git clone {git_url} {branch} {shallow_clone} --origin upstream".format(
git_url=git_url,
shallow_clone=shallow_clone,
branch=branch),
cwd=os.path.join(bench_path, 'apps'))
app_name = get_app_name(bench_path, repo_name)
install_app(app=app_name, bench_path=bench_path, verbose=verbose, skip_assets=skip_assets)
| def get_app(git_url, branch=None, bench_path='.', skip_assets=False, verbose=False, postprocess=True, overwrite=False):
if not os.path.exists(git_url):
if not git_url.startswith('git@') and not check_url(git_url, raise_err=False):
orgs = ['frappe', 'erpnext']
for org in orgs:
url = 'https://api.github.com/repos/{org}/{app}'.format(org=org, app=git_url)
res = requests.get(url)
if res.ok:
data = res.json()
if 'name' in data:
if git_url == data['name']:
git_url = 'https://github.com/{org}/{app}'.format(org=org, app=git_url)
break
else:
bench.utils.log("App {app} not found".format(app=git_url), level=2)
sys.exit(1)
# Gets repo name from URL
repo_name = git_url.rsplit('/', 1)[1].rsplit('.', 1)[0]
shallow_clone = '--depth 1' if check_git_for_shallow_clone() else ''
branch = '--branch {branch}'.format(branch=branch) if branch else ''
else:
repo_name = git_url.split(os.sep)[-1]
shallow_clone = ''
branch = '--branch {branch}'.format(branch=branch) if branch else ''
if os.path.isdir(os.path.join(bench_path, 'apps', repo_name)):
# application directory already exists
# prompt user to overwrite it
if overwrite or click.confirm('''A directory for the application "{0}" already exists.
Do you want to continue and overwrite it?'''.format(repo_name)):
shutil.rmtree(os.path.join(bench_path, 'apps', repo_name))
elif click.confirm('''Do you want to reinstall the existing application?''', abort=True):
app_name = get_app_name(bench_path, repo_name)
install_app(app=app_name, bench_path=bench_path, verbose=verbose, skip_assets=skip_assets)
sys.exit()
logger.info('Getting app {0}'.format(repo_name))
exec_cmd("git clone {git_url} {branch} {shallow_clone} --origin upstream".format(
git_url=git_url,
shallow_clone=shallow_clone,
branch=branch),
cwd=os.path.join(bench_path, 'apps'))
app_name = get_app_name(bench_path, repo_name)
install_app(app=app_name, bench_path=bench_path, verbose=verbose, skip_assets=skip_assets)
|
55,747 | def test_default_properties_assignment():
"""Test that the default properties value can be assigned to properties
see https://github.com/napari/napari/issues/2477
"""
np.random.seed(0)
data = np.random.randint(20, size=(10, 15))
layer = Labels(data)
layer.properties = layer.properties
| def test_default_properties_assignment():
"""Test that the default properties value can be assigned to properties
see https://github.com/napari/napari/issues/2477
"""
np.random.seed(0)
data = np.random.randint(20, size=(10, 15))
layer = Labels(data)
layer.properties = {}
assert layer.properties == {}
|
22,734 | def main():
parser = argparse.ArgumentParser(
description='CLI tool to start a local instance of Pebble or Boulder CA server.')
parser.add_argument('--server-type', '-s',
choices=['pebble', 'boulder-v1', 'boulder-v2'], default='pebble',
help='type of CA server to start: can be Pebble or Boulder '
'(in ACMEv1 or ACMEv2 mode), Pebble is used if not set.')
parser.add_argument('--dns-server', '-d',
help='(Pebble specific) specify the DNS server as `IP:PORT` to use by '
'Pebble; if not specified, a local mock DNS server will be used to '
'resolve domains to localhost.')
args = parser.parse_args()
acme_server = ACMEServer(args.server_type, [], http_proxy=False, stdout=True, dns_server=args.dns_server)
try:
with acme_server as acme_xdist:
print('--> Instance of {0} is running, directory URL is {0}'
.format(acme_xdist['directory_url']))
print('--> Press CTRL+C to stop the ACME server.')
while True:
time.sleep(3600)
except KeyboardInterrupt:
pass
| def main():
parser = argparse.ArgumentParser(
description='CLI tool to start a local instance of Pebble or Boulder CA server.')
parser.add_argument('--server-type', '-s',
choices=['pebble', 'boulder-v1', 'boulder-v2'], default='pebble',
help='type of CA server to start: can be Pebble or Boulder '
'(in ACMEv1 or ACMEv2 mode), Pebble is used if not set.')
parser.add_argument('--dns-server', '-d',
help='specify the DNS server as `IP:PORT` to use by '
'Pebble; if not specified, a local mock DNS server will be used to '
'resolve domains to localhost.')
args = parser.parse_args()
acme_server = ACMEServer(args.server_type, [], http_proxy=False, stdout=True, dns_server=args.dns_server)
try:
with acme_server as acme_xdist:
print('--> Instance of {0} is running, directory URL is {0}'
.format(acme_xdist['directory_url']))
print('--> Press CTRL+C to stop the ACME server.')
while True:
time.sleep(3600)
except KeyboardInterrupt:
pass
|
12,034 | def format_array(arr):
"""
Returns the given array as a string, using the python builtin str
function on a piecewise basis.
Useful for xml representation of arrays.
For customisations, use the :mod:`numpy.core.arrayprint` directly.
"""
summary_threshold = 85
summary_insert = "..." if arr.size > summary_threshold else ""
edge_items = 3
ffunc = str
max_line_len = 50
formatArray = np.core.arrayprint._formatArray
format_options = np.core.arrayprint._format_options
options = np.get_printoptions()
options["legacy"] = "1.13"
with np.printoptions(**options):
result = formatArray(
arr,
ffunc,
max_line_len,
next_line_prefix="\t\t",
separator=", ",
edge_items=edge_items,
summary_insert=summary_insert,
legacy=format_options["legacy"],
)
return result
| def format_array(arr):
"""
Returns the given array as a string, using the python builtin str
function on a piecewise basis.
Useful for xml representation of arrays.
For customisations, use the :mod:`numpy.core.arrayprint` directly.
"""
summary_threshold = 85
summary_insert = "..." if arr.size > summary_threshold else ""
edge_items = 3
ffunc = str
max_line_len = 50
formatArray = np.core.arrayprint._formatArray
format_options = np.core.arrayprint._format_options
options = np.get_printoptions()
options["legacy"] = "1.13"
with np.printoptions(**options):
result = formatArray(
arr,
ffunc,
max_line_len,
next_line_prefix="\t\t",
separator=", ",
edge_items=edge_items,
summary_insert=summary_insert,
legacy=False,
)
return result
|
13,635 | def load_module(path):
"""Creates model from configuration file.
Args:
path (string): Path to the configuration file relative to pymor_source.
Returns:
model: model as loaded from the file.
"""
with open(path, "r") as stream:
try:
load_dict = yaml.safe_load(stream)
print(load_dict)
except yaml.YAMLError as exc:
print(exc)
# construct the parameter object first
parameters = None
if ("parameters" in load_dict.keys()):
# parameters = Parameters()
NotImplemented
# parse loaded dict, combine it with parameters,
# construct the objects and write it in model_parameters dict
model_parameters = {}
for key, value in load_dict.items():
if key == "type":
model_parameters["type"] = value
# identify operators
elif key == "operator" or key == "products" or ".mat" in value:
model_parameters[key] = construct_operator(parameters, **value)
elif key == "parameters":
continue
else:
raise ValueError(f"The key {key} given is not permitted.")
# construct the model
return construct_model(**model_parameters)
| def load_module(path):
"""Creates model from configuration file.
Args:
path (string): Path to the configuration file relative to pymor_source.
Returns:
model: |Model| as loaded from the file.
"""
with open(path, "r") as stream:
try:
load_dict = yaml.safe_load(stream)
print(load_dict)
except yaml.YAMLError as exc:
print(exc)
# construct the parameter object first
parameters = None
if ("parameters" in load_dict.keys()):
# parameters = Parameters()
NotImplemented
# parse loaded dict, combine it with parameters,
# construct the objects and write it in model_parameters dict
model_parameters = {}
for key, value in load_dict.items():
if key == "type":
model_parameters["type"] = value
# identify operators
elif key == "operator" or key == "products" or ".mat" in value:
model_parameters[key] = construct_operator(parameters, **value)
elif key == "parameters":
continue
else:
raise ValueError(f"The key {key} given is not permitted.")
# construct the model
return construct_model(**model_parameters)
|
17,399 | def _get_engine_from_magic_number(filename_or_obj):
# check byte header to determine file type
if isinstance(filename_or_obj, bytes):
magic_number = filename_or_obj[:8]
else:
if filename_or_obj.tell() != 0:
raise ValueError(
"file-like object read/write pointer not at zero "
"please close and reopen, or use a context "
"manager"
)
magic_number = filename_or_obj.read(8)
filename_or_obj.seek(0)
if magic_number.startswith(b"CDF"):
engine = "scipy"
elif magic_number.startswith(b"\211HDF\r\n\032\n"):
engine = "h5netcdf"
else:
raise ValueError(
"{} is not the signature of any supported file format "
"did you mean to pass a string for a path instead?".format(magic_number)
)
return engine
| def _get_engine_from_magic_number(filename_or_obj):
# check byte header to determine file type
if isinstance(filename_or_obj, bytes):
magic_number = filename_or_obj[:8]
else:
if filename_or_obj.tell() != 0:
raise ValueError(
"file-like object read/write pointer not at zero "
"please close and reopen, or use a context "
"manager"
)
magic_number = filename_or_obj.read(8)
filename_or_obj.seek(0)
if magic_number.startswith(b"CDF"):
engine = "scipy"
elif magic_number.startswith(b"\211HDF\r\n\032\n"):
engine = "h5netcdf"
else:
raise ValueError(
"{} is not the signature of any supported file format "
"did you mean to pass a string for a path instead?"
)
return engine
|
50,135 | def common_verify(client, expected_keys):
for user, filename, keys in expected_keys:
# Ensure key is in the key file
contents = client.read_from_file(filename)
if user in ['ubuntu', 'root']:
# Our personal public key gets added by pycloudlib
lines = contents.split('\n')
assert len(lines) == 2
assert keys.public_key.strip() in contents
else:
assert contents.strip() == keys.public_key.strip()
# Ensure we can actually connect
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
paramiko_key = paramiko.RSAKey.from_private_key(StringIO(
keys.private_key))
# Will fail with AuthenticationException if
# we cannot connect
ssh.connect(
client.instance.ip,
username=user,
pkey=paramiko_key,
look_for_keys=False,
allow_agent=False,
)
# Ensure other uses can't connect using our key
other_users = [u[0] for u in expected_keys if u[2] != keys]
for other_user in other_users:
with pytest.raises(SSHException):
print('trying to connect as {} with key from {}'.format(
other_user, user))
ssh.connect(
client.instance.ip,
username=other_user,
pkey=paramiko_key,
look_for_keys=False,
allow_agent=False,
)
# Ensure we haven't messed with any /home permissions
# See LP: #1940233
home_dir = '/home/{}'.format(user)
# Home permissions aren't consistent between releases. On ubuntu
# this can change to 750 once focal is unsupported.
home_perms = '75'
if user == 'root':
home_dir = '/root'
home_perms = '700'
client.execute(
'stat -c "%U %a" {}'.format(home_dir)
).startswith('{} {}'.format(user, home_perms))
if client.execute("test -d {}/.ssh".format(home_dir)).ok:
assert '{} 700'.format(user) == client.execute(
'stat -c "%U %a" {}/.ssh'.format(home_dir)
)
assert '{} 600'.format(user) == client.execute(
'stat -c "%U %a" {}'.format(filename)
)
# Also ensure ssh-keygen works as expected
client.execute('mkdir {}/.ssh'.format(home_dir))
assert client.execute(
"ssh-keygen -b 2048 -t rsa -f {}/.ssh/id_rsa -q -N ''".format(
home_dir)
).ok
assert client.execute('test -f {}/.ssh/id_rsa'.format(home_dir))
assert client.execute('test -f {}/.ssh/id_rsa.pub'.format(home_dir))
assert 'root 755' == client.execute('stat -c "%U %a" /home')
| def common_verify(client, expected_keys):
for user, filename, keys in expected_keys:
# Ensure key is in the key file
contents = client.read_from_file(filename)
if user in ['ubuntu', 'root']:
# Our personal public key gets added by pycloudlib
lines = contents.split('\n')
assert len(lines) == 2
assert keys.public_key.strip() in contents
else:
assert contents.strip() == keys.public_key.strip()
# Ensure we can actually connect
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
paramiko_key = paramiko.RSAKey.from_private_key(StringIO(
keys.private_key))
# Will fail with AuthenticationException if
# we cannot connect
ssh.connect(
client.instance.ip,
username=user,
pkey=paramiko_key,
look_for_keys=False,
allow_agent=False,
)
# Ensure other uses can't connect using our key
other_users = [u[0] for u in expected_keys if u[2] != keys]
for other_user in other_users:
with pytest.raises(SSHException):
print('trying to connect as {} with key from {}'.format(
other_user, user))
ssh.connect(
client.instance.ip,
username=other_user,
pkey=paramiko_key,
look_for_keys=False,
allow_agent=False,
)
# Ensure we haven't messed with any /home permissions
# See LP: #1940233
home_dir = '/home/{}'.format(user)
# Home permissions aren't consistent between releases. On ubuntu
# this can change to 750 once focal is unsupported.
if ImageSpecification.from_os_image().release in ("bionic", "focal"):
home_perms = '755'
else:
home_perms = '750'
if user == 'root':
home_dir = '/root'
home_perms = '700'
client.execute(
'stat -c "%U %a" {}'.format(home_dir)
).startswith('{} {}'.format(user, home_perms))
if client.execute("test -d {}/.ssh".format(home_dir)).ok:
assert '{} 700'.format(user) == client.execute(
'stat -c "%U %a" {}/.ssh'.format(home_dir)
)
assert '{} 600'.format(user) == client.execute(
'stat -c "%U %a" {}'.format(filename)
)
# Also ensure ssh-keygen works as expected
client.execute('mkdir {}/.ssh'.format(home_dir))
assert client.execute(
"ssh-keygen -b 2048 -t rsa -f {}/.ssh/id_rsa -q -N ''".format(
home_dir)
).ok
assert client.execute('test -f {}/.ssh/id_rsa'.format(home_dir))
assert client.execute('test -f {}/.ssh/id_rsa.pub'.format(home_dir))
assert 'root 755' == client.execute('stat -c "%U %a" /home')
|
32,481 | def slack_send():
"""
Sends a message to slack
"""
args = demisto.args()
message = args.get('message', '')
to = args.get('to')
original_channel = args.get('channel')
channel_id = demisto.args().get('channel_id', '')
group = args.get('group')
message_type = args.get('messageType', '') # From server
original_message = args.get('originalMessage', '') # From server
entry = args.get('entry')
ignore_add_url = args.get('ignoreAddURL', False) or args.get('IgnoreAddURL', False)
thread_id = args.get('threadID', '')
severity = args.get('severity') # From server
blocks = args.get('blocks')
reply_broadcast = args.get('reply_broadcast')
entry_object = args.get('entryObject') # From server, available from demisto v6.1 and above
entitlement = ''
if message_type and (message_type not in PERMITTED_NOTIFICATION_TYPES):
if message_type != MIRROR_TYPE:
demisto.info(f"Message type is not in permitted options. Received: {message_type}")
return
if message_type == MIRROR_TYPE and original_message.find(MESSAGE_FOOTER) != -1:
# return so there will not be a loop of messages
return
if message_type == MIRROR_TYPE:
tags = argToList(demisto.params().get('filtered_tags', []))
entry_tags = entry_object.get('tags', [])
if tags and not entry_tags:
return
# return if the entry tags is not containing any of the filtered_tags
if tags and not any(elem in entry_tags for elem in tags):
return
if (to and group) or (to and original_channel) or (to and original_channel and group):
return_error('Only one destination can be provided.')
if severity:
try:
severity = int(severity)
except Exception:
severity = None
channel = original_channel
if original_channel == INCIDENT_NOTIFICATION_CHANNEL or (not original_channel and message_type == INCIDENT_OPENED):
original_channel = INCIDENT_NOTIFICATION_CHANNEL
channel = DEDICATED_CHANNEL
demisto.debug(f'trying to send message to channel {original_channel}, changing slack channel to {channel}')
if (channel == DEDICATED_CHANNEL and original_channel == INCIDENT_NOTIFICATION_CHANNEL
and ((severity is not None and severity < SEVERITY_THRESHOLD)
or not (len(CUSTOM_PERMITTED_NOTIFICATION_TYPES) > 0))):
channel = None
if not (to or group or channel or channel_id):
return_error('Either a user, group, channel id, or channel must be provided.')
reply = ''
expiry = ''
default_response = ''
if blocks:
entitlement_match = re.search(ENTITLEMENT_REGEX, blocks)
if entitlement_match:
try:
parsed_message = json.loads(blocks)
entitlement = parsed_message.get('entitlement')
blocks = parsed_message.get('blocks')
reply = parsed_message.get('reply')
expiry = parsed_message.get('expiry')
default_response = parsed_message.get('default_response')
except Exception:
demisto.info('Slack - could not parse JSON from entitlement blocks.')
elif message:
entitlement_match = re.search(ENTITLEMENT_REGEX, message)
if entitlement_match:
try:
parsed_message = json.loads(message)
entitlement = parsed_message.get('entitlement')
message = parsed_message.get('message')
reply = parsed_message.get('reply')
expiry = parsed_message.get('expiry')
default_response = parsed_message.get('default_response')
except Exception:
demisto.info('Slack - could not parse JSON from entitlement message.')
response = slack_send_request(to, channel, group, entry, ignore_add_url, thread_id, message=message, blocks=blocks,
channel_id=channel_id, reply_broadcast=reply_broadcast)
if response:
thread = response.get('ts')
if entitlement:
save_entitlement(entitlement, thread, reply, expiry, default_response)
demisto.results({
'Type': entryTypes['note'],
'HumanReadable': f'Message sent to Slack successfully.\nThread ID is: {thread}',
'Contents': response.data,
'ContentsFormat': formats['json'],
'EntryContext': {
'Slack.Thread(val.ID===obj.ID)': {
'ID': thread
},
}
})
else:
demisto.results('Could not send the message to Slack.')
| def slack_send():
"""
Sends a message to slack
"""
args = demisto.args()
message = args.get('message', '')
to = args.get('to')
original_channel = args.get('channel')
channel_id = demisto.args().get('channel_id', '')
group = args.get('group')
message_type = args.get('messageType', '') # From server
original_message = args.get('originalMessage', '') # From server
entry = args.get('entry')
ignore_add_url = args.get('ignoreAddURL', False) or args.get('IgnoreAddURL', False)
thread_id = args.get('threadID', '')
severity = args.get('severity') # From server
blocks = args.get('blocks')
reply_broadcast = argToBoolean(args.get('reply_broadcast'))
entry_object = args.get('entryObject') # From server, available from demisto v6.1 and above
entitlement = ''
if message_type and (message_type not in PERMITTED_NOTIFICATION_TYPES):
if message_type != MIRROR_TYPE:
demisto.info(f"Message type is not in permitted options. Received: {message_type}")
return
if message_type == MIRROR_TYPE and original_message.find(MESSAGE_FOOTER) != -1:
# return so there will not be a loop of messages
return
if message_type == MIRROR_TYPE:
tags = argToList(demisto.params().get('filtered_tags', []))
entry_tags = entry_object.get('tags', [])
if tags and not entry_tags:
return
# return if the entry tags is not containing any of the filtered_tags
if tags and not any(elem in entry_tags for elem in tags):
return
if (to and group) or (to and original_channel) or (to and original_channel and group):
return_error('Only one destination can be provided.')
if severity:
try:
severity = int(severity)
except Exception:
severity = None
channel = original_channel
if original_channel == INCIDENT_NOTIFICATION_CHANNEL or (not original_channel and message_type == INCIDENT_OPENED):
original_channel = INCIDENT_NOTIFICATION_CHANNEL
channel = DEDICATED_CHANNEL
demisto.debug(f'trying to send message to channel {original_channel}, changing slack channel to {channel}')
if (channel == DEDICATED_CHANNEL and original_channel == INCIDENT_NOTIFICATION_CHANNEL
and ((severity is not None and severity < SEVERITY_THRESHOLD)
or not (len(CUSTOM_PERMITTED_NOTIFICATION_TYPES) > 0))):
channel = None
if not (to or group or channel or channel_id):
return_error('Either a user, group, channel id, or channel must be provided.')
reply = ''
expiry = ''
default_response = ''
if blocks:
entitlement_match = re.search(ENTITLEMENT_REGEX, blocks)
if entitlement_match:
try:
parsed_message = json.loads(blocks)
entitlement = parsed_message.get('entitlement')
blocks = parsed_message.get('blocks')
reply = parsed_message.get('reply')
expiry = parsed_message.get('expiry')
default_response = parsed_message.get('default_response')
except Exception:
demisto.info('Slack - could not parse JSON from entitlement blocks.')
elif message:
entitlement_match = re.search(ENTITLEMENT_REGEX, message)
if entitlement_match:
try:
parsed_message = json.loads(message)
entitlement = parsed_message.get('entitlement')
message = parsed_message.get('message')
reply = parsed_message.get('reply')
expiry = parsed_message.get('expiry')
default_response = parsed_message.get('default_response')
except Exception:
demisto.info('Slack - could not parse JSON from entitlement message.')
response = slack_send_request(to, channel, group, entry, ignore_add_url, thread_id, message=message, blocks=blocks,
channel_id=channel_id, reply_broadcast=reply_broadcast)
if response:
thread = response.get('ts')
if entitlement:
save_entitlement(entitlement, thread, reply, expiry, default_response)
demisto.results({
'Type': entryTypes['note'],
'HumanReadable': f'Message sent to Slack successfully.\nThread ID is: {thread}',
'Contents': response.data,
'ContentsFormat': formats['json'],
'EntryContext': {
'Slack.Thread(val.ID===obj.ID)': {
'ID': thread
},
}
})
else:
demisto.results('Could not send the message to Slack.')
|
24,312 | def update_link_metadata(checks, core_workflow=True):
root = get_root()
ensure_dir_exists(path_join(root, LINK_DIR))
# Sign only what affects each wheel
products = []
for check in checks:
products.append(path_join(check, 'datadog_checks'))
products.append(path_join(check, 'setup.py'))
if core_workflow:
key_id = get_key_id(GPG_COMMAND)
# Find this latest signed link metadata file on disk.
# NOTE: in-toto currently uses the first 8 characters of the signing keyId.
key_id_prefix = key_id[:8].lower()
tag_link = f'{STEP_NAME}.{key_id_prefix}.link'
options = {'gpg_keyid': key_id}
else:
signing_key_path = os.getenv('IN_TOTO_SIGNING_KEY_PATH', '')
signing_key = util.import_rsa_key_from_file(signing_key_path, os.getenv('IN_TOTO_SIGNING_KEY_PASSWORD'))
# NOTE: in-toto currently uses the first 8 characters of the signing keyId.
key_id_prefix = signing_key_path[:8].lower()
tag_link = f'{STEP_NAME}.{key_id_prefix}.link'
options = {'signing_key': signing_key}
# Final location of metadata file.
metadata_file = path_join(LINK_DIR, tag_link)
with chdir(root):
# We should ignore products untracked and ignored by git.
run_in_toto(products, **options)
# Check whether each signed product is being tracked AND ignored by git.
# NOTE: We have to check now *AFTER* signing the tag link file, so that
# we can check against the actual complete list of products.
with open(tag_link) as tag_json:
tag = json.load(tag_json)
products = tag['signed']['products']
for product in products:
# If NOT tracked...
if not tracked_by_git(product):
# First, delete the tag link off disk so as not to pollute.
os.remove(tag_link)
# AND NOT ignored, then it most likely means the developer
# forgot to add the file to git.
if not ignored_by_git(product):
raise NeitherTrackedNorIgnoredFileException(product)
# AND ignored, then it most likely means that incorrectly
# recorded with in-toto files ignored by git.
else:
raise UntrackedButIgnoredFileException(product)
# Move it to the expected location.
shutil.move(tag_link, metadata_file)
return (metadata_file,)
| def update_link_metadata(checks, core_workflow=True):
root = get_root()
ensure_dir_exists(path_join(root, LINK_DIR))
# Sign only what affects each wheel
products = []
for check in checks:
products.append(path_join(check, 'datadog_checks'))
products.append(path_join(check, 'setup.py'))
if core_workflow:
key_id = get_key_id(GPG_COMMAND)
# Find this latest signed link metadata file on disk.
# NOTE: in-toto currently uses the first 8 characters of the signing keyId.
key_id_prefix = key_id[:8].lower()
tag_link = f'{STEP_NAME}.{key_id_prefix}.link'
options = {'gpg_keyid': key_id}
else:
signing_key_path = os.getenv('IN_TOTO_SIGNING_KEY_PATH', '')
signing_key = util.import_rsa_key_from_file(signing_key_path, os.getenv('IN_TOTO_SIGNING_KEY_PASSWORD'))
# NOTE: in-toto currently uses the first 8 characters of the signing keyId.
key_id_prefix = os.path.basename(signing_key_path)[:8].lower()
tag_link = f'{STEP_NAME}.{key_id_prefix}.link'
options = {'signing_key': signing_key}
# Final location of metadata file.
metadata_file = path_join(LINK_DIR, tag_link)
with chdir(root):
# We should ignore products untracked and ignored by git.
run_in_toto(products, **options)
# Check whether each signed product is being tracked AND ignored by git.
# NOTE: We have to check now *AFTER* signing the tag link file, so that
# we can check against the actual complete list of products.
with open(tag_link) as tag_json:
tag = json.load(tag_json)
products = tag['signed']['products']
for product in products:
# If NOT tracked...
if not tracked_by_git(product):
# First, delete the tag link off disk so as not to pollute.
os.remove(tag_link)
# AND NOT ignored, then it most likely means the developer
# forgot to add the file to git.
if not ignored_by_git(product):
raise NeitherTrackedNorIgnoredFileException(product)
# AND ignored, then it most likely means that incorrectly
# recorded with in-toto files ignored by git.
else:
raise UntrackedButIgnoredFileException(product)
# Move it to the expected location.
shutil.move(tag_link, metadata_file)
return (metadata_file,)
|
45,513 | def test_get_channels_data_repsonse_structure():
# Given
api_token = "test_token"
response_data = {
"ok": True,
"channels": [
{
"id": "id1",
"name": "channel1",
"is_channel": True,
"num_members": 3,
},
{
"id": "id2",
"name": "channel2",
"is_channel": True,
"num_members": 3,
},
],
"response_metadata": {"next_cursor": "dGVhbTpDMDI3MEpNRldNVg=="},
}
# When
with mock.patch("integrations.slack.slack.get_client") as client:
client.return_value.conversations_list.return_value = response_data
response = get_channels_data(api_token)
# Then
assert response == [
{"channel_name": "channel1", "channel_id": "id1"},
{"channel_name": "channel2", "channel_id": "id2"},
]
client.assert_called_with(api_token)
client.return_value.conversations_list.assert_called_with(exclude_archived=True)
| def test_get_channels_data_response_structure():
# Given
api_token = "test_token"
response_data = {
"ok": True,
"channels": [
{
"id": "id1",
"name": "channel1",
"is_channel": True,
"num_members": 3,
},
{
"id": "id2",
"name": "channel2",
"is_channel": True,
"num_members": 3,
},
],
"response_metadata": {"next_cursor": "dGVhbTpDMDI3MEpNRldNVg=="},
}
# When
with mock.patch("integrations.slack.slack.get_client") as client:
client.return_value.conversations_list.return_value = response_data
response = get_channels_data(api_token)
# Then
assert response == [
{"channel_name": "channel1", "channel_id": "id1"},
{"channel_name": "channel2", "channel_id": "id2"},
]
client.assert_called_with(api_token)
client.return_value.conversations_list.assert_called_with(exclude_archived=True)
|
43,440 | def CRoty(theta):
r"""Two-qubit controlled rotation about the y axis.
Args:
theta (float): rotation angle
Returns:
array: unitary 4x4 rotation matrix `
"""
return np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, np.cos(theta/2), -1*np.sin(theta/2)], [0, 0, np.sin(theta/2), np.cos(theta/2)]])
| def CRoty(theta):
r"""Two-qubit controlled rotation about the y axis.
Args:
theta (float): rotation angle
Returns:
array: unitary 4x4 rotation matrix
"""
return np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, np.cos(theta/2), -1*np.sin(theta/2)], [0, 0, np.sin(theta/2), np.cos(theta/2)]])
|
31,983 | def wildfire_get_verdict_command():
file_hashes = hash_args_handler(demisto.args().get('hash', ''))
urls = argToList(demisto.args().get('url', ''))
if not urls and not file_hashes:
raise Exception('Specify exactly 1 of the following arguments: url, hash.')
if file_hashes:
for file_hash in file_hashes:
result, verdict_data = wildfire_get_verdict(file_hash=file_hash)
pretty_verdict = prettify_verdict(verdict_data)
human_readable = tableToMarkdown('WildFire Verdict', pretty_verdict, removeNull=True)
dbot_score_list = create_dbot_score_from_verdict(pretty_verdict)
entry_context = {
"WildFire.Verdicts(val.SHA256 && val.SHA256 == obj.SHA256 || val.MD5 && val.MD5 == obj.MD5)":
pretty_verdict,
"DBotScore": dbot_score_list
}
demisto.results({
'Type': entryTypes['note'],
'Contents': result,
'ContentsFormat': formats['json'],
'HumanReadable': human_readable,
'ReadableContentsFormat': formats['markdown'],
'EntryContext': entry_context
})
else:
for url in urls:
result, verdict_data = wildfire_get_verdict(url=url)
pretty_verdict = prettify_url_verdict(verdict_data)
human_readable = tableToMarkdown('WildFire URL Verdict', pretty_verdict, removeNull=True)
dbot_score_list = create_dbot_score_from_url_verdict(pretty_verdict)
entry_context = {
"WildFire.Verdicts(val.url && val.url == obj.url)":
pretty_verdict,
"DBotScore": dbot_score_list
}
demisto.results({
'Type': entryTypes['note'],
'Contents': result,
'ContentsFormat': formats['json'],
'HumanReadable': human_readable,
'ReadableContentsFormat': formats['markdown'],
'EntryContext': entry_context
})
| def wildfire_get_verdict_command():
file_hashes = hash_args_handler(demisto.args().get('hash', ''))
urls = argToList(demisto.args().get('url', ''))
if not urls and not file_hashes:
raise Exception('Either hash or url must be provided.')
if file_hashes:
for file_hash in file_hashes:
result, verdict_data = wildfire_get_verdict(file_hash=file_hash)
pretty_verdict = prettify_verdict(verdict_data)
human_readable = tableToMarkdown('WildFire Verdict', pretty_verdict, removeNull=True)
dbot_score_list = create_dbot_score_from_verdict(pretty_verdict)
entry_context = {
"WildFire.Verdicts(val.SHA256 && val.SHA256 == obj.SHA256 || val.MD5 && val.MD5 == obj.MD5)":
pretty_verdict,
"DBotScore": dbot_score_list
}
demisto.results({
'Type': entryTypes['note'],
'Contents': result,
'ContentsFormat': formats['json'],
'HumanReadable': human_readable,
'ReadableContentsFormat': formats['markdown'],
'EntryContext': entry_context
})
else:
for url in urls:
result, verdict_data = wildfire_get_verdict(url=url)
pretty_verdict = prettify_url_verdict(verdict_data)
human_readable = tableToMarkdown('WildFire URL Verdict', pretty_verdict, removeNull=True)
dbot_score_list = create_dbot_score_from_url_verdict(pretty_verdict)
entry_context = {
"WildFire.Verdicts(val.url && val.url == obj.url)":
pretty_verdict,
"DBotScore": dbot_score_list
}
demisto.results({
'Type': entryTypes['note'],
'Contents': result,
'ContentsFormat': formats['json'],
'HumanReadable': human_readable,
'ReadableContentsFormat': formats['markdown'],
'EntryContext': entry_context
})
|
18,964 | def add(name, url, scope, args={}):
"""Add a named mirror in the given scope"""
mirrors = spack.config.get('mirrors', scope=scope)
if not mirrors:
mirrors = syaml_dict()
if name in mirrors:
tty.die("Mirror with name %s already exists." % name)
items = [(n, u) for n, u in mirrors.items()]
mirror_data = url
key_values = ["s3_access_key_id", "s3_access_token"]
if [value for value in key_values if value in args]:
mirror_data = {"fetch": url, "push": url,
"id": args.s3_access_key_id,
"secret": args.s3_access_key_secret,
"token": args.s3_access_token}
items.insert(0, (name, mirror_data))
mirrors = syaml_dict(items)
spack.config.set('mirrors', mirrors, scope=scope)
| def add(name, url, scope, args={}):
"""Add a named mirror in the given scope"""
mirrors = spack.config.get('mirrors', scope=scope)
if not mirrors:
mirrors = syaml_dict()
if name in mirrors:
tty.die("Mirror with name %s already exists." % name)
items = [(n, u) for n, u in mirrors.items()]
mirror_data = url
key_values = ["s3_access_key_id", "s3_access_token"]
if any(value for value in key_values if value in args):
mirror_data = {"fetch": url, "push": url,
"id": args.s3_access_key_id,
"secret": args.s3_access_key_secret,
"token": args.s3_access_token}
items.insert(0, (name, mirror_data))
mirrors = syaml_dict(items)
spack.config.set('mirrors', mirrors, scope=scope)
|
17,411 | def _mapping_repr(mapping, title, summarizer, col_width=None, max_rows=None):
if col_width is None:
col_width = _calculate_col_width(mapping)
if max_rows is None:
max_rows = OPTIONS["display_max_rows"]
summary = [f"{title}:"]
if mapping:
if len(mapping) > max_rows:
first_rows = max_rows // 2 + max_rows % 2
items = [*mapping.items()]
summary += [summarizer(k, v, col_width) for k, v in items[:first_rows]]
if max_rows > 1:
last_rows = max_rows // 2
summary += [pretty_print(" ...", col_width) + " ..."]
summary += [summarizer(k, v, col_width) for k, v in items[-last_rows:]]
else:
summary += [summarizer(k, v, col_width) for k, v in mapping.items()]
else:
summary += [EMPTY_REPR]
return "\n".join(summary)
| def _mapping_repr(mapping, title, summarizer, col_width=None, max_rows=None):
if col_width is None:
col_width = _calculate_col_width(mapping)
if max_rows is None:
max_rows = OPTIONS["display_max_rows"]
summary = [f"{title}:"]
if mapping:
if len(mapping) > max_rows:
first_rows = max_rows // 2 + max_rows % 2
items = list(mapping.items())
summary += [summarizer(k, v, col_width) for k, v in items[:first_rows]]
if max_rows > 1:
last_rows = max_rows // 2
summary += [pretty_print(" ...", col_width) + " ..."]
summary += [summarizer(k, v, col_width) for k, v in items[-last_rows:]]
else:
summary += [summarizer(k, v, col_width) for k, v in mapping.items()]
else:
summary += [EMPTY_REPR]
return "\n".join(summary)
|
56,836 | def test_should_hide_feature_notifs_for_non_pro_with_groups():
with case_sharing_groups_patch(['agroupid']), active_service_type_patch("not_IMPLEMENTATION_or_SANDBOX"):
hide = NotificationsServiceRMIView._should_hide_feature_notifs("test", None)
assert not hide, "notifications should not be hidden for pro domain without groups"
| def test_should_hide_feature_notifs_for_non_pro_with_groups():
with case_sharing_groups_patch([]), active_service_type_patch("not_IMPLEMENTATION_or_SANDBOX"):
hide = NotificationsServiceRMIView._should_hide_feature_notifs("test", None)
assert not hide, "notifications should not be hidden for non pro domain with or without groups"
|
55,831 | def finalize_on_12(spec, state, epoch, sufficient_support, messed_up_target):
assert epoch > 2
state.slot = (spec.SLOTS_PER_EPOCH * epoch) - 1 # skip ahead to just before epoch
# 43210 -- epochs ago
# 210xx -- justification bitfield indices (pre shift)
# 3210x -- justification bitfield indices (post shift)
# 001*. -- justification bitfield contents, . = this epoch, * is being justified now
# checkpoints for the epochs ago:
c1, c2, _, _, _ = get_checkpoints(spec, epoch)
put_checkpoints_in_block_roots(spec, state, [c1, c2])
old_finalized = state.finalized_checkpoint
state.previous_justified_checkpoint = c2
state.current_justified_checkpoint = c2
state.justification_bits = spec.Bitvector[spec.JUSTIFICATION_BITS_LENGTH]()
state.justification_bits[0] = 1 # mock 2nd latest epoch as justified (this is pre-shift)
# mock the 1st latest epoch as justifiable, with 2nd as source
add_mock_attestations(spec, state,
epoch=epoch - 1,
source=c2,
target=c1,
sufficient_support=sufficient_support, messed_up_target=messed_up_target)
# process!
yield from run_process_just_and_fin(spec, state)
assert state.previous_justified_checkpoint == c2 # changed to old current
if sufficient_support and not messed_up_target:
assert state.current_justified_checkpoint == c1 # changed to 1st latest
assert state.finalized_checkpoint == c2 # finalized previous justified epoch
else:
assert state.current_justified_checkpoint == c2 # still old current
assert state.finalized_checkpoint == old_finalized # no new finalized
| def finalize_on_12(spec, state, epoch, sufficient_support, messed_up_target):
assert epoch > 2
state.slot = (spec.SLOTS_PER_EPOCH * epoch) - 1 # skip ahead to just before epoch
# 43210 -- epochs ago
# 210xx -- justification bitfield indices (pre shift)
# 3210x -- justification bitfield indices (post shift)
# 001*. -- justification bitfield contents, . = this epoch, * is being justified now
# checkpoints for the epochs ago:
c1, c2, _, _, _ = get_checkpoints(spec, epoch)
put_checkpoints_in_block_roots(spec, state, [c1, c2])
old_finalized = state.finalized_checkpoint
state.previous_justified_checkpoint = c2
state.current_justified_checkpoint = c2
state.justification_bits = spec.Bitvector[spec.JUSTIFICATION_BITS_LENGTH]()
state.justification_bits[0] = 1 # mock 2nd latest epoch as justified (this is pre-shift)
# mock the 1st latest epoch as justifiable, with 2nd as source
add_mock_attestations(spec, state,
epoch=epoch - 1,
source=c2,
target=c1,
sufficient_support=sufficient_support,
messed_up_target=messed_up_target)
# process!
yield from run_process_just_and_fin(spec, state)
assert state.previous_justified_checkpoint == c2 # changed to old current
if sufficient_support and not messed_up_target:
assert state.current_justified_checkpoint == c1 # changed to 1st latest
assert state.finalized_checkpoint == c2 # finalized previous justified epoch
else:
assert state.current_justified_checkpoint == c2 # still old current
assert state.finalized_checkpoint == old_finalized # no new finalized
|
25,613 | def _mix_latent_gp(W, g_mu, g_var, full_cov, full_output_cov):
r"""
Takes the mean and variance of a uncorrelated L-dimensional latent GP
and returns the mean and the variance of the mixed GP, `f = W \times g`,
where both f and g are GPs.
:param W: [P, L]
:param g_mu: [..., N, L]
:param g_var: [..., N, L] or [L, ..., N, N]
:return: f_mu and f_var, shape depends on `full_cov` and `full_output_cov`
"""
f_mu = tf.tensordot(g_mu, W, [[-1], [-1]]) # [..., N, P]
K = tf.rank(g_var)
leading_dims = (K - 3) if full_cov else (K - 2)
if full_cov and full_output_cov: # g_var is [L, ..., N, N]
# this branch is practically never taken
g_var = _rollaxis(g_var, 1) # [..., N, N, L]
g_var = tf.expand_dims(g_var, axis=-2) # [..., N, N, 1, L]
g_var_W = g_var * W # [..., N, P, L]
f_var = tf.tensordot(g_var_W, W, [[-1], [-1]]) # [..., N, N, P, P]
perm = _get_perm_with_leading_dims(leading_dims, K-3, K-1, K-2, K)
f_var = tf.transpose(f_var, perm) # [..., N, P, N, P]
elif full_cov and not full_output_cov: # g_var is [L, ..., N, N]
# this branch is practically never taken
f_var = tf.tensordot(g_var, W**2, [[0], [-1]]) # [..., N, N, P]
perm = _get_perm_with_leading_dims(leading_dims, K-1, K-3, K-2)
f_var = tf.transpose(f_var, perm) # [..., P, N, N]
elif not full_cov and full_output_cov: # g_var is [..., N, L]
g_var = tf.expand_dims(g_var, axis=-2) # [..., N, 1, L]
g_var_W = g_var * W # [..., N, P, L]
f_var = tf.tensordot(g_var_W, W, [[-1], [-1]]) # [..., N, P, P]
elif not full_cov and not full_output_cov: # g_var is [..., N, L]
W_squared = W**2 # [P, L]
f_var = tf.tensordot(g_var, W_squared, [[-1], [-1]]) # [..., N, P]
return f_mu, f_var
| def _mix_latent_gp(W, g_mu, g_var, full_cov, full_output_cov):
r"""
Takes the mean and variance of an uncorrelated L-dimensional latent GP
and returns the mean and the variance of the mixed GP, `f = W \times g`,
where both f and g are GPs.
:param W: [P, L]
:param g_mu: [..., N, L]
:param g_var: [..., N, L] or [L, ..., N, N]
:return: f_mu and f_var, shape depends on `full_cov` and `full_output_cov`
"""
f_mu = tf.tensordot(g_mu, W, [[-1], [-1]]) # [..., N, P]
K = tf.rank(g_var)
leading_dims = (K - 3) if full_cov else (K - 2)
if full_cov and full_output_cov: # g_var is [L, ..., N, N]
# this branch is practically never taken
g_var = _rollaxis(g_var, 1) # [..., N, N, L]
g_var = tf.expand_dims(g_var, axis=-2) # [..., N, N, 1, L]
g_var_W = g_var * W # [..., N, P, L]
f_var = tf.tensordot(g_var_W, W, [[-1], [-1]]) # [..., N, N, P, P]
perm = _get_perm_with_leading_dims(leading_dims, K-3, K-1, K-2, K)
f_var = tf.transpose(f_var, perm) # [..., N, P, N, P]
elif full_cov and not full_output_cov: # g_var is [L, ..., N, N]
# this branch is practically never taken
f_var = tf.tensordot(g_var, W**2, [[0], [-1]]) # [..., N, N, P]
perm = _get_perm_with_leading_dims(leading_dims, K-1, K-3, K-2)
f_var = tf.transpose(f_var, perm) # [..., P, N, N]
elif not full_cov and full_output_cov: # g_var is [..., N, L]
g_var = tf.expand_dims(g_var, axis=-2) # [..., N, 1, L]
g_var_W = g_var * W # [..., N, P, L]
f_var = tf.tensordot(g_var_W, W, [[-1], [-1]]) # [..., N, P, P]
elif not full_cov and not full_output_cov: # g_var is [..., N, L]
W_squared = W**2 # [P, L]
f_var = tf.tensordot(g_var, W_squared, [[-1], [-1]]) # [..., N, P]
return f_mu, f_var
|