id (int64, 11 to 59.9k) | original (string, lengths 33 to 150k) | modified (string, lengths 37 to 150k) |
---|---|---|
35,711 |
def _mobilenet_extractor(
backbone: Union[mobilenet.MobileNetV2, mobilenet.MobileNetV3],
fpn: bool,
trainable_layers: int,
returned_layers: Optional[List[int]] = None,
extra_blocks: Optional[ExtraFPNBlock] = None,
) -> nn.Module:
backbone = backbone.features
# Gather the indices of blocks which are strided. These are the locations of C1, ..., Cn-1 blocks.
# The first and last blocks are always included because they are the C0 (conv1) and Cn.
stage_indices = [0] + [i for i, b in enumerate(backbone) if getattr(b, "_is_cn", False)] + [len(backbone) - 1]
num_stages = len(stage_indices)
# find the index of the layer from which we won't freeze
if trainable_layers not in range(0, num_stages + 1):
raise ValueError(f"trainable_layers expected to be in between [0,{num_stages}], got {trainable_layers} ")
freeze_before = len(backbone) if trainable_layers == 0 else stage_indices[num_stages - trainable_layers]
for b in backbone[:freeze_before]:
for parameter in b.parameters():
parameter.requires_grad_(False)
out_channels = 256
if fpn:
if extra_blocks is None:
extra_blocks = LastLevelMaxPool()
if returned_layers is None:
returned_layers = [num_stages - 2, num_stages - 1]
if min(returned_layers) <= 0 or max(returned_layers) >= num_stages:
raise ValueError(
f" returned_layers object should contain integers between [1,{num_stages - 1}], got {returned_layers} "
)
return_layers = {f"{stage_indices[k]}": str(v) for v, k in enumerate(returned_layers)}
in_channels_list = [backbone[stage_indices[i]].out_channels for i in returned_layers]
return BackboneWithFPN(backbone, return_layers, in_channels_list, out_channels, extra_blocks=extra_blocks)
else:
m = nn.Sequential(
backbone,
# depthwise linear combination of channels to reduce their size
nn.Conv2d(backbone[-1].out_channels, out_channels, 1),
)
m.out_channels = out_channels # type: ignore[assignment]
return m
|
def _mobilenet_extractor(
backbone: Union[mobilenet.MobileNetV2, mobilenet.MobileNetV3],
fpn: bool,
trainable_layers: int,
returned_layers: Optional[List[int]] = None,
extra_blocks: Optional[ExtraFPNBlock] = None,
) -> nn.Module:
backbone = backbone.features
# Gather the indices of blocks which are strided. These are the locations of C1, ..., Cn-1 blocks.
# The first and last blocks are always included because they are the C0 (conv1) and Cn.
stage_indices = [0] + [i for i, b in enumerate(backbone) if getattr(b, "_is_cn", False)] + [len(backbone) - 1]
num_stages = len(stage_indices)
# find the index of the layer from which we won't freeze
if trainable_layers < 0 or trainable_layers > num_stages:
raise ValueError(f"trainable_layers expected to be in between [0,{num_stages}], got {trainable_layers} ")
freeze_before = len(backbone) if trainable_layers == 0 else stage_indices[num_stages - trainable_layers]
for b in backbone[:freeze_before]:
for parameter in b.parameters():
parameter.requires_grad_(False)
out_channels = 256
if fpn:
if extra_blocks is None:
extra_blocks = LastLevelMaxPool()
if returned_layers is None:
returned_layers = [num_stages - 2, num_stages - 1]
if min(returned_layers) <= 0 or max(returned_layers) >= num_stages:
raise ValueError(
f" returned_layers object should contain integers between [1,{num_stages - 1}], got {returned_layers} "
)
return_layers = {f"{stage_indices[k]}": str(v) for v, k in enumerate(returned_layers)}
in_channels_list = [backbone[stage_indices[i]].out_channels for i in returned_layers]
return BackboneWithFPN(backbone, return_layers, in_channels_list, out_channels, extra_blocks=extra_blocks)
else:
m = nn.Sequential(
backbone,
# depthwise linear combination of channels to reduce their size
nn.Conv2d(backbone[-1].out_channels, out_channels, 1),
)
m.out_channels = out_channels # type: ignore[assignment]
return m
|
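The only functional difference between the two versions of `_mobilenet_extractor` above is how the `trainable_layers` bound is validated. A minimal sketch, in plain Python with hypothetical helper names (no torchvision needed), confirming the two checks agree for integer inputs:

```python
# Minimal sketch: the membership test and the explicit comparison are equivalent for ints.
def check_with_range(trainable_layers: int, num_stages: int) -> bool:
    # original version: membership test against range(0, num_stages + 1)
    return trainable_layers in range(0, num_stages + 1)

def check_with_comparisons(trainable_layers: int, num_stages: int) -> bool:
    # modified version: explicit lower/upper bound comparison
    return not (trainable_layers < 0 or trainable_layers > num_stages)

assert all(
    check_with_range(t, 5) == check_with_comparisons(t, 5)
    for t in range(-3, 10)
)
```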
41,730 |
def test_study_optimize_with_multiple_search_spaces():
# type: () -> None
def objective(trial):
# type: (Trial) -> float
a = trial.suggest_int('a', 0, 100)
b = trial.suggest_uniform('b', -100, 100)
return a * b
# Run 3 trials with a search space.
search_space_0 = {
'a': [0, 50],
'b': [-50, 0, 50]
} # type: Dict[str, List[GridValueType]]
sampler_0 = samplers.GridSampler(search_space_0)
study = optuna.create_study(sampler=sampler_0)
study.optimize(objective, n_trials=3)
assert len(study.trials) == 3
for t in study.trials:
sampler_0._same_search_space(t.system_attrs['search_space'])
# Run 2 trials with another space.
search_space_1 = {'a': [0, 25], 'b': [-50]} # type: Dict[str, List[GridValueType]]
sampler_1 = samplers.GridSampler(search_space_1)
study.sampler = sampler_1
study.optimize(objective, n_trials=2)
assert not sampler_0._same_search_space(sampler_1._search_space)
assert len(study.trials) == 5
for t in study.trials[:3]:
sampler_0._same_search_space(t.system_attrs['search_space'])
for t in study.trials[3: 5]:
sampler_1._same_search_space(t.system_attrs['search_space'])
# Run 3 trials with the first search space again.
study.sampler = sampler_0
study.optimize(objective, n_trials=3)
assert len(study.trials) == 8
for t in study.trials[:3]:
sampler_0._same_search_space(t.system_attrs['search_space'])
for t in study.trials[3: 5]:
sampler_1._same_search_space(t.system_attrs['search_space'])
for t in study.trials[5:]:
sampler_0._same_search_space(t.system_attrs['search_space'])
|
def test_study_optimize_with_multiple_search_spaces():
# type: () -> None
def objective(trial):
# type: (Trial) -> float
a = trial.suggest_int('a', 0, 100)
b = trial.suggest_uniform('b', -100, 100)
return a * b
# Run 3 trials with a search space.
search_space_0 = {
'a': [0, 50],
'b': [-50, 0, 50]
} # type: Dict[str, List[GridValueType]]
sampler_0 = samplers.GridSampler(search_space_0)
study = optuna.create_study(sampler=sampler_0)
study.optimize(objective, n_trials=3)
assert len(study.trials) == 3
for t in study.trials:
sampler_0._same_search_space(t.system_attrs['search_space'])
# Run 2 trials with another space.
search_space_1 = {'a': [0, 25], 'b': [-50]} # type: Dict[str, List[GridValueType]]
sampler_1 = samplers.GridSampler(search_space_1)
study.sampler = sampler_1
study.optimize(objective, n_trials=2)
assert not sampler_0._same_search_space(sampler_1._search_space)
assert len(study.trials) == 5
for t in study.trials[:3]:
sampler_0._same_search_space(t.system_attrs['search_space'])
for t in study.trials[3: 5]:
sampler_1._same_search_space(t.system_attrs['search_space'])
# Run 3 trials with the first search space again.
study.sampler = sampler_0
study.optimize(objective, n_trials=3)
assert len(study.trials) == 8
for t in study.trials[:3]:
sampler_0._same_search_space(t.system_attrs['search_space'])
for t in study.trials[3:5]:
sampler_1._same_search_space(t.system_attrs['search_space'])
for t in study.trials[5:]:
sampler_0._same_search_space(t.system_attrs['search_space'])
|
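The pair above differs only in slice spacing (`[3: 5]` vs `[3:5]`). For context, a hedged stand-alone sketch of the `GridSampler` workflow the test exercises, assuming an Optuna version that still provides `suggest_uniform`:

```python
# Hedged usage sketch of Optuna's GridSampler, mirroring the test above.
import optuna
from optuna import samplers

search_space = {'a': [0, 50], 'b': [-50, 0, 50]}   # 2 x 3 = 6 grid points
sampler = samplers.GridSampler(search_space)
study = optuna.create_study(sampler=sampler)
study.optimize(
    lambda t: t.suggest_int('a', 0, 100) * t.suggest_uniform('b', -100, 100),
    n_trials=6,
)
print(len(study.trials))  # 6
```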
43,674 |
def _terms_to_qubit_operator(coeffs, ops, wires=None):
r"""Converts a 2-tuple of complex coefficients and PennyLane operations to
OpenFermion ``QubitOperator``.
This function is the inverse of ``_qubit_operator_to_terms``.
**Example usage:**
>>> coeffs = np.array([0.1, 0.2])
>>> ops = [
... qml.operation.Tensor(qml.PauliX(wires=['w0'])),
... qml.operation.Tensor(qml.PauliY(wires=['w0']), qml.PauliZ(wires=['w2']))
... ]
>>> _terms_to_qubit_operator(coeffs, ops, wires=Wires(['w0', 'w1', 'w2']))
0.1 [X0] +
0.2 [Y0 Z2]
Args:
coeffs (array[complex]):
coefficients for each observable, same length as ops
ops (Iterable[pennylane.operation.Observable]): List of PennyLane observables as
Tensor products of Pauli observables
wires (Wires, list, tuple, dict): Custom wire mapping for translating from Pennylane ansatz.
For types Wires/list/tuple, each item in the iterable represents a wire label
corresponding to the qubit number equal to its index.
For type dict, only consecutive-int-valued dict (for wire-to-qubit conversion) is
accepted. If None, will use the identity map. Defaults to None.
Returns:
QubitOperator: an instance of OpenFermion's ``QubitOperator``.
"""
all_wires = Wires.all_wires([op.wires for op in ops], sort=True)
# n_all_wires = len(all_wires)
if wires is not None:
qubit_indexed_wires = _proc_wires(wires,)
if not set(all_wires).issubset(set(qubit_indexed_wires)):
raise ValueError("Supplied `wires` does not cover all wires defined in `ops`.")
else:
qubit_indexed_wires = all_wires
q_op = QubitOperator()
for coeff, op in zip(coeffs, ops):
# Pauli axis names, note s[-1] expects only 'Pauli{X,Y,Z}'
pauli_names = [s[-1] for s in op.name]
extra_obsvbs = set(op.name) - {"PauliX", "PauliY", "PauliZ", "Identity"}
if extra_obsvbs != set():
raise ValueError(
"Expected only PennyLane observables PauliX/Y/Z or Identity, "
+ "but also got {}.".format(extra_obsvbs)
)
if op.name == ["Identity"] and len(op.wires) == 1:
term_str = ""
else:
term_str = " ".join(
[
"{}{}".format(pauli, qubit_indexed_wires.index(wire))
for pauli, wire in zip(pauli_names, op.wires)
]
)
# This is how one makes QubitOperator in OpenFermion
q_op += coeff * QubitOperator(term_str)
return q_op
|
def _terms_to_qubit_operator(coeffs, ops, wires=None):
r"""Converts a 2-tuple of complex coefficients and PennyLane operations to
OpenFermion ``QubitOperator``.
This function is the inverse of ``_qubit_operator_to_terms``.
**Example**
>>> coeffs = np.array([0.1, 0.2])
>>> ops = [
... qml.operation.Tensor(qml.PauliX(wires=['w0'])),
... qml.operation.Tensor(qml.PauliY(wires=['w0']), qml.PauliZ(wires=['w2']))
... ]
>>> _terms_to_qubit_operator(coeffs, ops, wires=Wires(['w0', 'w1', 'w2']))
0.1 [X0] +
0.2 [Y0 Z2]
Args:
coeffs (array[complex]):
coefficients for each observable, same length as ops
ops (Iterable[pennylane.operation.Observable]): List of PennyLane observables as
Tensor products of Pauli observables
wires (Wires, list, tuple, dict): Custom wire mapping for translating from Pennylane ansatz.
For types Wires/list/tuple, each item in the iterable represents a wire label
corresponding to the qubit number equal to its index.
For type dict, only consecutive-int-valued dict (for wire-to-qubit conversion) is
accepted. If None, will use the identity map. Defaults to None.
Returns:
QubitOperator: an instance of OpenFermion's ``QubitOperator``.
"""
all_wires = Wires.all_wires([op.wires for op in ops], sort=True)
# n_all_wires = len(all_wires)
if wires is not None:
qubit_indexed_wires = _proc_wires(wires,)
if not set(all_wires).issubset(set(qubit_indexed_wires)):
raise ValueError("Supplied `wires` does not cover all wires defined in `ops`.")
else:
qubit_indexed_wires = all_wires
q_op = QubitOperator()
for coeff, op in zip(coeffs, ops):
# Pauli axis names, note s[-1] expects only 'Pauli{X,Y,Z}'
pauli_names = [s[-1] for s in op.name]
extra_obsvbs = set(op.name) - {"PauliX", "PauliY", "PauliZ", "Identity"}
if extra_obsvbs != set():
raise ValueError(
"Expected only PennyLane observables PauliX/Y/Z or Identity, "
+ "but also got {}.".format(extra_obsvbs)
)
if op.name == ["Identity"] and len(op.wires) == 1:
term_str = ""
else:
term_str = " ".join(
[
"{}{}".format(pauli, qubit_indexed_wires.index(wire))
for pauli, wire in zip(pauli_names, op.wires)
]
)
# This is how one makes QubitOperator in OpenFermion
q_op += coeff * QubitOperator(term_str)
return q_op
|
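The two docstrings above differ only in the example heading (`**Example usage:**` vs `**Example**`). The final comment mentions how a `QubitOperator` is assembled in OpenFermion; a minimal sketch of that pattern, assuming OpenFermion is installed:

```python
# A weighted Pauli sum is built by accumulating coefficient * QubitOperator(term_string).
from openfermion import QubitOperator

q_op = QubitOperator()
q_op += 0.1 * QubitOperator("X0")
q_op += 0.2 * QubitOperator("Y0 Z2")
print(q_op)
# 0.1 [X0] +
# 0.2 [Y0 Z2]
```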
44,760 |
def get_orderby_clauses(order_by_list, session):
"""Sorts a set of runs based on their natural ordering and an overriding set of order_bys.
Runs are naturally ordered first by start time descending, then by run id for tie-breaking.
"""
clauses = []
ordering_joins = []
# contrary to filters, it is not easily feasible to separately handle sorting
# on attributes and on joined tables as we must keep all clauses in the same order
if order_by_list:
for order_by_clause in order_by_list:
(key_type, key, ascending) = SearchUtils.parse_order_by(order_by_clause)
subquery = None
if SearchUtils.is_attribute(key_type, '='):
order_value = getattr(SqlRun, SqlRun.get_attribute_name(key))
else:
if SearchUtils.is_metric(key_type, '='): # any valid comparator
entity = SqlLatestMetric
elif SearchUtils.is_tag(key_type, '='):
entity = SqlTag
elif SearchUtils.is_param(key_type, '='):
entity = SqlParam
else:
raise MlflowException("Invalid identifier type '%s'" % key_type,
error_code=INVALID_PARAMETER_VALUE)
# build a subquery first because we will join it in the main request so that the
# metric we want to sort on is available when we apply the sorting clause
subquery = session \
.query(entity) \
.filter(entity.key == key) \
.subquery()
ordering_joins.append(subquery)
order_value = subquery.c.value
# sqlite does not support NULLS LAST expression, so we sort first by
# presence of the field (and is_nan for metrics), then by actual value
if SearchUtils.is_metric(key_type, '='):
clauses.append(sql.case([
(subquery.c.is_nan.is_(True), 1),
(order_value.is_(None), 1)
],
else_=0))
else: # other entities do not have an 'is_nan' field
clauses.append(sql.case([(order_value.is_(None), 1)], else_=0))
if ascending:
clauses.append(order_value)
else:
clauses.append(order_value.desc())
clauses.append(SqlRun.start_time.desc())
clauses.append(SqlRun.run_uuid)
return clauses, ordering_joins
|
def _get_orderby_clauses(order_by_list, session):
"""Sorts a set of runs based on their natural ordering and an overriding set of order_bys.
Runs are naturally ordered first by start time descending, then by run id for tie-breaking.
"""
clauses = []
ordering_joins = []
# contrary to filters, it is not easily feasible to separately handle sorting
# on attributes and on joined tables as we must keep all clauses in the same order
if order_by_list:
for order_by_clause in order_by_list:
(key_type, key, ascending) = SearchUtils.parse_order_by(order_by_clause)
subquery = None
if SearchUtils.is_attribute(key_type, '='):
order_value = getattr(SqlRun, SqlRun.get_attribute_name(key))
else:
if SearchUtils.is_metric(key_type, '='): # any valid comparator
entity = SqlLatestMetric
elif SearchUtils.is_tag(key_type, '='):
entity = SqlTag
elif SearchUtils.is_param(key_type, '='):
entity = SqlParam
else:
raise MlflowException("Invalid identifier type '%s'" % key_type,
error_code=INVALID_PARAMETER_VALUE)
# build a subquery first because we will join it in the main request so that the
# metric we want to sort on is available when we apply the sorting clause
subquery = session \
.query(entity) \
.filter(entity.key == key) \
.subquery()
ordering_joins.append(subquery)
order_value = subquery.c.value
# sqlite does not support NULLS LAST expression, so we sort first by
# presence of the field (and is_nan for metrics), then by actual value
if SearchUtils.is_metric(key_type, '='):
clauses.append(sql.case([
(subquery.c.is_nan.is_(True), 1),
(order_value.is_(None), 1)
],
else_=0))
else: # other entities do not have an 'is_nan' field
clauses.append(sql.case([(order_value.is_(None), 1)], else_=0))
if ascending:
clauses.append(order_value)
else:
clauses.append(order_value.desc())
clauses.append(SqlRun.start_time.desc())
clauses.append(SqlRun.run_uuid)
return clauses, ordering_joins
|
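The only change in this pair is the rename to a private `_get_orderby_clauses`. The NULLS-LAST emulation it relies on is easier to see in isolation; a sketch under the assumption of SQLAlchemy 1.x (the `case([...])` call style used above), with a hypothetical table and column:

```python
# Sort NULL values last by ordering first on "is NULL", then on the value itself.
from sqlalchemy import Column, Integer, MetaData, Table, select, sql

metadata = MetaData()
runs = Table("runs", metadata, Column("value", Integer))  # hypothetical table

order_value = runs.c.value
stmt = select([runs]).order_by(
    sql.case([(order_value.is_(None), 1)], else_=0),  # NULLs sort after non-NULLs
    order_value,
)
print(stmt)  # SELECT runs.value FROM runs ORDER BY CASE WHEN ... END, runs.value
```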
28,074 |
def perform_build_command(logfile, command, context, keep_link, silent=False,
verbose=None):
"""
Build the project and create a log file.
"""
LOG.info("Starting build...")
original_env = os.environ
try:
original_env_file = os.environ.get('CODECHECKER_ORIGINAL_BUILD_ENV')
if original_env_file:
LOG.debug_analyzer('Loading original build env from: %s',
original_env_file)
with open(original_env_file, 'rb') as env_file:
original_env = pickle.load(env_file, encoding='utf-8')
except Exception as ex:
LOG.warning(str(ex))
LOG.warning('Failed to get saved original_env '
'using a current copy for logging.')
original_env = os.environ.copy()
# Run user's commands in shell, and preload ldlogger.
if host_check.check_ldlogger(original_env):
# TODO: better platform detection.
if platform.system() == 'Linux':
LOG.info("Using CodeChecker ld-logger.")
# Same as linux's touch.
open(logfile, 'a', encoding="utf-8", errors="ignore").close()
log_env = env.get_log_env(logfile, context, original_env)
if 'CC_LOGGER_GCC_LIKE' not in log_env:
log_env['CC_LOGGER_GCC_LIKE'] = 'gcc:g++:clang:clang++:cc:c++'
if keep_link or ('CC_LOGGER_KEEP_LINK' in log_env and
log_env['CC_LOGGER_KEEP_LINK'] == 'true'):
log_env['CC_LOGGER_KEEP_LINK'] = 'true'
is_debug = verbose and verbose in ['debug', 'debug_analyzer']
if not is_debug and 'CC_LOGGER_DEBUG_FILE' in log_env:
del log_env['CC_LOGGER_DEBUG_FILE']
elif is_debug and 'CC_LOGGER_DEBUG_FILE' not in log_env:
if 'CC_LOGGER_DEBUG_FILE' in os.environ:
log_file = os.environ['CC_LOGGER_DEBUG_FILE']
else:
log_file = os.path.join(os.path.dirname(logfile),
'codechecker.logger.debug')
if os.path.exists(log_file):
os.remove(log_file)
log_env['CC_LOGGER_DEBUG_FILE'] = log_file
elif platform.system() == 'Windows':
LOG.error("This command is not supported on Windows. You can use "
"the following tools to generate a compilation "
"database: \n"
" - CMake (CMAKE_EXPORT_COMPILE_COMMANDS)\n"
" - compiledb (https://pypi.org/project/compiledb/)")
sys.exit(1)
else:
LOG.error("Intercept-build is required to run CodeChecker in "
"OS X.")
sys.exit(1)
# Otherwise try to fall back to intercept-build.
elif host_check.check_intercept(original_env):
LOG.info("Using intercept-build.")
command = ' '.join(["intercept-build",
"--cdb", logfile,
"sh -c", shlex.quote(command)])
log_env = original_env
LOG.debug_analyzer(command)
else:
LOG.error("Both ldlogger and intercept-build are unavailable.\n"
"Try acquiring the compilation_commands.json in another "
"way.")
sys.exit(1)
LOG.debug_analyzer(log_env)
try:
ret_code = execute_buildcmd(command, silent, log_env)
if ret_code == 0:
LOG.info("Build finished successfully.")
LOG.debug_analyzer("The logfile is: %s", logfile)
else:
LOG.info("Build failed.")
sys.exit(ret_code)
except Exception as ex:
LOG.error("Calling original build command failed.")
LOG.error(str(ex))
sys.exit(1)
finally:
debug_file = log_env.get('CC_LOGGER_DEBUG_FILE')
if debug_file:
LOG.info("The debug log file is: %s", debug_file)
debug_logfile_lock = debug_file + '.lock'
if os.path.exists(debug_logfile_lock):
os.remove(debug_logfile_lock)
# Removing flock lock file.
logfile_lock = logfile + '.lock'
if os.path.exists(logfile_lock):
os.remove(logfile_lock)
|
def perform_build_command(logfile, command, context, keep_link, silent=False,
verbose=None):
"""
Build the project and create a log file.
"""
LOG.info("Starting build...")
original_env = os.environ
try:
original_env_file = os.environ.get('CODECHECKER_ORIGINAL_BUILD_ENV')
if original_env_file:
LOG.debug_analyzer('Loading original build env from: %s',
original_env_file)
with open(original_env_file, 'rb') as env_file:
original_env = pickle.load(env_file, encoding='utf-8')
except Exception as ex:
LOG.warning(str(ex))
LOG.warning('Failed to get saved original_env '
'using a current copy for logging.')
original_env = os.environ.copy()
# Run user's commands in shell, and preload ldlogger.
if host_check.check_ldlogger(os.environ):
# TODO: better platform detection.
if platform.system() == 'Linux':
LOG.info("Using CodeChecker ld-logger.")
# Same as linux's touch.
open(logfile, 'a', encoding="utf-8", errors="ignore").close()
log_env = env.get_log_env(logfile, context, original_env)
if 'CC_LOGGER_GCC_LIKE' not in log_env:
log_env['CC_LOGGER_GCC_LIKE'] = 'gcc:g++:clang:clang++:cc:c++'
if keep_link or ('CC_LOGGER_KEEP_LINK' in log_env and
log_env['CC_LOGGER_KEEP_LINK'] == 'true'):
log_env['CC_LOGGER_KEEP_LINK'] = 'true'
is_debug = verbose and verbose in ['debug', 'debug_analyzer']
if not is_debug and 'CC_LOGGER_DEBUG_FILE' in log_env:
del log_env['CC_LOGGER_DEBUG_FILE']
elif is_debug and 'CC_LOGGER_DEBUG_FILE' not in log_env:
if 'CC_LOGGER_DEBUG_FILE' in os.environ:
log_file = os.environ['CC_LOGGER_DEBUG_FILE']
else:
log_file = os.path.join(os.path.dirname(logfile),
'codechecker.logger.debug')
if os.path.exists(log_file):
os.remove(log_file)
log_env['CC_LOGGER_DEBUG_FILE'] = log_file
elif platform.system() == 'Windows':
LOG.error("This command is not supported on Windows. You can use "
"the following tools to generate a compilation "
"database: \n"
" - CMake (CMAKE_EXPORT_COMPILE_COMMANDS)\n"
" - compiledb (https://pypi.org/project/compiledb/)")
sys.exit(1)
else:
LOG.error("Intercept-build is required to run CodeChecker in "
"OS X.")
sys.exit(1)
# Otherwise try to fall back to intercept-build.
elif host_check.check_intercept(original_env):
LOG.info("Using intercept-build.")
command = ' '.join(["intercept-build",
"--cdb", logfile,
"sh -c", shlex.quote(command)])
log_env = original_env
LOG.debug_analyzer(command)
else:
LOG.error("Both ldlogger and intercept-build are unavailable.\n"
"Try acquiring the compilation_commands.json in another "
"way.")
sys.exit(1)
LOG.debug_analyzer(log_env)
try:
ret_code = execute_buildcmd(command, silent, log_env)
if ret_code == 0:
LOG.info("Build finished successfully.")
LOG.debug_analyzer("The logfile is: %s", logfile)
else:
LOG.info("Build failed.")
sys.exit(ret_code)
except Exception as ex:
LOG.error("Calling original build command failed.")
LOG.error(str(ex))
sys.exit(1)
finally:
debug_file = log_env.get('CC_LOGGER_DEBUG_FILE')
if debug_file:
LOG.info("The debug log file is: %s", debug_file)
debug_logfile_lock = debug_file + '.lock'
if os.path.exists(debug_logfile_lock):
os.remove(debug_logfile_lock)
# Removing flock lock file.
logfile_lock = logfile + '.lock'
if os.path.exists(logfile_lock):
os.remove(logfile_lock)
|
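The pair above differs only in whether `check_ldlogger` receives `original_env` or `os.environ`. The intercept-build fallback path assembles its command with `shlex.quote`; a small sketch with hypothetical paths and build command:

```python
# shlex.quote keeps the user's whole build command as a single shell argument.
import shlex

logfile = "compile_commands.json"   # hypothetical output path
command = "make -j4 all"            # hypothetical user build command
full_cmd = ' '.join(["intercept-build", "--cdb", logfile, "sh -c", shlex.quote(command)])
print(full_cmd)  # intercept-build --cdb compile_commands.json sh -c 'make -j4 all'
```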
5,919 |
def test_install_package_with_target(script):
"""
Test installing a package using pip install --target
"""
target_dir = script.scratch_path / 'target'
result = script.pip_install_local('-t', target_dir, "simple==1.0")
result.did_create(
Path('scratch') / 'target' / 'simple'
)
# Test repeated call without --upgrade, no files should have changed
result = script.pip_install_local(
'-t', target_dir, "simple==1.0", expect_stderr=True,
)
result.did_not_update(
Path('scratch') / 'target' / 'simple'
)
# Test upgrade call, check that new version is installed
result = script.pip_install_local('--upgrade', '-t',
target_dir, "simple==2.0")
result.did_update(
Path('scratch') / 'target' / 'simple'
)
egg_folder = (
Path('scratch') / 'target' /
'simple-2.0-py{pyversion}.egg-info'.format(**globals()))
result.did_create(egg_folder)
# Test install and upgrade of single-module package
result = script.pip_install_local('-t', target_dir, 'singlemodule==0.0.0')
singlemodule_py = Path('scratch') / 'target' / 'singlemodule.py'
result.did_create(singlemodule_py)
result = script.pip_install_local('-t', target_dir, 'singlemodule==0.0.1',
'--upgrade')
result.did_update(singlemodule_py)
|
def test_install_package_with_target(script):
"""
Test installing a package using pip install --target
"""
target_dir = script.scratch_path / 'target'
result = script.pip_install_local('-t', target_dir, "simple==1.0")
result.did_create(
Path('scratch') / 'target' / 'simple'
)
# Test repeated call without --upgrade, no files should have changed
result = script.pip_install_local(
'-t', target_dir, "simple==1.0", expect_stderr=True,
)
result.did_not_update(
Path('scratch') / 'target' / 'simple'
)
# Test upgrade call, check that new version is installed
result = script.pip_install_local('--upgrade', '-t',
target_dir, "simple==2.0")
result.did_update(Path('scratch') / 'target' / 'simple')
egg_folder = (
Path('scratch') / 'target' /
'simple-2.0-py{pyversion}.egg-info'.format(**globals()))
result.did_create(egg_folder)
# Test install and upgrade of single-module package
result = script.pip_install_local('-t', target_dir, 'singlemodule==0.0.0')
singlemodule_py = Path('scratch') / 'target' / 'singlemodule.py'
result.did_create(singlemodule_py)
result = script.pip_install_local('-t', target_dir, 'singlemodule==0.0.1',
'--upgrade')
result.did_update(singlemodule_py)
|
40,769 |
def test_expand_hostlist_unvalid():
with pytest.raises(ValueError, match=r"hostlist invalid"):
_expand_hostlist("invalid[]")
|
def test_expand_hostlist_invalid():
with pytest.raises(ValueError, match=r"hostlist invalid"):
_expand_hostlist("invalid[]")
|
55,752 |
def _pycharm_has_eventloop(app: QApplication) -> bool:
"""Return true if running in PyCharm and eventloop is active
PyCharm runs a custom interactive shell which overrides
`InteractiveShell.enable_gui()`, breaking some superclass behaviour.
"""
in_pycharm = 'PYCHARM_HOSTED' in os.environ
in_event_loop = getattr(app, '_in_event_loop', False)
return in_pycharm and in_event_loop
|
def _pycharm_has_eventloop(app: QApplication) -> bool:
"""Return true if running in PyCharm and eventloop is active.
PyCharm runs a custom interactive shell which overrides
`InteractiveShell.enable_gui()`, breaking some superclass behaviour.
"""
in_pycharm = 'PYCHARM_HOSTED' in os.environ
in_event_loop = getattr(app, '_in_event_loop', False)
return in_pycharm and in_event_loop
|
31,383 |
def convert_string_to_date_format(date: str, date_format: str = DATE_FORMAT) -> Optional[str]:
"""
Convert date into given format
:param date: date string
:param date_format: output date format
:return: human readable date
"""
date_obj = dateparser.parse(date)
if date_obj:
return date_obj.strftime(date_format)
return None
|
def convert_string_to_date_format(date: str, date_format: str = DATE_FORMAT) -> Optional[str]:
"""
Convert date into given format
:param date: date string
:param date_format: output date format
:return: human readable date
"""
date_obj = dateparser.parse(date)
if date_obj:
return date_obj.strftime(date_format)
return None
|
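The two copies of `convert_string_to_date_format` above are identical. A hedged usage sketch, where the `DATE_FORMAT` value is an assumption since the constant is not shown:

```python
# Assumes the dateparser package and a DATE_FORMAT such as '%Y-%m-%dT%H:%M:%SZ'.
import dateparser

date_obj = dateparser.parse("2 January 2021 15:04:05 UTC")
if date_obj:
    print(date_obj.strftime('%Y-%m-%dT%H:%M:%SZ'))  # 2021-01-02T15:04:05Z
```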
32,364 |
def set_password_not_expire(default_base_dn):
args = demisto.args()
sam_account_name = args.get('username')
pwd_n_exp = args.get('value')
# query by sAMAccountName
if sam_account_name or args.get('sAMAccountName'):
if sam_account_name:
username = escape_filter_chars(sam_account_name)
else:
username = escape_filter_chars(args['sAMAccountName'])
query = "(&(objectClass=User)(objectCategory=person)(sAMAccountName={}))".format(username)
entries = search_with_paging(query, default_base_dn, attributes='userAccountControl', size_limit=0, time_limit=0)
user = entries.get('flat')[0]
user_account_control = user.get('userAccountControl')[0]
# Check if UAC flag for "Password Never Expire" (0x10000) is set to True or False:
if pwd_n_exp == 'true':
# Sets the bit 16 to 1
user_account_control |= 1 << 16
content_output = f"AD account {username} has set \"password never expire\" attribute. Value is set to True"
else:
# Clears the bit 16 to 0
user_account_control &= ~(1 << 16)
content_output = f"AD account {username} has cleared \"password never expire\" attribute. Value is set to False"
attribute_name = 'userAccountControl'
attribute_value = user_account_control
dn = user_dn(sam_account_name, default_base_dn)
modification = {}
modification[attribute_name] = [('MODIFY_REPLACE', attribute_value)]
# modify user
modify_object(dn, modification)
demisto_entry = {
'ContentsFormat': formats['text'],
'Type': entryTypes['note'],
'Contents': content_output
}
demisto.results(demisto_entry)
|
def set_password_not_expire(default_base_dn):
args = demisto.args()
sam_account_name = args.get('username')
pwd_n_exp = args.get('value')
# query by sAMAccountName
if sam_account_name or args.get('sAMAccountName'):
if sam_account_name:
username = escape_filter_chars(sam_account_name)
else:
username = escape_filter_chars(args['sAMAccountName'])
query = "(&(objectClass=User)(objectCategory=person)(sAMAccountName={}))".format(username)
entries = search_with_paging(query, default_base_dn, attributes='userAccountControl', size_limit=0, time_limit=0)
user = entries.get('flat')[0]
user_account_control = user.get('userAccountControl')[0]
# Check if UAC flag for "Password Never Expire" (0x10000) is set to True or False:
if pwd_n_exp == 'true':
# Sets the bit 16 to 1
user_account_control |= 1 << 16
content_output = f"AD account {username} has set \"password never expire\" attribute. Value is set to True"
else:
# Clears the bit 16 to 0
user_account_control &= ~(1 << 16)
content_output = f"AD account {username} has cleared \"password never expire\" attribute. Value is set to False"
attribute_name = 'userAccountControl'
attribute_value = user_account_control
dn = user_dn(sam_account_name, default_base_dn)
modification = {attribute_name: [('MODIFY_REPLACE', attribute_value)]}
# modify user
modify_object(dn, modification)
demisto_entry = {
'ContentsFormat': formats['text'],
'Type': entryTypes['note'],
'Contents': content_output
}
demisto.results(demisto_entry)
|
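The only change in this pair is collapsing the `modification` dict into a literal. The `userAccountControl` bit handling is worth seeing on its own; a minimal sketch, where the starting UAC value is an illustrative assumption:

```python
# Setting and clearing the "password never expires" flag (bit 16, 0x10000).
DONT_EXPIRE_PASSWORD = 1 << 16   # 0x10000

uac = 0x200                      # NORMAL_ACCOUNT (assumed starting value)
uac |= DONT_EXPIRE_PASSWORD      # set the flag
assert uac & DONT_EXPIRE_PASSWORD

uac &= ~DONT_EXPIRE_PASSWORD     # clear the flag
assert not uac & DONT_EXPIRE_PASSWORD
```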
5,889 |
def egg_link_path(dist):
# type: (Distribution) -> Optional[str]
"""
Return the path for the .egg-link file if it exists, otherwise, None.
There's 3 scenarios:
1) not in a virtualenv
try to find in site.USER_SITE, then site_packages
2) in a no-global virtualenv
try to find in site_packages
3) in a yes-global virtualenv
try to find in site_packages, then site.USER_SITE
(don't look in global location)
For #1 and #3, there could be odd cases, where there's an egg-link in 2
locations.
This method will just return the first one found.
"""
sites = []
if running_under_virtualenv():
if virtualenv_no_global():
sites.append(site_packages)
if user_site:
sites.append(user_site)
else:
if user_site:
sites.append(user_site)
sites.append(site_packages)
for site in sites:
egglink = os.path.join(site, dist.project_name) + '.egg-link'
if os.path.isfile(egglink):
return egglink
return None
|
def egg_link_path(dist):
# type: (Distribution) -> Optional[str]
"""
Return the path for the .egg-link file if it exists, otherwise, None.
There's 3 scenarios:
1) not in a virtualenv
try to find in site.USER_SITE, then site_packages
2) in a no-global virtualenv
try to find in site_packages
3) in a yes-global virtualenv
try to find in site_packages, then site.USER_SITE
(don't look in global location)
For #1 and #3, there could be odd cases, where there's an egg-link in 2
locations.
This method will just return the first one found.
"""
sites = []
if running_under_virtualenv():
if not virtualenv_no_global():
sites.append(site_packages)
if user_site:
sites.append(user_site)
else:
if user_site:
sites.append(user_site)
sites.append(site_packages)
for site in sites:
egglink = os.path.join(site, dist.project_name) + '.egg-link'
if os.path.isfile(egglink):
return egglink
return None
|
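The change in this pair is the `not virtualenv_no_global()` condition. A hypothetical, self-contained re-statement of the three search-order scenarios described in the docstring, for reference (the names are stand-ins, not pip's internals):

```python
# Scenario 1: no virtualenv; 2: no-global virtualenv; 3: yes-global virtualenv.
def search_order(in_virtualenv: bool, no_global: bool) -> list:
    if not in_virtualenv:
        return ["user_site", "site_packages"]   # scenario 1
    if no_global:
        return ["site_packages"]                # scenario 2
    return ["site_packages", "user_site"]       # scenario 3

assert search_order(False, False) == ["user_site", "site_packages"]
assert search_order(True, True) == ["site_packages"]
assert search_order(True, False) == ["site_packages", "user_site"]
```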
41,196 |
def test_versions_are_the_same():
mods = modules.list_modules(include_parent=True)
versions = {m.name: m.version for m in mods}
assert len(set(versions.values())) == 1, (
f"Versions should be the same, " f"instead: \n{versions}"
)
|
def test_versions_are_the_same():
mods = modules.list_modules(include_parent=True)
versions = {m.name: m.version for m in mods}
assert len(set(versions.values())) == 1, (
f"Versions should be the same, instead: \n{versions}"
)
|
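A quick check that the refactor above is purely cosmetic: Python concatenates adjacent string literals, including f-strings, so the assertion message is unchanged.

```python
# Adjacent f-string literals are concatenated; both forms yield the same message.
versions = {"core": "1.0", "extra": "1.0"}
msg_split = f"Versions should be the same, " f"instead: \n{versions}"
msg_merged = f"Versions should be the same, instead: \n{versions}"
assert msg_split == msg_merged
```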
19,994 |
def find_color_card(img, threshold='adaptgauss', threshvalue=125, blurry=False, background='dark'):
"""Automatically detects a color card and output info to use in create_color_card_mask function
Inputs:
img = Input RGB image data containing a color card.
threshold = Threshold method, either 'normal', 'otsu', or 'adaptgauss', optional (default 'adaptgauss')
thresh_value = Thresholding value, optional (default 125)
blurry = Bool (default False) if True then image sharpening applied
background = Type of image background either 'dark' or 'light' (default 'dark'); if 'light' then histogram
expansion applied to better detect edges, but histogram expansion will be hindered if there
is a dark background
Returns:
df = Dataframe containing information about the filtered contours
start_coord = Two element tuple of starting coordinates, location of the top left pixel detected
spacing = Two element tuple of spacing between centers of chips
:param img: numpy.ndarray
:param threshold: str
:param threshvalue: int
:param blurry: bool
:param background: str
:return df: pandas.core.frame.DataFrame
:return start_coord: tuple
:return spacing: tuple
"""
# Imports
import skimage
import pandas as pd
from scipy.spatial.distance import squareform, pdist
# Get image attributes
height, width, channels = img.shape
totalpx = float(height * width)
# Minimum and maximum square size based upon 12 MP image
minarea = 1000. / 12000000. * totalpx
maxarea = 8000000. / 12000000. * totalpx
# Create gray image for further processing
gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Laplacian Fourier Transform detection of blurriness
blurfactor = cv2.Laplacian(gray_img, cv2.CV_64F).var()
# If image is blurry then try to deblur using kernel
if blurry:
# from https://www.packtpub.com/mapt/book/Application+Development/9781785283932/2/ch02lvl1sec22/Sharpening
kernel = np.array([[-1, -1, -1, -1, -1],
[-1, 2, 2, 2, -1],
[-1, 2, 8, 2, -1],
[-1, 2, 2, 2, -1],
[-1, -1, -1, -1, -1]]) / 8.0
# Store result back out for further processing
gray_img = cv2.filter2D(gray_img, -1, kernel)
# In darker samples, the expansion of the histogram hinders finding the squares due to problems with the otsu
# thresholding. If your image has a bright background then apply
if background == 'light':
clahe = cv2.createCLAHE(clipLimit=3.25, tileGridSize=(4, 4))
# apply CLAHE histogram expansion to find squares better with canny edge detection
gray_img = clahe.apply(gray_img)
elif background != 'dark':
fatal_error('Background parameter ' + str(background) + ' is not "light" or "dark"!')
# Thresholding
if threshold == "otsu":
# Blur slightly so defects on card squares and background patterns are less likely to be picked up
gaussian = cv2.GaussianBlur(gray_img, (5, 5), 0)
ret, threshold = cv2.threshold(gaussian, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
elif threshold == "normal":
# Blur slightly so defects on card squares and background patterns are less likely to be picked up
gaussian = cv2.GaussianBlur(gray_img, (5, 5), 0)
ret, threshold = cv2.threshold(gaussian, threshvalue, 255, cv2.THRESH_BINARY)
elif threshold == "adaptgauss":
# Blur slightly so defects on card squares and background patterns are less likely to be picked up
gaussian = cv2.GaussianBlur(gray_img, (11, 11), 0)
threshold = cv2.adaptiveThreshold(gaussian, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
cv2.THRESH_BINARY_INV, 51, 2)
else:
fatal_error('Threshold ' + str(threshold) + ' is not "otsu", "normal", or "adaptgauss"!')
# Apply automatic Canny edge detection using the computed median
edges = skimage.feature.canny(threshold)
edges.dtype = 'uint8'
# Compute contours to find the squares of the card
_, contours, hierarchy = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# Variable of which contour is which
mindex = []
# Variable to store moments
mu = []
# Variable to x,y coordinates in tuples
mc = []
# Variable to x coordinate as integer
mx = []
# Variable to y coordinate as integer
my = []
# Variable to store area
marea = []
# Variable to store whether something is a square (1) or not (0)
msquare = []
# Variable to store square approximation coordinates
msquarecoords = []
# Variable to store child hierarchy element
mchild = []
# Fitted rectangle height
mheight = []
# Fitted rectangle width
mwidth = []
# Ratio of height/width
mwhratio = []
# Extract moments from contour image
for x in range(0, len(contours)):
mu.append(cv2.moments(contours[x]))
marea.append(cv2.contourArea(contours[x]))
mchild.append(int(hierarchy[0][x][2]))
mindex.append(x)
# Cycle through moment data and compute location for each moment
for m in mu:
if m['m00'] != 0: # This is the area term for a moment
mc.append((int(m['m10'] / m['m00']), int(m['m01']) / m['m00']))
mx.append(int(m['m10'] / m['m00']))
my.append(int(m['m01'] / m['m00']))
else:
mc.append((0, 0))
mx.append((0))
my.append((0))
# Loop over our contours and extract data about them
for index, c in enumerate(contours):
# Area isn't 0, but greater than min-area and less than max-area
if marea[index] != 0 and minarea < marea[index] < maxarea:
peri = cv2.arcLength(c, True)
approx = cv2.approxPolyDP(c, 0.15 * peri, True)
center, wh, angle = cv2.minAreaRect(c) # Rotated rectangle
mwidth.append(wh[0])
mheight.append(wh[1])
mwhratio.append(wh[0] / wh[1])
msquare.append(len(approx))
# If the approx contour has 4 points then we can assume we have 4-sided objects
if len(approx) == 4 or 5:
msquarecoords.append(approx)
else: # It's not square
msquare.append(0)
msquarecoords.append(0)
else: # Contour has area of 0, not interesting
msquare.append(0)
msquarecoords.append(0)
mwidth.append(0)
mheight.append(0)
mwhratio.append(0)
# Make a pandas df from data for filtering out junk
locarea = {'index': mindex, 'X': mx, 'Y': my, 'width': mwidth, 'height': mheight, 'WHratio': mwhratio,
'Area': marea, 'square': msquare, 'child': mchild}
df = pd.DataFrame(locarea)
# Add calculated blur factor to output
df['blurriness'] = blurfactor
# Filter df for attributes that would isolate squares of reasonable size
df = df[(df['Area'] > minarea) & (df['Area'] < maxarea) & (df['child'] != -1) &
(df['square'].isin([4, 5])) & (df['WHratio'] < 1.2) & (df['WHratio'] > 0.85)]
# Filter nested squares from dataframe, was having issues with median being towards smaller nested squares
df = df[~(df['index'].isin(df['index'] + 1))]
# Count up squares that are within a given radius, more squares = more likelihood of them being the card
# Median width of square time 2.5 gives proximity radius for searching for similar squares
median_sq_width_px = df["width"].median()
# Squares that are within 6 widths of the current square
pixeldist = median_sq_width_px * 6
# Computes euclidean distance matrix for the x and y contour centroids
distmatrix = pd.DataFrame(squareform(pdist(df[['X', 'Y']])))
# Add up distances that are less than ones have distance less than pixeldist pixels
distmatrixflat = distmatrix.apply(lambda dist: dist[dist <= pixeldist].count() - 1, axis=1)
# Append distprox summary to dataframe
df = df.assign(distprox=distmatrixflat.values)
# Compute how similar in area the squares are. lots of similar values indicates card
# isolate area measurements
filtered_area = df['Area']
# Create empty matrix for storing comparisons
sizecomp = np.zeros((len(filtered_area), len(filtered_area)))
# Double loop through all areas to compare to each other
for p in range(0, len(filtered_area)):
for o in range(0, len(filtered_area)):
big = max(filtered_area.iloc[p], filtered_area.iloc[o])
small = min(filtered_area.iloc[p], filtered_area.iloc[o])
pct = 100. * (small / big)
sizecomp[p][o] = pct
# How many comparisons given 90% square similarity
sizematrix = pd.DataFrame(sizecomp).apply(lambda sim: sim[sim >= 90].count() - 1, axis=1)
# Append sizeprox summary to dataframe
df = df.assign(sizeprox=sizematrix.values)
# Reorder dataframe for better printing
df = df[['index', 'X', 'Y', 'width', 'height', 'WHratio', 'Area', 'square', 'child',
'blurriness', 'distprox', 'sizeprox']]
# Loosely filter for size and distance (relative size to median)
minsqwidth = median_sq_width_px * 0.80
maxsqwidth = median_sq_width_px * 1.2
df = df[(df['distprox'] >= 5) & (df['sizeprox'] >= 5) & (df['width'] > minsqwidth) &
(df['width'] < maxsqwidth)]
# Filter for proximity again to root out stragglers
# Find and count up squares that are within given radius,
# more squares = more likelihood of them being the card
# Median width of square time 2.5 gives proximity radius for searching for similar squares
median_sq_width_px = df["width"].median()
# Squares that are within 6 widths of the current square
pixeldist = median_sq_width_px * 5
# Computes euclidean distance matrix for the x and y contour centroids
distmatrix = pd.DataFrame(squareform(pdist(df[['X', 'Y']])))
# Add up distances that are less than ones have distance less than pixeldist pixels
distmatrixflat = distmatrix.apply(lambda dist: dist[dist <= pixeldist].count() - 1, axis=1)
# Append distprox summary to dataframe
df = df.assign(distprox=distmatrixflat.values)
# Filter results for distance proximity to other squares
df = df[(df['distprox'] >= 4)]
# Extract the starting coordinate
start_coord = (int(df['X'].min()), int(df['Y'].min()))
# Calculate the range
spacingx_short = (df['X'].max() - df['X'].min()) / 3
spacingy_short = (df['Y'].max() - df['Y'].min()) / 3
spacingx_long = (df['X'].max() - df['X'].min()) / 5
spacingy_long = (df['Y'].max() - df['Y'].min()) / 5
# Chip spacing since 4x6 card assumed
spacing_short = min(spacingx_short, spacingy_short)
spacing_long = max(spacingx_long, spacingy_long)
# Smaller spacing measurement might have a chip missing
spacing = int(max(spacing_short, spacing_long))
spacing = (spacing, spacing)
return df, start_coord, spacing
|
def find_color_card(img, threshold='adaptgauss', threshvalue=125, blurry=False, background='dark'):
"""Automatically detects a color card and output info to use in create_color_card_mask function
Inputs:
img = Input RGB image data containing a color card.
threshold = Threshold method, either 'normal', 'otsu', or 'adaptgauss', optional (default 'adaptgauss')
thresh_value = Thresholding value, optional (default 125)
blurry = Bool (default False) if True then image sharpening applied
background = Type of image background either 'dark' or 'light' (default 'dark'); if 'light' then histogram
expansion applied to better detect edges, but histogram expansion will be hindered if there
is a dark background
Returns:
df = Dataframe containing information about the filtered contours
start_coord = Two element tuple of starting coordinates, location of the top left pixel detected
spacing = Two element tuple of spacing between centers of chips
:param rgb_img: numpy.ndarray
:param threshold: str
:param threshvalue: int
:param blurry: bool
:param background: str
:return df: pandas.core.frame.DataFrame
:return start_coord: tuple
:return spacing: tuple
"""
# Imports
import skimage
import pandas as pd
from scipy.spatial.distance import squareform, pdist
# Get image attributes
height, width, channels = img.shape
totalpx = float(height * width)
# Minimum and maximum square size based upon 12 MP image
minarea = 1000. / 12000000. * totalpx
maxarea = 8000000. / 12000000. * totalpx
# Create gray image for further processing
gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Laplacian Fourier Transform detection of blurriness
blurfactor = cv2.Laplacian(gray_img, cv2.CV_64F).var()
# If image is blurry then try to deblur using kernel
if blurry:
# from https://www.packtpub.com/mapt/book/Application+Development/9781785283932/2/ch02lvl1sec22/Sharpening
kernel = np.array([[-1, -1, -1, -1, -1],
[-1, 2, 2, 2, -1],
[-1, 2, 8, 2, -1],
[-1, 2, 2, 2, -1],
[-1, -1, -1, -1, -1]]) / 8.0
# Store result back out for further processing
gray_img = cv2.filter2D(gray_img, -1, kernel)
# In darker samples, the expansion of the histogram hinders finding the squares due to problems with the otsu
# thresholding. If your image has a bright background then apply
if background == 'light':
clahe = cv2.createCLAHE(clipLimit=3.25, tileGridSize=(4, 4))
# apply CLAHE histogram expansion to find squares better with canny edge detection
gray_img = clahe.apply(gray_img)
elif background != 'dark':
fatal_error('Background parameter ' + str(background) + ' is not "light" or "dark"!')
# Thresholding
if threshold == "otsu":
# Blur slightly so defects on card squares and background patterns are less likely to be picked up
gaussian = cv2.GaussianBlur(gray_img, (5, 5), 0)
ret, threshold = cv2.threshold(gaussian, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
elif threshold == "normal":
# Blur slightly so defects on card squares and background patterns are less likely to be picked up
gaussian = cv2.GaussianBlur(gray_img, (5, 5), 0)
ret, threshold = cv2.threshold(gaussian, threshvalue, 255, cv2.THRESH_BINARY)
elif threshold == "adaptgauss":
# Blur slightly so defects on card squares and background patterns are less likely to be picked up
gaussian = cv2.GaussianBlur(gray_img, (11, 11), 0)
threshold = cv2.adaptiveThreshold(gaussian, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
cv2.THRESH_BINARY_INV, 51, 2)
else:
fatal_error('Threshold ' + str(threshold) + ' is not "otsu", "normal", or "adaptgauss"!')
# Apply automatic Canny edge detection using the computed median
edges = skimage.feature.canny(threshold)
edges.dtype = 'uint8'
# Compute contours to find the squares of the card
_, contours, hierarchy = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# Variable of which contour is which
mindex = []
# Variable to store moments
mu = []
# Variable to x,y coordinates in tuples
mc = []
# Variable to x coordinate as integer
mx = []
# Variable to y coordinate as integer
my = []
# Variable to store area
marea = []
# Variable to store whether something is a square (1) or not (0)
msquare = []
# Variable to store square approximation coordinates
msquarecoords = []
# Variable to store child hierarchy element
mchild = []
# Fitted rectangle height
mheight = []
# Fitted rectangle width
mwidth = []
# Ratio of height/width
mwhratio = []
# Extract moments from contour image
for x in range(0, len(contours)):
mu.append(cv2.moments(contours[x]))
marea.append(cv2.contourArea(contours[x]))
mchild.append(int(hierarchy[0][x][2]))
mindex.append(x)
# Cycle through moment data and compute location for each moment
for m in mu:
if m['m00'] != 0: # This is the area term for a moment
mc.append((int(m['m10'] / m['m00']), int(m['m01']) / m['m00']))
mx.append(int(m['m10'] / m['m00']))
my.append(int(m['m01'] / m['m00']))
else:
mc.append((0, 0))
mx.append((0))
my.append((0))
# Loop over our contours and extract data about them
for index, c in enumerate(contours):
# Area isn't 0, but greater than min-area and less than max-area
if marea[index] != 0 and minarea < marea[index] < maxarea:
peri = cv2.arcLength(c, True)
approx = cv2.approxPolyDP(c, 0.15 * peri, True)
center, wh, angle = cv2.minAreaRect(c) # Rotated rectangle
mwidth.append(wh[0])
mheight.append(wh[1])
mwhratio.append(wh[0] / wh[1])
msquare.append(len(approx))
# If the approx contour has 4 points then we can assume we have 4-sided objects
if len(approx) == 4 or 5:
msquarecoords.append(approx)
else: # It's not square
msquare.append(0)
msquarecoords.append(0)
else: # Contour has area of 0, not interesting
msquare.append(0)
msquarecoords.append(0)
mwidth.append(0)
mheight.append(0)
mwhratio.append(0)
# Make a pandas df from data for filtering out junk
locarea = {'index': mindex, 'X': mx, 'Y': my, 'width': mwidth, 'height': mheight, 'WHratio': mwhratio,
'Area': marea, 'square': msquare, 'child': mchild}
df = pd.DataFrame(locarea)
# Add calculated blur factor to output
df['blurriness'] = blurfactor
# Filter df for attributes that would isolate squares of reasonable size
df = df[(df['Area'] > minarea) & (df['Area'] < maxarea) & (df['child'] != -1) &
(df['square'].isin([4, 5])) & (df['WHratio'] < 1.2) & (df['WHratio'] > 0.85)]
# Filter nested squares from dataframe, was having issues with median being towards smaller nested squares
df = df[~(df['index'].isin(df['index'] + 1))]
# Count up squares that are within a given radius, more squares = more likelihood of them being the card
# Median width of square time 2.5 gives proximity radius for searching for similar squares
median_sq_width_px = df["width"].median()
# Squares that are within 6 widths of the current square
pixeldist = median_sq_width_px * 6
# Computes euclidean distance matrix for the x and y contour centroids
distmatrix = pd.DataFrame(squareform(pdist(df[['X', 'Y']])))
# Add up distances that are less than ones have distance less than pixeldist pixels
distmatrixflat = distmatrix.apply(lambda dist: dist[dist <= pixeldist].count() - 1, axis=1)
# Append distprox summary to dataframe
df = df.assign(distprox=distmatrixflat.values)
# Compute how similar in area the squares are. lots of similar values indicates card
# isolate area measurements
filtered_area = df['Area']
# Create empty matrix for storing comparisons
sizecomp = np.zeros((len(filtered_area), len(filtered_area)))
# Double loop through all areas to compare to each other
for p in range(0, len(filtered_area)):
for o in range(0, len(filtered_area)):
big = max(filtered_area.iloc[p], filtered_area.iloc[o])
small = min(filtered_area.iloc[p], filtered_area.iloc[o])
pct = 100. * (small / big)
sizecomp[p][o] = pct
# How many comparisons given 90% square similarity
sizematrix = pd.DataFrame(sizecomp).apply(lambda sim: sim[sim >= 90].count() - 1, axis=1)
# Append sizeprox summary to dataframe
df = df.assign(sizeprox=sizematrix.values)
# Reorder dataframe for better printing
df = df[['index', 'X', 'Y', 'width', 'height', 'WHratio', 'Area', 'square', 'child',
'blurriness', 'distprox', 'sizeprox']]
# Loosely filter for size and distance (relative size to median)
minsqwidth = median_sq_width_px * 0.80
maxsqwidth = median_sq_width_px * 1.2
df = df[(df['distprox'] >= 5) & (df['sizeprox'] >= 5) & (df['width'] > minsqwidth) &
(df['width'] < maxsqwidth)]
# Filter for proximity again to root out stragglers
# Find and count up squares that are within given radius,
# more squares = more likelihood of them being the card
# Median width of square time 2.5 gives proximity radius for searching for similar squares
median_sq_width_px = df["width"].median()
# Squares that are within 6 widths of the current square
pixeldist = median_sq_width_px * 5
# Computes euclidean distance matrix for the x and y contour centroids
distmatrix = pd.DataFrame(squareform(pdist(df[['X', 'Y']])))
# Add up distances that are less than ones have distance less than pixeldist pixels
distmatrixflat = distmatrix.apply(lambda dist: dist[dist <= pixeldist].count() - 1, axis=1)
# Append distprox summary to dataframe
df = df.assign(distprox=distmatrixflat.values)
# Filter results for distance proximity to other squares
df = df[(df['distprox'] >= 4)]
# Extract the starting coordinate
start_coord = (int(df['X'].min()), int(df['Y'].min()))
# Calculate the range
spacingx_short = (df['X'].max() - df['X'].min()) / 3
spacingy_short = (df['Y'].max() - df['Y'].min()) / 3
spacingx_long = (df['X'].max() - df['X'].min()) / 5
spacingy_long = (df['Y'].max() - df['Y'].min()) / 5
# Chip spacing since 4x6 card assumed
spacing_short = min(spacingx_short, spacingy_short)
spacing_long = max(spacingx_long, spacingy_long)
# Smaller spacing measurement might have a chip missing
spacing = int(max(spacing_short, spacing_long))
spacing = (spacing, spacing)
return df, start_coord, spacing
|
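Both copies of `find_color_card` share the same `if len(approx) == 4 or 5:` condition; a two-line check showing why that expression is always true, and what the intended membership test (matching the later `df['square'].isin([4, 5])` filter) looks like:

```python
# `len(approx) == 4 or 5` parses as `(len(approx) == 4) or (5)`, which is always truthy.
approx = [None] * 3                    # a 3-point contour approximation
print(bool(len(approx) == 4 or 5))     # True  -- the literal 5 is truthy
print(len(approx) in (4, 5))           # False -- the intended "4- or 5-sided" check
```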
32,658 |
def _put_get_user_stage_s3_regional_url(
tmpdir,
conn_cnx,
db_parameters,
number_of_files=1,
number_of_lines=1,
from_path=True,
):
try:
with conn_cnx(
user=db_parameters["user"],
account=db_parameters["account"],
password=db_parameters["password"],
role="accountadmin",
) as cnx:
cnx.cursor().execute(
"alter account set ENABLE_STAGE_S3_PRIVATELINK_FOR_US_EAST_1 = true;"
)
_put_get_user_stage(
tmpdir, conn_cnx, db_parameters, number_of_files, number_of_lines, from_path
)
finally:
with conn_cnx(
user=db_parameters["user"],
account=db_parameters["account"],
password=db_parameters["password"],
role="accountadmin",
) as cnx:
cnx.cursor().execute(
"alter account set ENABLE_STAGE_S3_PRIVATELINK_FOR_US_EAST_1 = false;"
)
|
def _put_get_user_stage_s3_regional_url(
tmpdir,
conn_cnx,
db_parameters,
number_of_files=1,
number_of_lines=1,
from_path=True,
):
try:
with conn_cnx(
role="accountadmin",
) as cnx:
cnx.cursor().execute(
"alter account set ENABLE_STAGE_S3_PRIVATELINK_FOR_US_EAST_1 = true;"
)
_put_get_user_stage(
tmpdir, conn_cnx, db_parameters, number_of_files, number_of_lines, from_path
)
finally:
with conn_cnx(
user=db_parameters["user"],
account=db_parameters["account"],
password=db_parameters["password"],
role="accountadmin",
) as cnx:
cnx.cursor().execute(
"alter account set ENABLE_STAGE_S3_PRIVATELINK_FOR_US_EAST_1 = false;"
)
|
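The change in this pair is dropping the explicit user/account/password arguments from the first `conn_cnx` call, keeping only `role="accountadmin"`; the teardown in the `finally` block still passes the full credential set from `db_parameters`.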
27,785 |
def _parse_ini_file(path: Path) -> PARSE_RESULT:
"""Parses .ini files with expected pytest.ini sections
todo: investigate if tool:pytest should be added
"""
iniconfig = _parse_ini_config(path)
if "pytest" in iniconfig:
return dict(iniconfig["pytest"].items())
return None
|
def _parse_ini_file(path: Path) -> PARSE_RESULT:
"""Parses .ini files with expected pytest.ini sections.
todo: investigate if tool:pytest should be added
"""
iniconfig = _parse_ini_config(path)
if "pytest" in iniconfig:
return dict(iniconfig["pytest"].items())
return None
|
3,511 |
def _clear_html_files_from_cloud_storage(version):
"""Removes html files from cloud storage for a given version of a project."""
storage = get_storage_class(settings.RTD_BUILD_MEDIA_STORAGE)()
storage_path = version.project.get_storage_path(
type_='html',
version_slug=version.slug,
include_file=False,
)
storage.delete_directory(storage_path)
|
def _clear_html_files_from_cloud_storage(version):
"""Removes html files from media storage (cloud or local) for a given version of a project."""
storage = get_storage_class(settings.RTD_BUILD_MEDIA_STORAGE)()
storage_path = version.project.get_storage_path(
type_='html',
version_slug=version.slug,
include_file=False,
)
storage.delete_directory(storage_path)
|
2,830 |
def pairwise_distances_argmin(X, Y, *, axis=1, metric="euclidean", metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance).
This is mostly equivalent to calling:
pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis)
but uses much less memory, and is faster for large arrays.
This function works with dense 2D arrays only.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Array containing points.
Y : array-like of shape (n_samples_Y, n_features)
Arrays containing points.
axis : int, default=1
Axis along which the argmin and distances are to be computed.
metric : str or callable, default="euclidean"
Metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'minkowski', 'rogerstanimoto', 'russellrao',
'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean',
'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict, default=None
Keyword arguments to pass to specified metric function.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
See Also
--------
pairwise_distances : Distances between pairs of elements of X and Y.
pairwise_distances_argmin_min : Minimum distances between one point and a
set of points.
"""
if metric_kwargs is None:
metric_kwargs = {}
X, Y = check_pairwise_arrays(X, Y)
if axis == 0:
X, Y = Y, X
if metric_kwargs is None:
metric_kwargs = {}
if PairwiseDistancesArgKmin.is_usable_for(X, Y, metric):
# This is an adaptor for one "sqeuclidean" specification.
# For this backend, we can directly use "sqeuclidean".
if metric_kwargs.get("squared", False) and metric == "euclidean":
metric = "sqeuclidean"
metric_kwargs = {}
indices = PairwiseDistancesArgKmin.compute(
X=X,
Y=Y,
k=1,
metric=metric,
metric_kwargs=metric_kwargs,
strategy="auto",
return_distance=False,
)
indices = indices.flatten()
else:
# TODO: once PairwiseDistancesArgKmin supports sparse input matrices and 32 bit,
# we won't need to fallback to pairwise_distances_chunked anymore.
# Turn off check for finiteness because this is costly and because arrays
# have already been validated.
with config_context(assume_finite=True):
indices = np.concatenate(
list(
# This returns a np.ndarray generator whose arrays we need
# to flatten into one.
pairwise_distances_chunked(
X, Y, reduce_func=_argmin_reduce, metric=metric, **metric_kwargs
)
)
)
return indices
|
def pairwise_distances_argmin(X, Y, *, axis=1, metric="euclidean", metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance).
This is mostly equivalent to calling:
pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis)
but uses much less memory, and is faster for large arrays.
This function works with dense 2D arrays only.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Array containing points.
Y : array-like of shape (n_samples_Y, n_features)
Arrays containing points.
axis : int, default=1
Axis along which the argmin and distances are to be computed.
metric : str or callable, default="euclidean"
Metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'minkowski', 'rogerstanimoto', 'russellrao',
'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean',
'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict, default=None
Keyword arguments to pass to specified metric function.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
See Also
--------
pairwise_distances : Distances between every pair of samples of X and Y.
pairwise_distances_argmin_min : Minimum distances between one point and a
set of points.
"""
if metric_kwargs is None:
metric_kwargs = {}
X, Y = check_pairwise_arrays(X, Y)
if axis == 0:
X, Y = Y, X
if metric_kwargs is None:
metric_kwargs = {}
if PairwiseDistancesArgKmin.is_usable_for(X, Y, metric):
# This is an adaptor for one "sqeuclidean" specification.
# For this backend, we can directly use "sqeuclidean".
if metric_kwargs.get("squared", False) and metric == "euclidean":
metric = "sqeuclidean"
metric_kwargs = {}
indices = PairwiseDistancesArgKmin.compute(
X=X,
Y=Y,
k=1,
metric=metric,
metric_kwargs=metric_kwargs,
strategy="auto",
return_distance=False,
)
indices = indices.flatten()
else:
# TODO: once PairwiseDistancesArgKmin supports sparse input matrices and 32 bit,
# we won't need to fallback to pairwise_distances_chunked anymore.
# Turn off check for finiteness because this is costly and because arrays
# have already been validated.
with config_context(assume_finite=True):
indices = np.concatenate(
list(
# This returns a np.ndarray generator whose arrays we need
# to flatten into one.
pairwise_distances_chunked(
X, Y, reduce_func=_argmin_reduce, metric=metric, **metric_kwargs
)
)
)
return indices
|
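A minimal usage sketch for the function above, assuming scikit-learn and NumPy are installed: it returns, for each row of X, the index of the closest row of Y, and agrees with the memory-hungry argmin over the full distance matrix.
import numpy as np
from sklearn.metrics import pairwise_distances, pairwise_distances_argmin

X = np.array([[0.0, 0.0], [1.0, 1.0], [5.0, 5.0]])
Y = np.array([[0.1, 0.1], [4.9, 5.1]])

# Chunked/accelerated computation: one index per row of X.
argmin = pairwise_distances_argmin(X, Y, metric="euclidean")
# Reference result that materialises the full (n_samples_X, n_samples_Y) matrix.
reference = pairwise_distances(X, Y, metric="euclidean").argmin(axis=1)
assert np.array_equal(argmin, reference)  # both give [0, 0, 1]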
30,895 |
def get_indicators_command(client, args: dict) -> CommandResults:
""" Gets indicator from opencti to readable output
Args:
client: OpenCTI Client object
args: demisto.args()
Returns:
readable_output, raw_response
"""
indicator_type = args.get("indicator_types")
last_run_id = args.get('last_id')
limit = int(args.get('limit', 500))
last_run_id, indicators_list = get_indicators(client, indicator_type, last_run_id, limit)
if indicators_list:
readable_output = tableToMarkdown('Indicators from OpenCTI', indicators_list, headers=["type", "value"])
return CommandResults(
readable_output=readable_output,
raw_response=indicators_list
)
else:
return CommandResults(readable_output='No indicators')
|
def get_indicators_command(client, args: dict) -> CommandResults:
""" Gets indicator from opencti to readable output
Args:
client: OpenCTI Client object
args: demisto.args()
Returns:
readable_output, raw_response
"""
indicator_type = args.get("indicator_types")
last_run_id = args.get('last_id')
limit = int(args.get('limit', 500))
last_run_id, indicators_list = get_indicators(client, indicator_type, last_run_id, limit=limit)
if indicators_list:
readable_output = tableToMarkdown('Indicators from OpenCTI', indicators_list, headers=["type", "value"])
return CommandResults(
readable_output=readable_output,
raw_response=indicators_list
)
else:
return CommandResults(readable_output='No indicators')
|
7,100 |
def get_source_workflow_name(source: Path) -> str:
"""Return workflow name relative to configured source dirs if possible,
else the basename of the given path."""
for dir_ in get_source_dirs():
try:
workflow_name = str(source.relative_to(Path(dir_).expanduser()))
return workflow_name
except ValueError:
continue
return source.name
|
def get_source_workflow_name(source: Path) -> str:
"""Return workflow name relative to configured source dirs if possible,
else the basename of the given path."""
for dir_ in get_source_dirs():
try:
return str(source.relative_to(expand_path(dir_)))
except ValueError:
continue
return source.name
|
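The heart of the function above is the relative_to()/ValueError fallback. A self-contained sketch with hypothetical paths follows; get_source_dirs and expand_path are project helpers, replaced here by a plain argument and Path.expanduser.
from pathlib import Path

def workflow_name_for(source: Path, source_dirs) -> str:
    for dir_ in source_dirs:
        try:
            return str(source.relative_to(Path(dir_).expanduser()))
        except ValueError:          # source does not live under this directory
            continue
    return source.name

print(workflow_name_for(Path("/data/workflows/team/build.yml"), ["/data/workflows"]))  # team/build.yml
print(workflow_name_for(Path("/tmp/adhoc.yml"), ["/data/workflows"]))                  # adhoc.yml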
21,940 |
def _process_time_response(
sys, tout, yout, xout, transpose=None, return_x=False,
squeeze=None, input=None, output=None):
"""Process time response signals.
This function processes the outputs of the time response functions and
processes the transpose and squeeze keywords.
Parameters
----------
T : 1D array
Time values of the output
yout : ndarray
Response of the system. This can either be a 1D array indexed by time
(for SISO systems), a 2D array indexed by output and time (for MIMO
systems with no input indexing, such as initial_response or forced
response) or a 3D array indexed by output, input, and time.
xout : array, optional
Individual response of each x variable (if return_x is True). For a
SISO system (or if a single input is specified), This should be a 2D
array indexed by the state index and time (for single input systems)
or a 3D array indexed by state, input, and time.
transpose : bool, optional
If True, transpose all input and output arrays (for backward
compatibility with MATLAB and :func:`scipy.signal.lsim`). Default
value is False.
return_x : bool, optional
If True, return the state vector (default = False).
squeeze : bool, optional
By default, if a system is single-input, single-output (SISO) then the
output response is returned as a 1D array (indexed by time). If
squeeze=True, remove single-dimensional entries from the shape of the
output even if the system is not SISO. If squeeze=False, keep the
output as a 3D array (indexed by the output, input, and time) even if
the system is SISO. The default value can be set using
config.defaults['control.squeeze_time_response'].
input : int, optional
        If present, the response represents only the listed input.
output : int, optional
If present, the response represents ony the listed input.
Returns
-------
T : 1D array
Time values of the output
yout : ndarray
Response of the system. If the system is SISO and squeeze is not
True, the array is 1D (indexed by time). If the system is not SISO or
squeeze is False, the array is either 2D (indexed by output and time)
or 3D (indexed by input, output, and time).
xout : array, optional
Individual response of each x variable (if return_x is True). For a
SISO system (or if a single input is specified), xout is a 2D array
indexed by the state index and time. For a non-SISO system, xout is a
3D array indexed by the state, the input, and time. The shape of xout
is not affected by the ``squeeze`` keyword.
"""
# If squeeze was not specified, figure out the default (might remain None)
if squeeze is None:
squeeze = config.defaults['control.squeeze_time_response']
# Determine if the system is SISO
issiso = sys.issiso() or (input is not None and output is not None)
# Figure out whether and how to squeeze output data
if squeeze is True: # squeeze all dimensions
yout = np.squeeze(yout)
elif squeeze is False: # squeeze no dimensions
pass
elif squeeze is None: # squeeze signals if SISO
if issiso:
if len(yout.shape) == 3:
yout = yout[0][0] # remove input and output
else:
yout = yout[0] # remove input
else:
raise ValueError("unknown squeeze value")
# Figure out whether and how to squeeze the state data
if issiso and len(xout.shape) > 2:
xout = xout[:, 0, :] # remove input
# See if we need to transpose the data back into MATLAB form
if transpose:
# Transpose time vector in case we are using np.matrix
tout = np.transpose(tout)
# For signals, put the last index (time) into the first slot
yout = np.transpose(yout, np.roll(range(yout.ndim), 1))
xout = np.transpose(xout, np.roll(range(xout.ndim), 1))
# Return time, output, and (optionally) state
return (tout, yout, xout) if return_x else (tout, yout)
|
def _process_time_response(
sys, tout, yout, xout, transpose=None, return_x=False,
squeeze=None, input=None, output=None):
"""Process time response signals.
This function processes the outputs of the time response functions and
processes the transpose and squeeze keywords.
Parameters
----------
T : 1D array
Time values of the output
yout : ndarray
Response of the system. This can either be a 1D array indexed by time
(for SISO systems), a 2D array indexed by output and time (for MIMO
systems with no input indexing, such as initial_response or forced
response) or a 3D array indexed by output, input, and time.
xout : array, optional
Individual response of each x variable (if return_x is True). For a
SISO system (or if a single input is specified), This should be a 2D
array indexed by the state index and time (for single input systems)
or a 3D array indexed by state, input, and time.
transpose : bool, optional
If True, transpose all input and output arrays (for backward
compatibility with MATLAB and :func:`scipy.signal.lsim`). Default
value is False.
return_x : bool, optional
If True, return the state vector (default = False).
squeeze : bool, optional
By default, if a system is single-input, single-output (SISO) then the
output response is returned as a 1D array (indexed by time). If
squeeze=True, remove single-dimensional entries from the shape of the
output even if the system is not SISO. If squeeze=False, keep the
output as a 3D array (indexed by the output, input, and time) even if
the system is SISO. The default value can be set using
config.defaults['control.squeeze_time_response'].
input : int, optional
        If present, the response represents only the listed input.
output : int, optional
If present, the response represents only the listed output.
Returns
-------
T : 1D array
Time values of the output
yout : ndarray
Response of the system. If the system is SISO and squeeze is not
True, the array is 1D (indexed by time). If the system is not SISO or
squeeze is False, the array is either 2D (indexed by output and time)
or 3D (indexed by input, output, and time).
xout : array, optional
Individual response of each x variable (if return_x is True). For a
SISO system (or if a single input is specified), xout is a 2D array
indexed by the state index and time. For a non-SISO system, xout is a
3D array indexed by the state, the input, and time. The shape of xout
is not affected by the ``squeeze`` keyword.
"""
# If squeeze was not specified, figure out the default (might remain None)
if squeeze is None:
squeeze = config.defaults['control.squeeze_time_response']
# Determine if the system is SISO
issiso = sys.issiso() or (input is not None and output is not None)
# Figure out whether and how to squeeze output data
if squeeze is True: # squeeze all dimensions
yout = np.squeeze(yout)
elif squeeze is False: # squeeze no dimensions
pass
elif squeeze is None: # squeeze signals if SISO
if issiso:
if len(yout.shape) == 3:
yout = yout[0][0] # remove input and output
else:
yout = yout[0] # remove input
else:
raise ValueError("unknown squeeze value")
# Figure out whether and how to squeeze the state data
if issiso and len(xout.shape) > 2:
xout = xout[:, 0, :] # remove input
# See if we need to transpose the data back into MATLAB form
if transpose:
# Transpose time vector in case we are using np.matrix
tout = np.transpose(tout)
# For signals, put the last index (time) into the first slot
yout = np.transpose(yout, np.roll(range(yout.ndim), 1))
xout = np.transpose(xout, np.roll(range(xout.ndim), 1))
# Return time, output, and (optionally) state
return (tout, yout, xout) if return_x else (tout, yout)
|
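For intuition, a small NumPy-only sketch of what the squeeze handling above does to a SISO response array; only array shapes are involved, no control-system objects.
import numpy as np

yout = np.zeros((1, 1, 50))       # (n_outputs, n_inputs, n_times) for a SISO response

print(yout[0][0].shape)           # squeeze=None and SISO: drop output and input axes -> (50,)
print(np.squeeze(yout).shape)     # squeeze=True: remove every singleton dimension -> (50,)
print(yout.shape)                 # squeeze=False: keep the full 3D indexing -> (1, 1, 50)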
51,913 |
def visit_directory_tree(root, visitor, rel_path='', depth=0):
"""
Recurses the directory root depth-first through a visitor pattern
The visitor interface is as follows:
- handle_file(root, rel_path, depth) -> bool
if False is returned, the iteration over children in the current dir is stopped
- before_enter_dir(root, rel_path, depth) -> bool
if True is returned, descends into this directory
- before_enter_symlinked_dir(root, rel_path, depth) -> bool
      if True is returned, descends into this directory
- after_enter_dir(root, rel_path, depth) -> void
only called when before_enter_dir returns True
- after_enter_symlinked_dir(root, rel_path, depth) -> void
only called when before_enter_symlinked_dir returns True
"""
dir = os.path.join(root, rel_path)
for f in os.listdir(dir):
rel_child = os.path.join(rel_path, f)
child = os.path.join(dir, f)
st, lst = None, None
is_link, is_dir = False, False
# lstat first to avoid having to stat when not a symlink
try:
lst = os.lstat(child)
is_link = stat.S_ISLNK(lst.st_mode)
except OSError:
pass
        # if lstat succeeded and we don't have a link, check whether we have a dir
if lst is not None and not is_link:
is_dir = stat.S_ISDIR(lst.st_mode)
# if lstat succeeded and we have a symlink, we must verify it's a dir
# through a stat call.
if lst is not None and is_link:
# if it was a symlink, do an actual stat
try:
st = os.stat(child)
is_dir = stat.S_ISDIR(st.st_mode)
except OSError:
pass
# if neither lstat nor stat worked, then we can't do anything... maybe the
# files don't exist anymore.
if lst is None and st is None:
continue
if not is_dir:
# Handle files
if not visitor.handle_file(root, rel_child, depth):
break
elif not is_link and visitor.before_enter_dir(root, rel_child, depth):
# Handle ordinary directories
visit_directory_tree(root, visitor, rel_child, depth + 1)
visitor.after_enter_dir(root, rel_child, depth)
elif is_link and visitor.before_enter_symlinked_dir(root, rel_child, depth):
# Handle symlinked directories
visit_directory_tree(root, visitor, rel_child, depth + 1)
visitor.after_enter_symlinked_dir(root, rel_child, depth)
|
def visit_directory_tree(root, visitor, rel_path='', depth=0):
"""
Recurses the directory root depth-first through a visitor pattern
The visitor interface is as follows:
- handle_file(root, rel_path, depth) -> bool
if False is returned, the iteration over children in the current dir is stopped
- before_enter_dir(root, rel_path, depth) -> bool
if True is returned, descends into this directory
- before_enter_symlinked_dir(root, rel_path, depth) -> bool
      if True is returned, descends into this directory
- after_enter_dir(root, rel_path, depth) -> void
only called when before_enter_dir returns True
- after_enter_symlinked_dir(root, rel_path, depth) -> void
only called when before_enter_symlinked_dir returns True
"""
dir = os.path.join(root, rel_path)
for f in os.listdir(dir):
rel_child = os.path.join(rel_path, f)
child = os.path.join(dir, f)
st, lst = None, None
is_link, is_dir = False, False
# lstat first to avoid having to stat when not a symlink
try:
lst = os.lstat(child)
is_link = stat.S_ISLNK(lst.st_mode)
except OSError:
pass
        # if lstat succeeded and we don't have a link, check whether we have a dir
# if lstat succeeded and we have a symlink, we must verify it's a dir
# through a stat call.
if is_link:
# if it was a symlink, do an actual stat
try:
st = os.stat(child)
is_dir = stat.S_ISDIR(st.st_mode)
except OSError:
pass
else:
is_dir = stat.S_ISDIR(lst.st_mode)
"""Context manager to remove the files passed as input, but restore
them in case any exception is raised in the context block.
Args:
*files_or_dirs: glob expressions for files or directories
to be removed
Returns:
Dictionary that maps deleted files to their temporary copy
within the context block.
"""
# Find all the files or directories that match
glob_matches = [glob.glob(x) for x in files_or_dirs]
# Sort them so that shorter paths like "/foo/bar" come before
# nested paths like "/foo/bar/baz.yaml". This simplifies the
# handling of temporary copies below
sorted_matches = sorted([
os.path.abspath(x) for x in itertools.chain(*glob_matches)
], key=len)
# Copy files and directories in a temporary location
removed, dst_root = {}, tempfile.mkdtemp()
try:
for id, file_or_dir in enumerate(sorted_matches):
# The glob expression at the top ensures that the file/dir exists
# at the time we enter the loop. Double check here since it might
# happen that a previous iteration of the loop already removed it.
# This is the case, for instance, if we remove the directory
# "/foo/bar" before the file "/foo/bar/baz.yaml".
if not os.path.exists(file_or_dir):
continue
# The monotonic ID is a simple way to make the filename
# or directory name unique in the temporary folder
basename = os.path.basename(file_or_dir) + '-{0}'.format(id)
temporary_path = os.path.join(dst_root, basename)
shutil.move(file_or_dir, temporary_path)
removed[file_or_dir] = temporary_path
yield removed
except BaseException:
# Restore the files that were removed
for original_path, temporary_path in removed.items():
shutil.move(temporary_path, original_path)
raise
|
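As a self-contained sketch of the "move aside now, restore on exception" pattern described by the context-manager docstring above, using only the standard library; the function name removed_but_restorable and its decorator are illustrative, not Spack's API.
import contextlib, glob, itertools, os, shutil, tempfile

@contextlib.contextmanager
def removed_but_restorable(*files_or_dirs):
    # Expand the glob expressions and sort so parents come before children.
    matches = sorted((os.path.abspath(p) for p in
                      itertools.chain.from_iterable(glob.glob(x) for x in files_or_dirs)),
                     key=len)
    removed, dst_root = {}, tempfile.mkdtemp()
    try:
        for idx, path in enumerate(matches):
            if not os.path.exists(path):          # a parent directory may already be gone
                continue
            tmp = os.path.join(dst_root, os.path.basename(path) + '-{0}'.format(idx))
            shutil.move(path, tmp)                # "delete" by moving aside
            removed[path] = tmp
        yield removed
    except BaseException:
        for original_path, tmp in removed.items():
            shutil.move(tmp, original_path)       # restore everything on failure
        raise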
17,730 |
def RB_to_CHARMM(c0, c1, c2, c3, c4, c5):
"""Converts Ryckaert-Bellemans (RB) type dihedrals to CHARMM type
or
    RB_torsions = c0 + c1*Cos[Psi] + c2*Cos[Psi]^2 + c3*Cos[Psi]^3 + c4*Cos[Psi]^4 + c5*Cos[Psi]^5
    where Psi = t - Pi = t - 180 degrees
Parameters
----------
c0, c1, c2, c3, c4, c5 : Ryckaert-Belleman coefficients (in kcal/mol)
converts to:
CHARMM_torsions =
= K0 * (1 + Cos[n0*(t) - (d0)] ) + K1 * (1 + Cos[n1*(t) - (d1)] ) + K2 * (1 + Cos[n2*(t) - (d2)] )
+ K3 * (1 + Cos[n3*(t) - (d3)] ) + K4 * (1 + Cos[n4*(t) - (d4)] ) + K5 * (1 + Cos[n5*(t) - (d5)] ) .
= K0 + K1 * (1 + Cos[n1*(t) - (d1)] ) + K2 * (1 + Cos[n2*(t) - (d2)] )
+ K3 * (1 + Cos[n3*(t) - (d3)] ) + K4 * (1 + Cos[n4*(t) - (d4)] ) + K5 * (1 + Cos[n5*(t) - (d5)] ) .
Returns
-------
    K0, K1, K2, K3, K4, K5, n0, n1, n2, n3, n4, n5, d0, d1, d2, d3, d4, and d5 : CHARMM coefficients (in kcal/mol)
    CHARMM dihedral coeffs : np.matrix, shape=(6,3)
Array containing the CHARMM dihedral coeffs [[K0, n0, d0], [K1, n1, d1], [K2, n2, d2], [K3, n3, d3],
[K4, n4, d4], [K5, n5, d5]] (in kcal/mol)
"""
# see below or the long version is, K0 = (c0 + c2 / 2 + 3 / 8 * c4) - K1 - K2 - K3 - K4 - K5
K0 = (c0 -c1 - c3 - c4/4 - c5)
K1 = (+c1 + 3/4 * c3 + 5/8 * c5)
K2 = (+(1/2) * c2 + 1/2 * c4)
K3 = (+(1/4) * c3 + 5/16 * c5)
K4 = (+(1/8) * c4)
K5 = (+(1/16) * c5)
n0 = 0
n1 = 1
n2 = 2
n3 = 3
n4 = 4
n5 = 5
d0 = 90
d1 = 180
d2 = 0
d3 = 180
d4 = 0
d5 = 180
return np.matrix([[K0, n0, d0], [K1, n1, d1], [K2, n2, d2], [K3, n3, d3], [K4, n4, d4], [K5, n5, d5]])
|
def RB_to_CHARMM(c0, c1, c2, c3, c4, c5):
"""Converts Ryckaert-Bellemans (RB) type dihedrals to CHARMM type
or
    RB_torsions = c0 + c1*Cos[Psi] + c2*Cos[Psi]^2 + c3*Cos[Psi]^3 + c4*Cos[Psi]^4 + c5*Cos[Psi]^5
    where Psi = t - Pi = t - 180 degrees
Parameters
----------
c0, c1, c2, c3, c4, c5 : Ryckaert-Belleman coefficients (in kcal/mol)
converts to:
CHARMM_torsions =
= K0 * (1 + Cos[n0*(t) - (d0)] ) + K1 * (1 + Cos[n1*(t) - (d1)] ) + K2 * (1 + Cos[n2*(t) - (d2)] )
+ K3 * (1 + Cos[n3*(t) - (d3)] ) + K4 * (1 + Cos[n4*(t) - (d4)] ) + K5 * (1 + Cos[n5*(t) - (d5)] ) .
= K0 + K1 * (1 + Cos[n1*(t) - (d1)] ) + K2 * (1 + Cos[n2*(t) - (d2)] )
+ K3 * (1 + Cos[n3*(t) - (d3)] ) + K4 * (1 + Cos[n4*(t) - (d4)] ) + K5 * (1 + Cos[n5*(t) - (d5)] ) .
Returns
-------
    K0, K1, K2, K3, K4, K5, n0, n1, n2, n3, n4, n5, d0, d1, d2, d3, d4, and d5 : CHARMM coefficients (in kcal/mol)
    CHARMM dihedral coeffs : np.matrix, shape=(6,3)
Array containing the CHARMM dihedral coeffs [[K0, n0, d0], [K1, n1, d1], [K2, n2, d2], [K3, n3, d3],
[K4, n4, d4], [K5, n5, d5]] (in kcal/mol)
"""
# see below or the long version is, K0 = (c0 + c2 / 2 + 3 / 8 * c4) - K1 - K2 - K3 - K4 - K5
K0 = (c0 + (-c1) - c3 - (c4/4) - c5)
K1 = (+c1 + 3/4 * c3 + 5/8 * c5)
K2 = (+(1/2) * c2 + 1/2 * c4)
K3 = (+(1/4) * c3 + 5/16 * c5)
K4 = (+(1/8) * c4)
K5 = (+(1/16) * c5)
n0 = 0
n1 = 1
n2 = 2
n3 = 3
n4 = 4
n5 = 5
d0 = 90
d1 = 180
d2 = 0
d3 = 180
d4 = 0
d5 = 180
return np.matrix([[K0, n0, d0], [K1, n1, d1], [K2, n2, d2], [K3, n3, d3], [K4, n4, d4], [K5, n5, d5]])
|
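A numerical sanity check, offered as a sketch rather than package code: it evaluates the RB form and the converted CHARMM form on a grid of dihedral angles with arbitrary coefficients and asserts that they agree. It assumes only NumPy plus the RB_to_CHARMM function defined above.
import numpy as np

def rb_energy(t_deg, c):
    psi = np.radians(t_deg - 180.0)
    return sum(c[n] * np.cos(psi) ** n for n in range(6))

def charmm_energy(t_deg, coeffs):
    t = np.radians(t_deg)
    return sum(K * (1.0 + np.cos(n * t - np.radians(d))) for K, n, d in np.asarray(coeffs))

c = [1.2, -0.3, 0.8, 0.25, -0.6, 0.1]      # arbitrary RB coefficients, kcal/mol
angles = np.linspace(-180.0, 180.0, 73)
assert np.allclose(rb_energy(angles, c), charmm_energy(angles, RB_to_CHARMM(*c)))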
3,373 |
def strategy(
ids: Sequence[str],
interface: Type[Interface],
score: Optional[int] = None,
) -> Callable[[StrategyFunc], "Strategy"]:
"""
Registers a strategy
:param ids: The strategy/delegate IDs with which to register
:param interface: Which interface type should be dispatched to this strategy
    :param score: Determines precedence of strategies. For example, exception
strategy scores higher than message strategy, so if both interfaces are
in the event, only exception will be used for hash
"""
name = interface.path
if not ids:
raise TypeError("neither id nor ids given")
def decorator(f: StrategyFunc) -> Strategy:
assert ids
for id in ids:
STRATEGIES[id] = rv = Strategy(
id=id, name=name, interface=interface.path, score=score, func=f
)
return rv
return decorator
|
def strategy(
ids: Sequence[str],
interface: Type[Interface],
score: Optional[int] = None,
) -> Callable[[StrategyFunc], "Strategy"]:
"""
Registers a strategy
:param ids: The strategy/delegate IDs with which to register
:param interface: Which interface type should be dispatched to this strategy
    :param score: Determines precedence of strategies. For example, exception
strategy scores higher than message strategy, so if both interfaces are
in the event, only exception will be used for hash
"""
name = interface.path
if not ids:
raise TypeError("no ids given")
def decorator(f: StrategyFunc) -> Strategy:
assert ids
for id in ids:
STRATEGIES[id] = rv = Strategy(
id=id, name=name, interface=interface.path, score=score, func=f
)
return rv
return decorator
|
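A stripped-down illustration of the registration pattern used above; the names rule, Rule and REGISTRY are stand-ins rather than Sentry's API. The decorator stores one entry per id in a module-level registry and returns the last object it created, so the decorated name is rebound to that object.
from dataclasses import dataclass
from typing import Callable, Dict, Sequence

REGISTRY: Dict[str, "Rule"] = {}

@dataclass
class Rule:
    id: str
    func: Callable

def rule(ids: Sequence[str]) -> Callable[[Callable], Rule]:
    if not ids:
        raise TypeError("no ids given")
    def decorator(f: Callable) -> Rule:
        for id_ in ids:
            REGISTRY[id_] = rv = Rule(id=id_, func=f)
        return rv
    return decorator

@rule(ids=["demo:v1", "demo:v2"])
def demo_hash(event):
    return [event.get("message")]

assert set(REGISTRY) == {"demo:v1", "demo:v2"}
assert REGISTRY["demo:v1"].func is REGISTRY["demo:v2"].func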
35,296 |
def twin_to_client_properties(twin):
"""Given a `Twin` object, return a `ClientPropertie`s object"""
obj = ClientProperties()
obj.backing_object = twin.reported_properties
    obj.writable_properties_requests.backing_object = twin.desired_properties
    return obj
|
def twin_to_client_properties(twin):
"""Given a `Twin` object, return a `ClientProperties` object"""
obj = ClientProperties()
obj.backing_object = twin.reported_properties
    obj.writable_properties_requests.backing_object = twin.desired_properties
    return obj
|
4,528 |
def test_basic(tmpdir, capsys):
"""Test some basic functionality."""
assert cs.main('_does_not_exist_') == 0
fname = op.join(str(tmpdir), 'tmp')
with open(fname, 'w') as f:
pass
code, _, stderr = cs.main('-D', 'foo', f.name, std=True)
assert code == EX_USAGE, 'missing dictionary'
assert 'cannot find dictionary' in stderr
assert cs.main(fname) == 0, 'empty file'
with open(fname, 'a') as f:
f.write('this is a test file\n')
assert cs.main(fname) == 0, 'good'
with open(fname, 'a') as f:
f.write('abandonned\n')
assert cs.main(fname) == 1, 'bad'
with open(fname, 'a') as f:
f.write('abandonned\n')
assert cs.main(fname) == 2, 'worse'
with open(fname, 'a') as f:
f.write('tim\ngonna\n')
assert cs.main(fname) == 2, 'with a name'
assert cs.main('--builtin', 'clear,rare,names,informal', fname) == 4
code, _, stderr = cs.main(fname, '--builtin', 'foo', std=True)
assert code == EX_USAGE # bad type
assert 'Unknown builtin dictionary' in stderr
d = str(tmpdir)
code, _, stderr = cs.main(fname, '-D', op.join(d, 'foo'), std=True)
assert code == EX_USAGE # bad dict
assert 'cannot find dictionary' in stderr
os.remove(fname)
with open(op.join(d, 'bad.txt'), 'w') as f:
f.write('abandonned\nAbandonned\nABANDONNED\nAbAnDoNnEd\nabandonned\rAbandonned\r\nABANDONNED \nAbAnDoNnEd') # noqa: E501
assert cs.main(d) == 8
code, _, stderr = cs.main('-w', d, std=True)
assert code == 0
assert 'FIXED:' in stderr
with open(op.join(d, 'bad.txt')) as f:
new_content = f.read()
assert cs.main(d) == 0
assert new_content == 'abandoned\nAbandoned\nABANDONED\nabandoned\nabandoned\rAbandoned\r\nABANDONED \nabandonned' # noqa: E501
with open(op.join(d, 'bad.txt'), 'w') as f:
f.write('abandonned abandonned\n')
assert cs.main(d) == 2
code, stdout, stderr = cs.main(
'-q', '16', '-w', d, count=False, std=True)
assert code == 0
assert stdout == stderr == ''
assert cs.main(d) == 0
# empty directory
os.mkdir(op.join(d, 'test'))
assert cs.main(d) == 0
|
def test_basic(tmpdir, capsys):
"""Test some basic functionality."""
assert cs.main('_does_not_exist_') == 0
fname = op.join(str(tmpdir), 'tmp')
with open(fname, 'w') as f:
pass
code, _, stderr = cs.main('-D', 'foo', f.name, std=True)
assert code == EX_USAGE, 'missing dictionary'
assert 'cannot find dictionary' in stderr
assert cs.main(fname) == 0, 'empty file'
with open(fname, 'a') as f:
f.write('this is a test file\n')
assert cs.main(fname) == 0, 'good'
with open(fname, 'a') as f:
f.write('abandonned\n')
assert cs.main(fname) == 1, 'bad'
with open(fname, 'a') as f:
f.write('abandonned\n')
assert cs.main(fname) == 2, 'worse'
with open(fname, 'a') as f:
f.write('tim\ngonna\n')
assert cs.main(fname) == 2, 'with a name'
assert cs.main('--builtin', 'clear,rare,names,informal', fname) == 4
code, _, stderr = cs.main(fname, '--builtin', 'foo', std=True)
assert code == EX_USAGE # bad type
assert 'Unknown builtin dictionary' in stderr
d = str(tmpdir)
code, _, stderr = cs.main(fname, '-D', op.join(d, 'foo'), std=True)
assert code == EX_USAGE # bad dict
assert 'cannot find dictionary' in stderr
os.remove(fname)
with open(op.join(d, 'bad.txt'), 'w') as f:
f.write('abandonned\nAbandonned\nABANDONNED\nAbAnDoNnEd\nabandonned\rAbandonned\r\nABANDONNED \nAbAnDoNnEd') # noqa: E501
assert cs.main(d) == 8
code, _, stderr = cs.main('-w', d, std=True)
assert code == 0
assert 'FIXED:' in stderr
with open(op.join(d, 'bad.txt')) as f:
new_content = f.read()
assert cs.main(d) == 0
assert new_content == 'abandoned\nAbandoned\nABANDONED\nabandoned\nabandoned\rAbandoned\r\n ABANDONED \nabandoned' # noqa: E501
with open(op.join(d, 'bad.txt'), 'w') as f:
f.write('abandonned abandonned\n')
assert cs.main(d) == 2
code, stdout, stderr = cs.main(
'-q', '16', '-w', d, count=False, std=True)
assert code == 0
assert stdout == stderr == ''
assert cs.main(d) == 0
# empty directory
os.mkdir(op.join(d, 'test'))
assert cs.main(d) == 0
|
35,220 |
def unpackbits(myarray, bitorder='big'):
"""Unpacks elements of a uint8 array into a binary-valued output array.
This function currently does not support ``axis`` option.
Args:
myarray (cupy.ndarray): Input array.
bitorder : {'big', 'little'}, optional. Defaults to 'big'
Returns:
cupy.ndarray: The unpacked array.
.. seealso:: :func:`numpy.unpackbits`
"""
if myarray.dtype != cupy.uint8:
raise TypeError('Expected an input array of unsigned byte data type')
if bitorder not in ['big', 'little']:
raise ValueError("bitorder must be either 'big' or 'little'")
unpacked = cupy.ndarray((myarray.size * 8), dtype=cupy.uint8)
return _unpackbits_kernel[bitorder](myarray, unpacked)
|
def unpackbits(myarray, bitorder='big'):
"""Unpacks elements of a uint8 array into a binary-valued output array.
This function currently does not support ``axis`` option.
Args:
myarray (cupy.ndarray): Input array.
bitorder (str, optional): bit order to use when unpacking the array, allowed values are `'little'` and `'big'`. Defaults to `'big'`.
Returns:
cupy.ndarray: The unpacked array.
.. seealso:: :func:`numpy.unpackbits`
"""
if myarray.dtype != cupy.uint8:
raise TypeError('Expected an input array of unsigned byte data type')
if bitorder not in ['big', 'little']:
raise ValueError("bitorder must be either 'big' or 'little'")
unpacked = cupy.ndarray((myarray.size * 8), dtype=cupy.uint8)
return _unpackbits_kernel[bitorder](myarray, unpacked)
|
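For intuition, the NumPy counterpart that the CuPy function above mirrors (NumPy only, no GPU required): each uint8 value expands to eight bits, most significant bit first for bitorder='big' and least significant first for 'little'.
import numpy as np

a = np.array([7, 128], dtype=np.uint8)
print(np.unpackbits(a, bitorder='big'))     # [0 0 0 0 0 1 1 1 1 0 0 0 0 0 0 0]
print(np.unpackbits(a, bitorder='little'))  # [1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 1]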
30,738 |
def find_ip_by_mac(args, client=CLIENT):
mac = args.get("mac", "")
only_nodes_confirmed = args.get("only_nodes_confirmed", True)
result_error = None
result = [] # type: List[str]
response = client.http_get_request(
f'{QUERY_PATH}nodes | select ip mac_address | where mac_address == {mac}{nodes_confirmed_filter(only_nodes_confirmed)}')
if len(response["result"]) == 0:
human_readable = f'{INTEGRATION_NAME} - Results for the Ip from Mac Search not found ip for mac address: {mac}'
result_error = "Ip not found"
prefix = 'Nozomi.Error'
else:
ips = [node['ip'] for node in response["result"]]
human_readable = f'{INTEGRATION_NAME} - Results for the Ip from Mac Search is {ips}'
result = ips
prefix = 'Nozomi.Ips'
return {
'outputs': result_error if result_error else result,
'outputs_prefix': prefix,
'outputs_key_field': '',
'readable_output': human_readable
}
|
def find_ip_by_mac(args, client=CLIENT):
mac = args.get("mac", "")
only_nodes_confirmed = args.get("only_nodes_confirmed", True)
result_error = None
result = [] # type: List[str]
response = client.http_get_request(
f'{QUERY_PATH}nodes | select ip mac_address | where mac_address == {mac}{nodes_confirmed_filter(only_nodes_confirmed)}')
if len(response["result"]) == 0:
human_readable = f'{INTEGRATION_NAME} - No IP results were found for mac address: {mac}'
result_error = "Ip not found"
prefix = 'Nozomi.Error'
else:
ips = [node['ip'] for node in response["result"]]
human_readable = f'{INTEGRATION_NAME} - Results for the Ip from Mac Search is {ips}'
result = ips
prefix = 'Nozomi.Ips'
return {
'outputs': result_error if result_error else result,
'outputs_prefix': prefix,
'outputs_key_field': '',
'readable_output': human_readable
}
|
32,049 |
def main():
params = demisto.params()
base_url = urljoin(params.get('url'), '/api/v1/')
verify_ssl = not params.get('insecure', False)
proxy = params.get('proxy')
default_threshold = int(params.get('default_threshold', 2))
max_indicator_relationships = int(params.get('max_indicator_relationships', 0))
token = params.get('api_token')
reliability = params.get('integrationReliability')
reliability = reliability if reliability else DBotScoreReliability.C
if DBotScoreReliability.is_valid_type(reliability):
reliability = DBotScoreReliability.get_dbot_score_reliability_from_str(reliability)
else:
Exception("Please provide a valid value for the Source Reliability parameter.")
client = Client(
base_url=base_url,
headers={'X-OTX-API-KEY': token},
verify=verify_ssl,
proxy=proxy,
default_threshold=default_threshold,
reliability=reliability,
create_relationships=argToBoolean(params.get('create_relationships')),
max_indicator_relationships=max_indicator_relationships
)
command = demisto.command()
demisto.debug(f'Command being called is {command}')
commands = {
'test-module': test_module_command,
'domain': domain_command,
'file': file_command,
'url': url_command,
f'{INTEGRATION_COMMAND_NAME}-search-hostname': alienvault_search_hostname_command,
f'{INTEGRATION_COMMAND_NAME}-search-cve': alienvault_search_cve_command,
f'{INTEGRATION_COMMAND_NAME}-get-related-urls-by-indicator': alienvault_get_related_urls_by_indicator_command,
f'{INTEGRATION_COMMAND_NAME}-get-related-hashes-by-indicator': alienvault_get_related_hashes_by_indicator_command,
f'{INTEGRATION_COMMAND_NAME}-get-passive-dns-data-by-indicator': alienvault_get_passive_dns_data_by_indicator_command,
f'{INTEGRATION_COMMAND_NAME}-search-pulses': alienvault_search_pulses_command,
f'{INTEGRATION_COMMAND_NAME}-get-pulse-details': alienvault_get_pulse_details_command
}
try:
if command == f'{INTEGRATION_COMMAND_NAME}-search-ipv6':
return_results(ip_command(client=client,
ip_address=demisto.args().get('ip'),
ip_version='IPv6'))
elif command == 'ip':
return_results(ip_command(client=client,
ip_address=demisto.args().get('ip'),
ip_version='IPv4'))
elif command in ['file', 'domain', 'url']:
return_results(commands[command](client=client, **demisto.args()))
else:
readable_output, outputs, raw_response = commands[command](client=client, **demisto.args())
return_outputs(readable_output, outputs, raw_response)
# Log exceptions
except Exception as e:
err_msg = f'Error in {INTEGRATION_NAME} Integration [{e}]'
return_error(err_msg, error=e)
|
def main():
params = demisto.params()
base_url = urljoin(params.get('url'), '/api/v1/')
verify_ssl = not params.get('insecure', False)
proxy = params.get('proxy')
default_threshold = int(params.get('default_threshold', 2))
max_indicator_relationships = arg_to_number(params.get('max_indicator_relationships', 0))
token = params.get('api_token')
reliability = params.get('integrationReliability')
reliability = reliability if reliability else DBotScoreReliability.C
if DBotScoreReliability.is_valid_type(reliability):
reliability = DBotScoreReliability.get_dbot_score_reliability_from_str(reliability)
else:
Exception("Please provide a valid value for the Source Reliability parameter.")
client = Client(
base_url=base_url,
headers={'X-OTX-API-KEY': token},
verify=verify_ssl,
proxy=proxy,
default_threshold=default_threshold,
reliability=reliability,
create_relationships=argToBoolean(params.get('create_relationships')),
max_indicator_relationships=max_indicator_relationships
)
command = demisto.command()
demisto.debug(f'Command being called is {command}')
commands = {
'test-module': test_module_command,
'domain': domain_command,
'file': file_command,
'url': url_command,
f'{INTEGRATION_COMMAND_NAME}-search-hostname': alienvault_search_hostname_command,
f'{INTEGRATION_COMMAND_NAME}-search-cve': alienvault_search_cve_command,
f'{INTEGRATION_COMMAND_NAME}-get-related-urls-by-indicator': alienvault_get_related_urls_by_indicator_command,
f'{INTEGRATION_COMMAND_NAME}-get-related-hashes-by-indicator': alienvault_get_related_hashes_by_indicator_command,
f'{INTEGRATION_COMMAND_NAME}-get-passive-dns-data-by-indicator': alienvault_get_passive_dns_data_by_indicator_command,
f'{INTEGRATION_COMMAND_NAME}-search-pulses': alienvault_search_pulses_command,
f'{INTEGRATION_COMMAND_NAME}-get-pulse-details': alienvault_get_pulse_details_command
}
try:
if command == f'{INTEGRATION_COMMAND_NAME}-search-ipv6':
return_results(ip_command(client=client,
ip_address=demisto.args().get('ip'),
ip_version='IPv6'))
elif command == 'ip':
return_results(ip_command(client=client,
ip_address=demisto.args().get('ip'),
ip_version='IPv4'))
elif command in ['file', 'domain', 'url']:
return_results(commands[command](client=client, **demisto.args()))
else:
readable_output, outputs, raw_response = commands[command](client=client, **demisto.args())
return_outputs(readable_output, outputs, raw_response)
# Log exceptions
except Exception as e:
err_msg = f'Error in {INTEGRATION_NAME} Integration [{e}]'
return_error(err_msg, error=e)
|
20,328 |
def resolve_cmake_trace_targets(target_name: str,
trace: 'CMakeTraceParser',
env: 'Environment',
*,
clib_compiler: T.Union['MissongCompiler', 'Compiler'] = None,
not_found_warning: T.Callable[[str], None] = lambda x: None) -> ResolvedTarget:
res = ResolvedTarget()
targets = [target_name]
# recognise arguments we should pass directly to the linker
reg_is_lib = re.compile(r'^(-l[a-zA-Z0-9_]+|-l?pthread)$')
reg_is_maybe_bare_lib = re.compile(r'^[a-zA-Z0-9_]+$')
is_debug = cmake_is_debug(env)
processed_targets: T.List[str] = []
while len(targets) > 0:
curr = targets.pop(0)
# Skip already processed targets
if curr in processed_targets:
continue
if curr not in trace.targets:
if reg_is_lib.match(curr):
res.libraries += [curr]
elif Path(curr).is_absolute() and Path(curr).exists():
res.libraries += [curr]
elif env.machines.build.is_windows() and reg_is_maybe_bare_lib.match(curr) and clib_compiler:
# On Windows, CMake library dependencies can be passed as bare library names,
# CMake brute-forces a combination of prefix/suffix combinations to find the
# right library. Assume any bare argument passed which is not also a CMake
# target must be a system library we should try to link against.
res.libraries += clib_compiler.find_library(curr, env, [])
else:
not_found_warning(curr)
continue
tgt = trace.targets[curr]
cfgs = []
cfg = ''
mlog.debug(tgt)
if 'INTERFACE_INCLUDE_DIRECTORIES' in tgt.properties:
res.include_directories += [x for x in tgt.properties['INTERFACE_INCLUDE_DIRECTORIES'] if x]
if 'INTERFACE_LINK_OPTIONS' in tgt.properties:
res.link_flags += [x for x in tgt.properties['INTERFACE_LINK_OPTIONS'] if x]
if 'INTERFACE_COMPILE_DEFINITIONS' in tgt.properties:
res.public_compile_opts += ['-D' + re.sub('^-D', '', x) for x in tgt.properties['INTERFACE_COMPILE_DEFINITIONS'] if x]
if 'INTERFACE_COMPILE_OPTIONS' in tgt.properties:
res.public_compile_opts += [x for x in tgt.properties['INTERFACE_COMPILE_OPTIONS'] if x]
if 'IMPORTED_CONFIGURATIONS' in tgt.properties:
cfgs = [x for x in tgt.properties['IMPORTED_CONFIGURATIONS'] if x]
cfg = cfgs[0]
if is_debug:
if 'DEBUG' in cfgs:
cfg = 'DEBUG'
elif 'RELEASE' in cfgs:
cfg = 'RELEASE'
else:
if 'RELEASE' in cfgs:
cfg = 'RELEASE'
if f'IMPORTED_IMPLIB_{cfg}' in tgt.properties:
res.libraries += [x for x in tgt.properties[f'IMPORTED_IMPLIB_{cfg}'] if x]
elif 'IMPORTED_IMPLIB' in tgt.properties:
res.libraries += [x for x in tgt.properties['IMPORTED_IMPLIB'] if x]
elif f'IMPORTED_LOCATION_{cfg}' in tgt.properties:
res.libraries += [x for x in tgt.properties[f'IMPORTED_LOCATION_{cfg}'] if x]
elif 'IMPORTED_LOCATION' in tgt.properties:
res.libraries += [x for x in tgt.properties['IMPORTED_LOCATION'] if x]
if 'LINK_LIBRARIES' in tgt.properties:
targets += [x for x in tgt.properties['LINK_LIBRARIES'] if x]
if 'INTERFACE_LINK_LIBRARIES' in tgt.properties:
targets += [x for x in tgt.properties['INTERFACE_LINK_LIBRARIES'] if x]
if f'IMPORTED_LINK_DEPENDENT_LIBRARIES_{cfg}' in tgt.properties:
targets += [x for x in tgt.properties[f'IMPORTED_LINK_DEPENDENT_LIBRARIES_{cfg}'] if x]
elif 'IMPORTED_LINK_DEPENDENT_LIBRARIES' in tgt.properties:
targets += [x for x in tgt.properties['IMPORTED_LINK_DEPENDENT_LIBRARIES'] if x]
processed_targets += [curr]
res.include_directories = sorted(set(res.include_directories))
res.link_flags = sorted(set(res.link_flags))
res.public_compile_opts = sorted(set(res.public_compile_opts))
res.libraries = sorted(set(res.libraries))
return res
|
def resolve_cmake_trace_targets(target_name: str,
trace: 'CMakeTraceParser',
env: 'Environment',
*,
clib_compiler: T.Union['MissingCompiler', 'Compiler'] = None,
not_found_warning: T.Callable[[str], None] = lambda x: None) -> ResolvedTarget:
res = ResolvedTarget()
targets = [target_name]
# recognise arguments we should pass directly to the linker
reg_is_lib = re.compile(r'^(-l[a-zA-Z0-9_]+|-l?pthread)$')
reg_is_maybe_bare_lib = re.compile(r'^[a-zA-Z0-9_]+$')
is_debug = cmake_is_debug(env)
processed_targets: T.List[str] = []
while len(targets) > 0:
curr = targets.pop(0)
# Skip already processed targets
if curr in processed_targets:
continue
if curr not in trace.targets:
if reg_is_lib.match(curr):
res.libraries += [curr]
elif Path(curr).is_absolute() and Path(curr).exists():
res.libraries += [curr]
elif env.machines.build.is_windows() and reg_is_maybe_bare_lib.match(curr) and clib_compiler:
# On Windows, CMake library dependencies can be passed as bare library names,
# CMake brute-forces a combination of prefix/suffix combinations to find the
# right library. Assume any bare argument passed which is not also a CMake
# target must be a system library we should try to link against.
res.libraries += clib_compiler.find_library(curr, env, [])
else:
not_found_warning(curr)
continue
tgt = trace.targets[curr]
cfgs = []
cfg = ''
mlog.debug(tgt)
if 'INTERFACE_INCLUDE_DIRECTORIES' in tgt.properties:
res.include_directories += [x for x in tgt.properties['INTERFACE_INCLUDE_DIRECTORIES'] if x]
if 'INTERFACE_LINK_OPTIONS' in tgt.properties:
res.link_flags += [x for x in tgt.properties['INTERFACE_LINK_OPTIONS'] if x]
if 'INTERFACE_COMPILE_DEFINITIONS' in tgt.properties:
res.public_compile_opts += ['-D' + re.sub('^-D', '', x) for x in tgt.properties['INTERFACE_COMPILE_DEFINITIONS'] if x]
if 'INTERFACE_COMPILE_OPTIONS' in tgt.properties:
res.public_compile_opts += [x for x in tgt.properties['INTERFACE_COMPILE_OPTIONS'] if x]
if 'IMPORTED_CONFIGURATIONS' in tgt.properties:
cfgs = [x for x in tgt.properties['IMPORTED_CONFIGURATIONS'] if x]
cfg = cfgs[0]
if is_debug:
if 'DEBUG' in cfgs:
cfg = 'DEBUG'
elif 'RELEASE' in cfgs:
cfg = 'RELEASE'
else:
if 'RELEASE' in cfgs:
cfg = 'RELEASE'
if f'IMPORTED_IMPLIB_{cfg}' in tgt.properties:
res.libraries += [x for x in tgt.properties[f'IMPORTED_IMPLIB_{cfg}'] if x]
elif 'IMPORTED_IMPLIB' in tgt.properties:
res.libraries += [x for x in tgt.properties['IMPORTED_IMPLIB'] if x]
elif f'IMPORTED_LOCATION_{cfg}' in tgt.properties:
res.libraries += [x for x in tgt.properties[f'IMPORTED_LOCATION_{cfg}'] if x]
elif 'IMPORTED_LOCATION' in tgt.properties:
res.libraries += [x for x in tgt.properties['IMPORTED_LOCATION'] if x]
if 'LINK_LIBRARIES' in tgt.properties:
targets += [x for x in tgt.properties['LINK_LIBRARIES'] if x]
if 'INTERFACE_LINK_LIBRARIES' in tgt.properties:
targets += [x for x in tgt.properties['INTERFACE_LINK_LIBRARIES'] if x]
if f'IMPORTED_LINK_DEPENDENT_LIBRARIES_{cfg}' in tgt.properties:
targets += [x for x in tgt.properties[f'IMPORTED_LINK_DEPENDENT_LIBRARIES_{cfg}'] if x]
elif 'IMPORTED_LINK_DEPENDENT_LIBRARIES' in tgt.properties:
targets += [x for x in tgt.properties['IMPORTED_LINK_DEPENDENT_LIBRARIES'] if x]
processed_targets += [curr]
res.include_directories = sorted(set(res.include_directories))
res.link_flags = sorted(set(res.link_flags))
res.public_compile_opts = sorted(set(res.public_compile_opts))
res.libraries = sorted(set(res.libraries))
return res
|
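An illustrative stand-alone helper, not Meson API, that mirrors the preference order used above when deciding which IMPORTED_*_<CONFIG> property to read:
def pick_imported_config(cfgs, is_debug):
    # Mirrors the branch above: prefer DEBUG when building debug, then RELEASE,
    # otherwise fall back to the first configuration CMake reported.
    if not cfgs:
        return ''
    if is_debug and 'DEBUG' in cfgs:
        return 'DEBUG'
    if 'RELEASE' in cfgs:
        return 'RELEASE'
    return cfgs[0]

assert pick_imported_config(['RELEASE', 'DEBUG'], is_debug=True) == 'DEBUG'
assert pick_imported_config(['DEBUG', 'MINSIZEREL'], is_debug=False) == 'DEBUG'
assert pick_imported_config(['NOCONFIG'], is_debug=True) == 'NOCONFIG'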
6,582 |
def check_for_new_doc_with_same_name_as_deleted_parent(doc, doctype):
"""
Compares creation times of parent and child docs.
Since Transaction Deletion Record resets the naming series after deletion,
it allows the creation of new docs with the same names as the deleted ones.
"""
parent_creation_time = frappe.db.get_value(doc['parenttype'], doc['parent'], 'creation')
child_creation_time = frappe.db.get_value(doctype, doc, 'creation')
if getdate(parent_creation_time) > getdate(child_creation_time):
return True
return False
|
def check_for_new_doc_with_same_name_as_deleted_parent(doc, doctype):
"""
Compares creation times of parent and child docs.
Since Transaction Deletion Record resets the naming series after deletion,
it allows the creation of new docs with the same names as the deleted ones.
"""
parent_creation_time = frappe.db.get_value(doc['parenttype'], doc['parent'], 'creation')
child_creation_time = frappe.db.get_value(doctype, doc, 'creation')
return getdate(parent_creation_time) > getdate(child_creation_time)
|
32,546 |
def ip_threats_analysis(severity_score, threats: List, ip: str, threshold: str, dbot_score_obj):
""" process raw response data and generate dbot score ,human readable results, ip indicator object
Args:
- threats (list): threats data from cofense raw response
- indicator (string): threat severity level for dbot score calculation
- threshold (string): threshold for threat's severity
return:
Dict: represents human readable markdown table
int: dbot score
ip indicator : indicator object with the data collected from the threats
"""
block_type = BLOCK_TYPE_MAPPING["ip"]
threshold_score = severity_score.get(threshold, -1)
if threshold_score < 0 or threshold_score > 3:
raise Exception(
f'Cofense error: Invalid threshold value: {threshold}. Valid values are: None, Minor, Moderate or Major')
md_data = []
dbot_score = 0
ip_indicator = Common.IP(ip=ip, dbot_score=dbot_score_obj)
for threat in threats:
severity_level = 0
indicator_found = False
for block in threat.get('blockSet', {}):
data_1_content = extract_indicator_from_block(block, command="ip")
if block.get("blockType") == block_type and data_1_content == ip and block.get("impact"):
indicator_found = True
threat_score = severity_score.get(block.get('impact'), 0)
adjusted_score = 3 if threshold_score <= threat_score else threat_score
severity_level = max(severity_level, adjusted_score)
if block.get('ipDetail') and block.get('ipDetail').get('ip') == ip:
ip_indicator.asn = block.get('ipDetail').get('asn')
ip_indicator.geo_latitude = block.get("ipDetail").get("latitude")
ip_indicator.geo_longitude = block.get("ipDetail").get("longitude")
ip_indicator.geo_country = block.get("ipDetail").get("countryIsoCode")
ip_indicator.malware_family = block.get('malwareFamily', {}).get('familyName')
if indicator_found:
dbot_score = max(dbot_score, severity_level)
threat_md_row = create_threat_md_row(threat, severity_level)
threat_md_row["ASN"] = ip_indicator.asn
threat_md_row["Country"] = ip_indicator.geo_country
md_data.append(threat_md_row)
return md_data, dbot_score, ip_indicator
|
def ip_threats_analysis(severity_score, threats: List, ip: str, threshold: str, dbot_score_obj):
""" process raw response data and generate dbot score ,human readable results, ip indicator object
Args:
- threats (list): threats data from cofense raw response
- indicator (string): threat severity level for dbot score calculation
- threshold (string): threshold for threat's severity
return:
Dict: represents human readable markdown table
int: dbot score
ip indicator : indicator object with the data collected from the threats
"""
block_type = BLOCK_TYPE_MAPPING.get("ip")
threshold_score = severity_score.get(threshold, -1)
if threshold_score < 0 or threshold_score > 3:
raise Exception(
f'Cofense error: Invalid threshold value: {threshold}. Valid values are: None, Minor, Moderate or Major')
md_data = []
dbot_score = 0
ip_indicator = Common.IP(ip=ip, dbot_score=dbot_score_obj)
for threat in threats:
severity_level = 0
indicator_found = False
for block in threat.get('blockSet', {}):
data_1_content = extract_indicator_from_block(block, command="ip")
if block.get("blockType") == block_type and data_1_content == ip and block.get("impact"):
indicator_found = True
threat_score = severity_score.get(block.get('impact'), 0)
adjusted_score = 3 if threshold_score <= threat_score else threat_score
severity_level = max(severity_level, adjusted_score)
if block.get('ipDetail') and block.get('ipDetail').get('ip') == ip:
ip_indicator.asn = block.get('ipDetail').get('asn')
ip_indicator.geo_latitude = block.get("ipDetail").get("latitude")
ip_indicator.geo_longitude = block.get("ipDetail").get("longitude")
ip_indicator.geo_country = block.get("ipDetail").get("countryIsoCode")
ip_indicator.malware_family = block.get('malwareFamily', {}).get('familyName')
if indicator_found:
dbot_score = max(dbot_score, severity_level)
threat_md_row = create_threat_md_row(threat, severity_level)
threat_md_row["ASN"] = ip_indicator.asn
threat_md_row["Country"] = ip_indicator.geo_country
md_data.append(threat_md_row)
return md_data, dbot_score, ip_indicator
|
7,282 |
def random_noise(image, mode='gaussian', seed=None, clip=True, **kwargs):
"""
Function to add random noise of various types to a floating-point image.
Parameters
----------
image : ndarray
Input image data. Will be converted to float.
mode : str, optional
One of the following strings, selecting the type of noise to add:
- 'gaussian' Gaussian-distributed additive noise.
- 'localvar' Gaussian-distributed additive noise, with specified
local variance at each point of `image`.
- 'poisson' Poisson-distributed noise generated from the data.
- 'salt' Replaces random pixels with 1.
- 'pepper' Replaces random pixels with 0 (for unsigned images) or
-1 (for signed images).
- 's&p' Replaces random pixels with either 1 or `low_val`, where
`low_val` is 0 for unsigned images or -1 for signed
images.
- 'speckle' Multiplicative noise using out = image + n*image, where
n is Gaussian noise with specified mean & variance.
seed : int, optional
If provided, this will set the random seed before generating noise,
for valid pseudo-random comparisons.
clip : bool, optional
If True (default), the output will be clipped after noise applied
for modes `'speckle'`, `'poisson'`, and `'gaussian'`. This is
needed to maintain the proper image data range. If False, clipping
is not applied, and the output may extend beyond the range [-1, 1].
mean : float, optional
Mean of random distribution. Used in 'gaussian' and 'speckle'.
Default : 0.
var : float, optional
Variance of random distribution. Used in 'gaussian' and 'speckle'.
Note: variance = (standard deviation) ** 2. Default : 0.01
local_vars : ndarray, optional
Array of positive floats, same shape as `image`, defining the local
variance at every image point. Used in 'localvar'.
amount : float, optional
Proportion of image pixels to replace with noise on range [0, 1].
Used in 'salt', 'pepper', and 'salt & pepper'. Default : 0.05
salt_vs_pepper : float, optional
Proportion of salt vs. pepper noise for 's&p' on range [0, 1].
Higher values represent more salt. Default : 0.5 (equal amounts)
Returns
-------
out : ndarray
Output floating-point image data on range [0, 1] or [-1, 1] if the
input `image` was unsigned or signed, respectively.
Notes
-----
Speckle, Poisson, Localvar, and Gaussian noise may generate noise outside
the valid image range. The default is to clip (not alias) these values,
but they may be preserved by setting `clip=False`. Note that in this case
the output may contain values outside the ranges [0, 1] or [-1, 1].
Use this option with care.
Because of the prevalence of exclusively positive floating-point images in
intermediate calculations, it is not possible to intuit if an input is
signed based on dtype alone. Instead, negative values are explicitly
searched for. Only if found does this function assume signed input.
    Unexpected results only occur in rare, poorly exposed cases (e.g. if all
values are above 50 percent gray in a signed `image`). In this event,
manually scaling the input to the positive domain will solve the problem.
The Poisson distribution is only defined for positive integers. To apply
this noise type, the number of unique values in the image is found and
the next round power of two is used to scale up the floating-point result,
after which it is scaled back down to the floating-point image range.
To generate Poisson noise against a signed image, the signed image is
temporarily converted to an unsigned image in the floating point domain,
Poisson noise is generated, then it is returned to the original range.
"""
mode = mode.lower()
# Detect if a signed image was input
if image.min() < 0:
low_clip = -1.
else:
low_clip = 0.
image = img_as_float(image)
if seed is not None:
np.random.seed(seed=seed)
allowedtypes = {
'gaussian': 'gaussian_values',
'localvar': 'localvar_values',
'poisson': 'poisson_values',
'salt': 'sp_values',
'pepper': 'sp_values',
's&p': 's&p_values',
'speckle': 'gaussian_values'}
kwdefaults = {
'mean': 0.,
'var': 0.01,
'amount': 0.05,
'salt_vs_pepper': 0.5,
'local_vars': np.zeros_like(image) + 0.01}
allowedkwargs = {
'gaussian_values': ['mean', 'var'],
'localvar_values': ['local_vars'],
'sp_values': ['amount'],
's&p_values': ['amount', 'salt_vs_pepper'],
'poisson_values': []}
for key in kwargs:
if key not in allowedkwargs[allowedtypes[mode]]:
raise ValueError('%s keyword not in allowed keywords %s' %
(key, allowedkwargs[allowedtypes[mode]]))
# Set kwarg defaults
for kw in allowedkwargs[allowedtypes[mode]]:
kwargs.setdefault(kw, kwdefaults[kw])
if mode == 'gaussian':
noise = np.random.normal(kwargs['mean'], kwargs['var'] ** 0.5,
image.shape)
out = image + noise
elif mode == 'localvar':
# Ensure local variance input is correct
if (kwargs['local_vars'] <= 0).any():
raise ValueError('All values of `local_vars` must be > 0.')
# Safe shortcut usage broadcasts kwargs['local_vars'] as a ufunc
out = image + np.random.normal(0, kwargs['local_vars'] ** 0.5)
elif mode == 'poisson':
# Determine unique values in image & calculate the next power of two
vals = len(np.unique(image))
vals = 2 ** np.ceil(np.log2(vals))
# Ensure image is exclusively positive
if low_clip == -1.:
old_max = image.max()
image = (image + 1.) / (old_max + 1.)
# Generating noise for each unique value in image.
out = np.random.poisson(image * vals) / float(vals)
# Return image to original range if input was signed
if low_clip == -1.:
out = out * (old_max + 1.) - 1.
elif mode == 'salt':
# Re-call function with mode='s&p' and p=1 (all salt noise)
out = random_noise(image, mode='s&p', seed=seed,
amount=kwargs['amount'], salt_vs_pepper=1.)
elif mode == 'pepper':
# Re-call function with mode='s&p' and p=1 (all pepper noise)
out = random_noise(image, mode='s&p', seed=seed,
amount=kwargs['amount'], salt_vs_pepper=0.)
elif mode == 's&p':
out = image.copy()
p = kwargs['amount']
q = kwargs['salt_vs_pepper']
flipped = choose(p, image.shape)
salted = choose(q, image.shape)
peppered = ~salted
out[flipped & salted] = 1
out[flipped & peppered] = low_clip
elif mode == 'speckle':
noise = np.random.normal(kwargs['mean'], kwargs['var'] ** 0.5,
image.shape)
out = image + image * noise
# Clip back to original range, if necessary
if clip:
out = np.clip(out, low_clip, 1.0)
return out
|
def random_noise(image, mode='gaussian', seed=None, clip=True, **kwargs):
"""
Function to add random noise of various types to a floating-point image.
Parameters
----------
image : ndarray
Input image data. Will be converted to float.
mode : str, optional
One of the following strings, selecting the type of noise to add:
- 'gaussian' Gaussian-distributed additive noise.
- 'localvar' Gaussian-distributed additive noise, with specified
local variance at each point of `image`.
- 'poisson' Poisson-distributed noise generated from the data.
- 'salt' Replaces random pixels with 1.
- 'pepper' Replaces random pixels with 0 (for unsigned images) or
-1 (for signed images).
- 's&p' Replaces random pixels with either 1 or `low_val`, where
`low_val` is 0 for unsigned images or -1 for signed
images.
- 'speckle' Multiplicative noise using out = image + n*image, where
n is Gaussian noise with specified mean & variance.
seed : int, optional
If provided, this will set the random seed before generating noise,
for valid pseudo-random comparisons.
clip : bool, optional
If True (default), the output will be clipped after noise applied
for modes `'speckle'`, `'poisson'`, and `'gaussian'`. This is
needed to maintain the proper image data range. If False, clipping
is not applied, and the output may extend beyond the range [-1, 1].
mean : float, optional
Mean of random distribution. Used in 'gaussian' and 'speckle'.
Default : 0.
var : float, optional
Variance of random distribution. Used in 'gaussian' and 'speckle'.
Note: variance = (standard deviation) ** 2. Default : 0.01
local_vars : ndarray, optional
Array of positive floats, same shape as `image`, defining the local
variance at every image point. Used in 'localvar'.
amount : float, optional
Proportion of image pixels to replace with noise on range [0, 1].
Used in 'salt', 'pepper', and 'salt & pepper'. Default : 0.05
salt_vs_pepper : float, optional
Proportion of salt vs. pepper noise for 's&p' on range [0, 1].
Higher values represent more salt. Default : 0.5 (equal amounts)
Returns
-------
out : ndarray
Output floating-point image data on range [0, 1] or [-1, 1] if the
input `image` was unsigned or signed, respectively.
Notes
-----
Speckle, Poisson, Localvar, and Gaussian noise may generate noise outside
the valid image range. The default is to clip (not alias) these values,
but they may be preserved by setting `clip=False`. Note that in this case
the output may contain values outside the ranges [0, 1] or [-1, 1].
Use this option with care.
Because of the prevalence of exclusively positive floating-point images in
intermediate calculations, it is not possible to intuit if an input is
signed based on dtype alone. Instead, negative values are explicitly
searched for. Only if found does this function assume signed input.
    Unexpected results only occur in rare, poorly exposed cases (e.g. if all
values are above 50 percent gray in a signed `image`). In this event,
manually scaling the input to the positive domain will solve the problem.
The Poisson distribution is only defined for positive integers. To apply
this noise type, the number of unique values in the image is found and
the next round power of two is used to scale up the floating-point result,
after which it is scaled back down to the floating-point image range.
To generate Poisson noise against a signed image, the signed image is
temporarily converted to an unsigned image in the floating point domain,
Poisson noise is generated, then it is returned to the original range.
"""
mode = mode.lower()
# Detect if a signed image was input
if image.min() < 0:
low_clip = -1.
else:
low_clip = 0.
image = img_as_float(image)
if seed is not None:
np.random.seed(seed=seed)
allowedtypes = {
'gaussian': 'gaussian_values',
'localvar': 'localvar_values',
'poisson': 'poisson_values',
'salt': 'sp_values',
'pepper': 'sp_values',
's&p': 's&p_values',
'speckle': 'gaussian_values'}
kwdefaults = {
'mean': 0.,
'var': 0.01,
'amount': 0.05,
'salt_vs_pepper': 0.5,
'local_vars': np.zeros_like(image) + 0.01}
allowedkwargs = {
'gaussian_values': ['mean', 'var'],
'localvar_values': ['local_vars'],
'sp_values': ['amount'],
's&p_values': ['amount', 'salt_vs_pepper'],
'poisson_values': []}
for key in kwargs:
if key not in allowedkwargs[allowedtypes[mode]]:
raise ValueError('%s keyword not in allowed keywords %s' %
(key, allowedkwargs[allowedtypes[mode]]))
# Set kwarg defaults
for kw in allowedkwargs[allowedtypes[mode]]:
kwargs.setdefault(kw, kwdefaults[kw])
if mode == 'gaussian':
noise = np.random.normal(kwargs['mean'], kwargs['var'] ** 0.5,
image.shape)
out = image + noise
elif mode == 'localvar':
# Ensure local variance input is correct
if (kwargs['local_vars'] <= 0).any():
raise ValueError('All values of `local_vars` must be > 0.')
# Safe shortcut usage broadcasts kwargs['local_vars'] as a ufunc
out = image + np.random.normal(0, kwargs['local_vars'] ** 0.5)
elif mode == 'poisson':
# Determine unique values in image & calculate the next power of two
vals = len(np.unique(image))
vals = 2 ** np.ceil(np.log2(vals))
# Ensure image is exclusively positive
if low_clip == -1.:
old_max = image.max()
image = (image + 1.) / (old_max + 1.)
# Generating noise for each unique value in image.
out = np.random.poisson(image * vals) / float(vals)
# Return image to original range if input was signed
if low_clip == -1.:
out = out * (old_max + 1.) - 1.
elif mode == 'salt':
# Re-call function with mode='s&p' and p=1 (all salt noise)
out = random_noise(image, mode='s&p', seed=seed,
amount=kwargs['amount'], salt_vs_pepper=1.)
elif mode == 'pepper':
# Re-call function with mode='s&p' and p=1 (all pepper noise)
out = random_noise(image, mode='s&p', seed=seed,
amount=kwargs['amount'], salt_vs_pepper=0.)
elif mode == 's&p':
out = image.copy()
p = kwargs['amount']
q = kwargs['salt_vs_pepper']
flipped = _bernoulli(p, image.shape)
salted = choose(q, image.shape)
peppered = ~salted
out[flipped & salted] = 1
out[flipped & peppered] = low_clip
elif mode == 'speckle':
noise = np.random.normal(kwargs['mean'], kwargs['var'] ** 0.5,
image.shape)
out = image + image * noise
# Clip back to original range, if necessary
if clip:
out = np.clip(out, low_clip, 1.0)
return out
|
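The notes in the listing above describe how Poisson noise is drawn after scaling the image by the next power of two above its number of unique values; a minimal standalone sketch of that step (NumPy only, illustrative values, not the library's own code):
import numpy as np

# Sketch of the Poisson scaling step described in the notes above.
image = np.array([[0.0, 0.25], [0.5, 1.0]])
vals = len(np.unique(image))            # 4 unique values
vals = 2 ** np.ceil(np.log2(vals))      # next round power of two -> 4.0
rng = np.random.default_rng(0)
noisy = rng.poisson(image * vals) / float(vals)   # scale counts back down
noisy = np.clip(noisy, 0.0, 1.0)        # clip to the valid range, as clip=True does
print(noisy)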
49,882 |
def test_run_model_with_weather_noct_sam_temp(sapm_dc_snl_ac_system, location,
weather, mocker):
weather['wind_speed'] = 5
weather['temp_air'] = 10
sapm_dc_snl_ac_system.temperature_model_parameters = {
'noct': 45, 'eta_m_ref': 0.2
}
mc = ModelChain(sapm_dc_snl_ac_system, location)
mc.temperature_model = 'noct_sam'
m_noct_sam = mocker.spy(sapm_dc_snl_ac_system, 'noct_sam_celltemp')
mc.run_model(weather)
assert m_noct_sam.call_count == 1
assert_series_equal(m_noct_sam.call_args[0][1], weather['temp_air'])
assert_series_equal(m_noct_sam.call_args[0][2], weather['wind_speed'])
# check that effective_irradiance was used
assert m_noct_sam.call_args.kwargs == {
'effective_irradiance': mc.results.effective_irradiance}
|
def test_run_model_with_weather_noct_sam_temp(sapm_dc_snl_ac_system, location,
weather, mocker):
weather['wind_speed'] = 5
weather['temp_air'] = 10
sapm_dc_snl_ac_system.temperature_model_parameters = {
'noct': 45, 'eta_m_ref': 0.2
}
mc = ModelChain(sapm_dc_snl_ac_system, location)
mc.temperature_model = 'noct_sam'
m_noct_sam = mocker.spy(sapm_dc_snl_ac_system, 'noct_sam_celltemp')
mc.run_model(weather)
assert m_noct_sam.call_count == 1
assert_series_equal(m_noct_sam.call_args[0][1], weather['temp_air'])
assert_series_equal(m_noct_sam.call_args[0][2], weather['wind_speed'])
# check that effective_irradiance was used
assert m_noct_sam.call_args[1] == {
'effective_irradiance': mc.results.effective_irradiance}
|
36,200 |
def convert_pni_file_to_product_page_image(pni_product: Product, new_product_page: ProductPage):
"""
Take an existing product and convert its FileField image to a WagtailImage object.
pni_product must be an instance of a Product
new_product_page must be an instance of a ProductPage
"""
# 1. Get the mimetype of the image.
mime = MimeTypes()
mime_type = mime.guess_type(pni_product.image.file.name) # -> ('image/jpeg', None)
mime_type = mime_type[0].split('/')[1].upper()
# 2. Create an image out of the FileField.
pil_image = PILImage.open(pni_product.image.file)
f = BytesIO()
pil_image.save(f, mime_type)
new_image_name = ntpath.basename(pni_product.image.file.name)
wagtail_image = WagtailImage.objects.create(
title=new_image_name,
file=ImageFile(f, name=new_image_name)
)
# 3. Associate new_product_page.image with wagtail_image
new_product_page.image = wagtail_image
# 4. If the product is a draft, don't publish the page.
# If the product is NOT a draft, publish the latest revision.
if not pni_product.draft:
new_product_page.save_revision().publish()
else:
new_product_page.save_revision()
new_product_page.save()
|
def convert_pni_file_to_product_page_image(pni_product: Product, new_product_page: ProductPage):
"""
Take an existing product and convert its FileField image to a WagtailImage object.
pni_product must be an instance of a Product
new_product_page must be an instance of a ProductPage
"""
# 1. Get the mimetype of the image.
mime = MimeTypes()
mime_type = mime.guess_type(pni_product.image.file.name) # -> ('image/jpeg', None)
mime_type = mime_type[0].split('/')[1].upper()
# 2. Create an image out of the FileField.
pil_image = PILImage.open(pni_product.image.file)
pil_image.save(BytesIO() , mime_type)
new_image_name = ntpath.basename(pni_product.image.file.name)
wagtail_image = WagtailImage.objects.create(
title=new_image_name,
file=ImageFile(f, name=new_image_name)
)
# 3. Associate new_product_page.image with wagtail_image
new_product_page.image = wagtail_image
# 4. If the product is a draft, don't publish the page.
# If the product is NOT a draft, publish the latest revision.
if not pni_product.draft:
new_product_page.save_revision().publish()
else:
new_product_page.save_revision()
new_product_page.save()
|
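The pair above turns on keeping a reference to the in-memory buffer that PIL writes into before wrapping it for Django; a minimal sketch of that buffer pattern, assuming Pillow and Django are installed (the Wagtail model lookup is left out):
from io import BytesIO

from django.core.files.images import ImageFile  # requires Django
from PIL import Image as PILImage               # requires Pillow

# Render a small image into an in-memory file and keep the handle; saving into
# an anonymous BytesIO() would leave nothing to pass to ImageFile afterwards.
pil_image = PILImage.new("RGB", (32, 32), color="white")
f = BytesIO()
pil_image.save(f, "JPEG")
f.seek(0)
django_file = ImageFile(f, name="example.jpg")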
3,837 |
def test_all_simple_paths_with_two_targets():
G = nx.path_graph(4)
G.add_edge(2, 4)
paths = nx.all_simple_paths(G, 0, [3, 4])
assert_equal(set(tuple(p) for p in paths), {(0, 1, 2, 3), (0, 1, 2, 4)})
G = nx.path_graph(4, create_using=nx.DiGraph())
G.add_edge(2, 4)
paths = nx.all_simple_paths(G, 0, [3, 4])
assert_equal(set(tuple(p) for p in paths), {(0, 1, 2, 3), (0, 1, 2, 4)})
|
def test_with_all_simple_paths_with_two_targets_emits_two_paths():
assert_equal(set(tuple(p) for p in paths), {(0, 1, 2, 3), (0, 1, 2, 4)})
|
430 |
def as_value_source(
value_source_config: Union[dict, JsonDict],
) -> ValueSource:
if isinstance(value_source_config, JsonDict):
value_source_config = dict(value_source_config) # JsonDict fails assertion in Schema.validate()
for subclass in recurse_subclasses(ValueSource):
try:
args, kwargs = subclass.get_schema_params()
validated_config = Schema(*args, **kwargs).validate(value_source_config)
except SchemaError:
pass
else:
return subclass.wrap(validated_config)
else:
raise TypeError(f"Unable to determine class for {value_source_config!r}")
|
def as_value_source(
value_source_config: Union[dict, JsonDict],
) -> ValueSource:
if not isinstance(value_source_config, dict):
value_source_config = dict(value_source_config) # JsonDict fails assertion in Schema.validate()
for subclass in recurse_subclasses(ValueSource):
try:
args, kwargs = subclass.get_schema_params()
validated_config = Schema(*args, **kwargs).validate(value_source_config)
except SchemaError:
pass
else:
return subclass.wrap(validated_config)
else:
raise TypeError(f"Unable to determine class for {value_source_config!r}")
|
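A minimal sketch of the try-each-schema dispatch used above, with two hypothetical candidate schemas (the Schema/SchemaError names come from the third-party schema package the listing appears to use):
from schema import Schema, SchemaError

# Hypothetical candidates; the first whose schema validates the config wins.
candidates = {
    "constant": Schema({"value": str}),
    "env": Schema({"env_var": str}),
}
config = {"env_var": "HOME"}
for name, candidate in candidates.items():
    try:
        validated = candidate.validate(config)
    except SchemaError:
        continue
    print(name, validated)   # -> env {'env_var': 'HOME'}
    break
else:
    raise TypeError(f"Unable to determine a candidate for {config!r}")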
11,547 |
def fetch_git_dirty():
# Ensure git index is up-to-date first. This usually isn't necessary, but
# can be needed inside a docker container where the index is out of date.
# pylint: disable=compare-to-zero
subprocess.call(["git", "update-index", "-q", "--refresh"])
dirty_tree = subprocess.call(["git", "diff-files", "--quiet"]) != 0
dirty_index = (
subprocess.call(["git", "diff-index", "--quiet", "--cached", "HEAD"]) != 0
)
return dirty_tree or dirty_index
|
def fetch_git_dirty():
# Ensure git index is up-to-date first. This usually isn't necessary, but
# can be needed inside a docker container where the index is out of date.
# pylint: disable=compare-to-zero
subprocess.call(["git", "update-index", "-q", "--refresh"])
dirty_tree = bool(subprocess.call(["git", "diff-files", "--quiet"]))
dirty_index = (
subprocess.call(["git", "diff-index", "--quiet", "--cached", "HEAD"]) != 0
)
return dirty_tree or dirty_index
|
42,119 |
def prepare_study_with_trials(
n_objectives: int = 1,
direction: str = "minimize",
value_for_first_trial: float = 0.0,
) -> Study:
"""Return a dummy study object for tests.
This function is added to reduce the code to set up dummy study object in each test case.
However, you can only use this function for unit tests that are loosely coupled with the
dummy study object. Unittests that are tightly coupled with the study become difficult to
read because of `Mystery Guest <http://xunitpatterns.com/Obscure%20Test.html#Mystery%20Guest>`_
and/or `Eager Test <http://xunitpatterns.com/Obscure%20Test.html#Eager%20Test>`_ anti-patterns.
Args:
n_objectives: Number of objective values.
direction: Study's optimization direction.
value_for_first_trial: Objective value in first trial. This value will be broadcasted
to all objectives in multi-objective optimization.
Returns:
:class:`~optuna.study.Study`
"""
study = create_study(directions=[direction] * n_objectives)
study.add_trial(
create_trial(
values=[value_for_first_trial] * n_objectives,
params={"param_a": 1.0, "param_b": 2.0, "param_c": 3.0, "param_d": 4.0},
distributions={
"param_a": FloatDistribution(0.0, 3.0),
"param_b": FloatDistribution(0.0, 3.0),
"param_c": FloatDistribution(2.0, 5.0),
"param_d": FloatDistribution(2.0, 5.0),
},
)
)
study.add_trial(
create_trial(
values=[2.0] * n_objectives,
params={"param_b": 0.0, "param_d": 4.0},
distributions={
"param_b": FloatDistribution(0.0, 3.0),
"param_d": FloatDistribution(2.0, 5.0),
},
)
)
study.add_trial(
create_trial(
values=[1.0] * n_objectives,
params={"param_a": 2.5, "param_b": 1.0, "param_c": 4.5, "param_d": 2.0},
distributions={
"param_a": FloatDistribution(0.0, 3.0),
"param_b": FloatDistribution(0.0, 3.0),
"param_c": FloatDistribution(2.0, 5.0),
"param_d": FloatDistribution(2.0, 5.0),
},
)
)
return study
|
def prepare_study_with_trials(
n_objectives: int = 1,
direction: str = "minimize",
value_for_first_trial: float = 0.0,
) -> Study:
"""Return a dummy study object for tests.
This function is added to reduce the code to set up dummy study object in each test case.
However, you can only use this function for unit tests that are loosely coupled with the
dummy study object. Unit tests that are tightly coupled with the study become difficult to
read because of `Mystery Guest <http://xunitpatterns.com/Obscure%20Test.html#Mystery%20Guest>`_
and/or `Eager Test <http://xunitpatterns.com/Obscure%20Test.html#Eager%20Test>`_ anti-patterns.
Args:
n_objectives: Number of objective values.
direction: Study's optimization direction.
value_for_first_trial: Objective value in first trial. This value will be broadcasted
to all objectives in multi-objective optimization.
Returns:
:class:`~optuna.study.Study`
"""
study = create_study(directions=[direction] * n_objectives)
study.add_trial(
create_trial(
values=[value_for_first_trial] * n_objectives,
params={"param_a": 1.0, "param_b": 2.0, "param_c": 3.0, "param_d": 4.0},
distributions={
"param_a": FloatDistribution(0.0, 3.0),
"param_b": FloatDistribution(0.0, 3.0),
"param_c": FloatDistribution(2.0, 5.0),
"param_d": FloatDistribution(2.0, 5.0),
},
)
)
study.add_trial(
create_trial(
values=[2.0] * n_objectives,
params={"param_b": 0.0, "param_d": 4.0},
distributions={
"param_b": FloatDistribution(0.0, 3.0),
"param_d": FloatDistribution(2.0, 5.0),
},
)
)
study.add_trial(
create_trial(
values=[1.0] * n_objectives,
params={"param_a": 2.5, "param_b": 1.0, "param_c": 4.5, "param_d": 2.0},
distributions={
"param_a": FloatDistribution(0.0, 3.0),
"param_b": FloatDistribution(0.0, 3.0),
"param_c": FloatDistribution(2.0, 5.0),
"param_d": FloatDistribution(2.0, 5.0),
},
)
)
return study
|
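A hypothetical call to the fixture above, assuming optuna and the names used in the listing (create_study, create_trial, FloatDistribution) are importable:
# Two-objective variant: the first trial's value is broadcast to both objectives.
study = prepare_study_with_trials(n_objectives=2, direction="maximize",
                                  value_for_first_trial=1.5)
assert len(study.trials) == 3
assert study.trials[0].values == [1.5, 1.5]
assert set(study.trials[1].params) == {"param_b", "param_d"}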
3,004 |
def _get_root(key: str) -> Tuple[Any, str]:
path = key.split(".")
cursor = _global_config
for p in path[:-1]:
cursor = cursor[p]
return cursor, path[-1]
|
def _get_root(key: str) -> Tuple[Dict[str, Any], str]:
path = key.split(".")
cursor = _global_config
for p in path[:-1]:
cursor = cursor[p]
return cursor, path[-1]
|
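A minimal sketch of the dotted-key lookup above, assuming _get_root and a hypothetical _global_config live in the same module:
_global_config = {"display": {"max_rows": 60}}

parent, leaf = _get_root("display.max_rows")
assert parent is _global_config["display"] and leaf == "max_rows"
parent[leaf] = 100   # callers can read or update the leaf in place
assert _global_config["display"]["max_rows"] == 100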
35,558 |
def read_sensor_events(sensor_types, duration_sec):
esocks = {}
events = {}
for stype in sensor_types:
esocks[stype] = messaging.sub_sock(stype, timeout=0.1)
events[stype] = []
start_time_sec = time.monotonic()
while time.monotonic() - start_time_sec < duration_sec:
for esock in esocks:
events[esock] += messaging.drain_sock(esocks[esock])
time.sleep(0.01)
if not is_bmx_available():
del events['accelerometer2']
del events['gyroscope2']
for etype in events:
assert len(events[etype]) != 0, f"No {etype} events collected"
return events
|
def read_sensor_events(sensor_types, duration_sec):
esocks = {}
events = {}
for stype in sensor_types:
esocks[stype] = messaging.sub_sock(stype, timeout=0.1)
events[stype] = []
start_time_sec = time.monotonic()
while time.monotonic() - start_time_sec < duration_sec:
for esock in esocks:
events[esock] += messaging.drain_sock(esocks[esock])
time.sleep(0.1)
if not is_bmx_available():
del events['accelerometer2']
del events['gyroscope2']
for etype in events:
assert len(events[etype]) != 0, f"No {etype} events collected"
return events
|
5,321 |
def get_health_check_by_name(name, region=None, key=None, keyid=None, profile=None):
'''
Return detailed info about the given healthcheck (by name).
name
The name of the health check to lookup.
region
Region to connect to.
key
Secret key to be used.
keyid
Access key to be used.
profile
Dict, or pillar key pointing to a dict, containing AWS region/key/keyid.
CLI Example:
.. code-block:: bash
salt myminion boto3_route53.get_health_check_by_name ANAME \
profile='{"region": "us-east-1", "keyid": "A12345678AB", "key": "xblahblahblah"}'
'''
id = get_health_check_id_by_name(name, region, key, keyid, profile)
return get_health_check(id, region, key, keyid, profile) if id != None else None
|
def get_health_check_by_name(name, region=None, key=None, keyid=None, profile=None):
'''
Return detailed info about the given healthcheck (by name).
name
The name of the health check to lookup.
region
Region to connect to.
key
Secret key to be used.
keyid
Access key to be used.
profile
Dict, or pillar key pointing to a dict, containing AWS region/key/keyid.
CLI Example:
.. code-block:: bash
salt myminion boto3_route53.get_health_check_by_name ANAME \
profile='{"region": "us-east-1", "keyid": "A12345678AB", "key": "xblahblahblah"}'
'''
id = get_health_check_id_by_name(name, region, key, keyid, profile)
return None if id is None else get_health_check(id, region, key, keyid, profile)
|
46,555 |
def run_fork_test(spec, pre_state):
yield 'pre', pre_state
post_state = spec.upgrade_to_lightclient_patch(pre_state)
# Stable fields
stable_fields = [
'genesis_time', 'genesis_validators_root', 'slot',
# History
'latest_block_header', 'block_roots', 'state_roots', 'historical_roots',
# Eth1
'eth1_data', 'eth1_data_votes', 'eth1_deposit_index',
# Registry
'validators', 'balances',
# Randomness
'randao_mixes',
# Slashings
'slashings',
# Finality
'justification_bits', 'previous_justified_checkpoint', 'current_justified_checkpoint', 'finalized_checkpoint',
]
for field in stable_fields:
assert getattr(pre_state, field) == getattr(post_state, field)
# Modified fields
modified_fields = ['fork']
for field in modified_fields:
assert getattr(pre_state, field) != getattr(post_state, field)
assert pre_state.fork.current_version == post_state.fork.previous_version
assert post_state.fork.current_version == spec.LIGHTCLIENT_PATCH_FORK_VERSION
assert post_state.fork.epoch == spec.get_current_epoch(post_state)
yield 'post', post_state
|
def run_fork_test(post_spec, pre_state):
yield 'pre', pre_state
post_state = spec.upgrade_to_lightclient_patch(pre_state)
# Stable fields
stable_fields = [
'genesis_time', 'genesis_validators_root', 'slot',
# History
'latest_block_header', 'block_roots', 'state_roots', 'historical_roots',
# Eth1
'eth1_data', 'eth1_data_votes', 'eth1_deposit_index',
# Registry
'validators', 'balances',
# Randomness
'randao_mixes',
# Slashings
'slashings',
# Finality
'justification_bits', 'previous_justified_checkpoint', 'current_justified_checkpoint', 'finalized_checkpoint',
]
for field in stable_fields:
assert getattr(pre_state, field) == getattr(post_state, field)
# Modified fields
modified_fields = ['fork']
for field in modified_fields:
assert getattr(pre_state, field) != getattr(post_state, field)
assert pre_state.fork.current_version == post_state.fork.previous_version
assert post_state.fork.current_version == spec.LIGHTCLIENT_PATCH_FORK_VERSION
assert post_state.fork.epoch == spec.get_current_epoch(post_state)
yield 'post', post_state
|
42,999 |
def nullT(n, m, U):
r"""Nullifies element n,m of U using T"""
(nmax, mmax) = U.shape
if nmax != mmax:
raise ValueError("U must be a square matrix")
if (U[n-1, m] == 0 and U[n, m] == 0):
thetar = 0
phir = 0
elif U[n-1, m] == 0:
thetar = np.pi/2
phir = 0
else:
r = -U[n, m] / U[n-1, m]
thetar = np.arctan(np.abs(r))
phir = np.angle(r)
return [n-1, n, thetar, phir, nmax]
|
def nullT(n, m, U):
r"""Nullifies element n,m of U using T"""
(nmax, mmax) = U.shape
if nmax != mmax:
raise ValueError("U must be a square matrix")
if U[n, m] == 0:
thetar = 0
phir = 0
elif U[n-1, m] == 0:
thetar = np.pi/2
phir = 0
else:
r = -U[n, m] / U[n-1, m]
thetar = np.arctan(np.abs(r))
phir = np.angle(r)
return [n-1, n, thetar, phir, nmax]
|
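A quick numeric check of the rotation parameters computed above, using an illustrative 2x2 unitary (NumPy only):
import numpy as np

U = np.array([[1.0, 1.0], [1.0, -1.0]]) / np.sqrt(2)   # a real 2x2 unitary
n, m = 1, 0
r = -U[n, m] / U[n - 1, m]                  # -1.0
theta, phi = np.arctan(np.abs(r)), np.angle(r)
print(theta, phi)                           # -> ~0.7854 (pi/4), ~3.1416 (pi)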
31,971 |
def date_to_epoch(date: str, formatter: Optional[str] = None):
epoch = datetime(1970, 1, 1, tzinfo=timezone.utc)
date_obj = datetime.strptime(date, formatter) if formatter else dateparser.parse(date)
return int(date_obj.strftime('%s') if date_obj.tzinfo is None else (date_obj - epoch).total_seconds())
|
def date_to_epoch(date: str, formatter: Optional[str] = None) -> int:
epoch = datetime(1970, 1, 1, tzinfo=timezone.utc)
date_obj = datetime.strptime(date, formatter) if formatter else dateparser.parse(date)
return int(date_obj.strftime('%s') if date_obj.tzinfo is None else (date_obj - epoch).total_seconds())
|
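A hypothetical call to the helper above; with %z in the format string the parsed datetime is timezone-aware, so the deterministic UTC branch is taken rather than the platform-dependent '%s' path:
ts = date_to_epoch("2021-01-01 00:00:00 +0000", formatter="%Y-%m-%d %H:%M:%S %z")
print(ts)   # -> 1609459200
# Without a formatter the string goes through dateparser.parse instead, and a
# naive result falls back to the local-time strftime('%s') branch.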
4,919 |
def _make_margin_suptitles(fig, renderer, *, w_pad=0, h_pad=0):
# Figure out how large the suptitle is and make the
# top level figure margin larger.
invTransFig = fig.transFigure.inverted().transform_bbox
# get the h_pad and w_pad as distances in the local subfigure coordinates:
padbox = mtransforms.Bbox([[0, 0], [w_pad, h_pad]])
padbox = (fig.transFigure -
fig.transSubfigure).transform_bbox(padbox)
h_pad_local = padbox.height
w_pad_local = padbox.width
for panel in fig.subfigs:
_make_margin_suptitles(panel, renderer, w_pad=w_pad, h_pad=h_pad)
if fig._suptitle is not None and fig._suptitle.get_in_layout():
p = fig._suptitle.get_position()
fig._suptitle.set_position((p[0], 1 - h_pad_local))
bbox = invTransFig(fig._suptitle.get_tightbbox(renderer)) # fig coords
fig._layoutgrid.edit_margin_min('top', bbox.height + 2.0 * h_pad)
if fig._supxlabel is not None and fig._supxlabel.get_in_layout():
p = fig._supxlabel.get_position()
fig._supxlabel.set_position((p[0], h_pad_local))
bbox = invTransFig(fig._supxlabel.get_tightbbox(renderer)) # fig coord
fig._layoutgrid.edit_margin_min('bottom', bbox.height + 2 * h_pad)
if fig._supylabel is not None and fig._supxlabel.get_in_layout():
p = fig._supylabel.get_position()
fig._supylabel.set_position((w_pad_local, p[1]))
bbox = invTransFig(fig._supylabel.get_tightbbox(renderer)) # fig coord
fig._layoutgrid.edit_margin_min('left', bbox.width + 2 * w_pad)
|
def _make_margin_suptitles(fig, renderer, *, w_pad=0, h_pad=0):
# Figure out how large the suptitle is and make the
# top level figure margin larger.
invTransFig = fig.transFigure.inverted().transform_bbox
# get the h_pad and w_pad as distances in the local subfigure coordinates:
padbox = mtransforms.Bbox([[0, 0], [w_pad, h_pad]])
padbox = (fig.transFigure -
fig.transSubfigure).transform_bbox(padbox)
h_pad_local = padbox.height
w_pad_local = padbox.width
for panel in fig.subfigs:
_make_margin_suptitles(panel, renderer, w_pad=w_pad, h_pad=h_pad)
if fig._suptitle is not None and fig._suptitle.get_in_layout():
p = fig._suptitle.get_position()
fig._suptitle.set_position((p[0], 1 - h_pad_local))
bbox = invTransFig(fig._suptitle.get_tightbbox(renderer)) # fig coords
fig._layoutgrid.edit_margin_min('top', bbox.height + 2 * h_pad)
if fig._supxlabel is not None and fig._supxlabel.get_in_layout():
p = fig._supxlabel.get_position()
fig._supxlabel.set_position((p[0], h_pad_local))
bbox = invTransFig(fig._supxlabel.get_tightbbox(renderer)) # fig coord
fig._layoutgrid.edit_margin_min('bottom', bbox.height + 2 * h_pad)
if fig._supylabel is not None and fig._supxlabel.get_in_layout():
p = fig._supylabel.get_position()
fig._supylabel.set_position((w_pad_local, p[1]))
bbox = invTransFig(fig._supylabel.get_tightbbox(renderer)) # fig coord
fig._layoutgrid.edit_margin_min('left', bbox.width + 2 * w_pad)
|
13,419 |
def test_16_delete_the_firs_pool_and_verify_the_system_dataset_moved_to_the_boot_pool(request, pool_data):
payload = {
'cascade': True,
'restart_services': True,
'destroy': True
}
results = POST(f'/pool/id/{pool_data["first_pool"]["id"]}/export/', payload)
assert results.status_code == 200, results.text
job_id = results.json()
job_status = wait_on_job(job_id, 120)
assert job_status['state'] == 'SUCCESS', str(job_status['results'])
results = GET("/systemdataset/")
assert results.status_code == 200, results.text
assert isinstance(results.json(), dict), results.text
assert results.json()['pool'] == 'boot-pool', results.text
assert results.json()['basename'] == 'boot-pool/.system', results.text
|
def test_16_delete_first_pool_and_verify_sysds_moved_to_the_boot_pool(request, pool_data):
payload = {
'cascade': True,
'restart_services': True,
'destroy': True
}
results = POST(f'/pool/id/{pool_data["first_pool"]["id"]}/export/', payload)
assert results.status_code == 200, results.text
job_id = results.json()
job_status = wait_on_job(job_id, 120)
assert job_status['state'] == 'SUCCESS', str(job_status['results'])
results = GET("/systemdataset/")
assert results.status_code == 200, results.text
assert isinstance(results.json(), dict), results.text
assert results.json()['pool'] == 'boot-pool', results.text
assert results.json()['basename'] == 'boot-pool/.system', results.text
|
44,832 |
def test_diviner_model_save_persists_specified_conda_env_in_mlflow_model_directory(
grouped_prophet, model_path, diviner_custom_env
):
mlflow.diviner.save_model(
diviner_model=grouped_prophet, path=model_path, conda_env=diviner_custom_env
)
pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME)
saved_conda_env_path = pathlib.Path(model_path).joinpath(pyfunc_conf[pyfunc.ENV])
assert saved_conda_env_path.exists()
assert str(saved_conda_env_path) != diviner_custom_env
with open(diviner_custom_env, "r") as f:
diviner_custom_env_parsed = yaml.safe_load(f)
with open(str(saved_conda_env_path), "r") as f:
saved_conda_env_parsed = yaml.safe_load(f)
assert saved_conda_env_parsed == diviner_custom_env_parsed
|
def test_diviner_model_save_persists_specified_conda_env_in_mlflow_model_directory(
grouped_prophet, model_path, diviner_custom_env
):
mlflow.diviner.save_model(
diviner_model=grouped_prophet, path=model_path, conda_env=diviner_custom_env
)
pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME)
saved_conda_env_path = pathlib.Path(model_path).joinpath(pyfunc_conf[pyfunc.ENV])
assert saved_conda_env_path.exists()
assert str(saved_conda_env_path) != diviner_custom_env
with open(diviner_custom_env, "r") as f:
diviner_custom_env_parsed = yaml.safe_load(f)
with open(saved_conda_env_path, "r") as f:
saved_conda_env_parsed = yaml.safe_load(f)
assert saved_conda_env_parsed == diviner_custom_env_parsed
|
45,721 |
def reprojection(R_src, R_dst):
"""Reprojects precipitation fields to the domain of another precipitation
field.
Parameters
----------
R_src: xarray
Three-dimensional xarray with dimensions (t, x, y) containing a
time series of precipitation fields. These precipitation fields
will be reprojected.
R_dst: xarray
Xarray containing a precipitation field or a time series of precipitation
fields. The xarray R_src will be reprojected to the domain of R_dst.
Returns
-------
R_rprj: xarray
Three-dimensional xarray with dimensions (t, x, y) containing the
precipitation fields of R_src, but reprojected to the domain of
R_dst.
"""
# Extract the grid info from R_src
src_crs = R_src.attrs["projection"]
x1_src = R_src.x.attrs["x1"]
y2_src = R_src.y.attrs["y2"]
xpixelsize_src = R_src.attrs["xpixelsize"]
ypixelsize_src = R_src.attrs["ypixelsize"]
src_transform = A.translation(float(x1_src), float(y2_src)) * A.scale(
float(xpixelsize_src), float(-ypixelsize_src)
)
# Extract the grid info from R_dst
dst_crs = R_dst.attrs["projection"]
x1_dst = R_dst.x.attrs["x1"]
y2_dst = R_dst.y.attrs["y2"]
xpixelsize_dst = R_dst.attrs["xpixelsize"]
ypixelsize_dst = R_dst.attrs["ypixelsize"]
dst_transform = A.translation(float(x1_dst), float(y2_dst)) * A.scale(
float(xpixelsize_dst), float(-ypixelsize_dst)
)
# Initialise the reprojected (x)array
R_rprj = np.zeros((R_src.shape[0], R_dst.shape[-2], R_dst.shape[-1]))
# For every timestep, reproject the precipitation field of R_src to
# the domain of R_dst
if R_src.attrs["yorigin"] != R_dst.attrs["yorigin"]:
R_src = R_src[:, ::-1, :]
for i in range(R_src.shape[0]):
reproject(
R_src.values[i, :, :],
R_rprj[i, :, :],
src_transform=src_transform,
src_crs=src_crs,
dst_transform=dst_transform,
dst_crs=dst_crs,
resampling=Resampling.nearest,
dst_nodata=np.nan,
)
# Assign the necessary attributes from R_src and R_dst to R_rprj
R_rprj = xr.DataArray(
data=R_rprj,
dims=("t", "y", "x"),
coords=dict(
t=("t", R_src.coords["t"].data),
x=("x", R_dst.coords["x"].data),
y=("y", R_dst.coords["y"].data),
),
)
R_rprj.attrs.update(R_src.attrs)
R_rprj.x.attrs.update(R_dst.x.attrs)
R_rprj.y.attrs.update(R_dst.y.attrs)
for key in ["projection", "yorigin", "xpixelsize", "ypixelsize"]:
R_rprj.attrs[key] = R_dst.attrs[key]
return R_rprj
|
def reprojection(src_array, dst_array):
"""Reprojects precipitation fields to the domain of another precipitation
field.
Parameters
----------
R_src: xarray
Three-dimensional xarray with dimensions (t, x, y) containing a
time series of precipitation fields. These precipitation fields
will be reprojected.
R_dst: xarray
Xarray containing a precipitation field or a time series of precipitation
fields. The xarray R_src will be reprojected to the domain of R_dst.
Returns
-------
R_rprj: xarray
Three-dimensional xarray with dimensions (t, x, y) containing the
precipitation fields of R_src, but reprojected to the domain of
R_dst.
"""
# Extract the grid info from R_src
src_crs = R_src.attrs["projection"]
x1_src = R_src.x.attrs["x1"]
y2_src = R_src.y.attrs["y2"]
xpixelsize_src = R_src.attrs["xpixelsize"]
ypixelsize_src = R_src.attrs["ypixelsize"]
src_transform = A.translation(float(x1_src), float(y2_src)) * A.scale(
float(xpixelsize_src), float(-ypixelsize_src)
)
# Extract the grid info from R_dst
dst_crs = R_dst.attrs["projection"]
x1_dst = R_dst.x.attrs["x1"]
y2_dst = R_dst.y.attrs["y2"]
xpixelsize_dst = R_dst.attrs["xpixelsize"]
ypixelsize_dst = R_dst.attrs["ypixelsize"]
dst_transform = A.translation(float(x1_dst), float(y2_dst)) * A.scale(
float(xpixelsize_dst), float(-ypixelsize_dst)
)
# Initialise the reprojected (x)array
R_rprj = np.zeros((R_src.shape[0], R_dst.shape[-2], R_dst.shape[-1]))
# For every timestep, reproject the precipitation field of R_src to
# the domain of R_dst
if R_src.attrs["yorigin"] != R_dst.attrs["yorigin"]:
R_src = R_src[:, ::-1, :]
for i in range(R_src.shape[0]):
reproject(
R_src.values[i, :, :],
R_rprj[i, :, :],
src_transform=src_transform,
src_crs=src_crs,
dst_transform=dst_transform,
dst_crs=dst_crs,
resampling=Resampling.nearest,
dst_nodata=np.nan,
)
# Assign the necessary attributes from R_src and R_dst to R_rprj
R_rprj = xr.DataArray(
data=R_rprj,
dims=("t", "y", "x"),
coords=dict(
t=("t", R_src.coords["t"].data),
x=("x", R_dst.coords["x"].data),
y=("y", R_dst.coords["y"].data),
),
)
R_rprj.attrs.update(R_src.attrs)
R_rprj.x.attrs.update(R_dst.x.attrs)
R_rprj.y.attrs.update(R_dst.y.attrs)
for key in ["projection", "yorigin", "xpixelsize", "ypixelsize"]:
R_rprj.attrs[key] = R_dst.attrs[key]
return R_rprj
|
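The grid-to-world transforms above are built by translating to the upper-left corner and scaling by the pixel size (negative in y because row indices grow downwards); a small sketch with the affine package, which provides the Affine class that rasterio re-exports and which the listing's A appears to be (illustrative numbers only):
from affine import Affine as A   # dependency of rasterio

x1, y2, dx, dy = 0.0, 1000.0, 10.0, 10.0            # upper-left corner and pixel size
src_transform = A.translation(x1, y2) * A.scale(dx, -dy)
print(src_transform * (0, 0))    # -> (0.0, 1000.0): the upper-left corner
print(src_transform * (1, 1))    # -> (10.0, 990.0): one pixel right and one down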
28,060 |
def add_arguments_to_parser(parser):
"""
Add the subcommand's arguments to the given argparse.ArgumentParser.
"""
parser.add_argument('input',
type=str,
nargs='+',
metavar='file/folder',
help="The analysis result files and/or folders "
"containing analysis results which should be "
"parsed and printed.")
parser.add_argument('--config',
dest='config_file',
required=False,
help="R|Allow the configuration from an "
"explicit JSON based configuration file. "
"The value of the 'parse' key in the "
"config file will be emplaced as command "
"line arguments. The format of "
"configuration file is:\n"
"{\n"
" \"parse\": [\n"
" \"--trim-path-prefix\",\n"
" \"$HOME/workspace\"\n"
" ]\n"
"}")
parser.add_argument('-t', '--type', '--input-format',
dest="input_format",
required=False,
choices=['plist'],
default='plist',
help="Specify the format the analysis results were "
"created as.")
output_opts = parser.add_argument_group("export arguments")
output_opts.add_argument('-e', '--export',
dest="export",
required=False,
choices=EXPORT_TYPES,
help="R|Specify extra output format type.\n"
"'codeclimate' format can be used for "
"Code Climate and for GitLab integration. "
"For more information see:\n"
"https://github.com/codeclimate/platform/"
"blob/master/spec/analyzers/SPEC.md"
"#data-types\n"
"'baseline' output can be used to integrate "
"CodeChecker into your local workflow "
"without using a CodeChecker server. For "
"more information see our usage guide.")
output_opts.add_argument('-o', '--output',
dest="output_path",
default=argparse.SUPPRESS,
help="Store the output in the given file/folder. "
"Note: baseline files must have extension "
"'.baseline'.")
parser.add_argument('--suppress',
type=str,
dest="suppress",
default=argparse.SUPPRESS,
required=False,
help="Path of the suppress file to use. Records in "
"the suppress file are used to suppress the "
"display of certain results when parsing the "
"analyses' report. (Reports to an analysis "
"result can also be suppressed in the source "
"code -- please consult the manual on how to "
"do so.) NOTE: The suppress file relies on the "
"\"bug identifier\" generated by the analyzers "
"which is experimental, take care when relying "
"on it.")
parser.add_argument('--export-source-suppress',
dest="create_suppress",
action="store_true",
required=False,
default=argparse.SUPPRESS,
help="Write suppress data from the suppression "
"annotations found in the source files that were "
"analyzed earlier that created the results. "
"The suppression information will be written "
"to the parameter of '--suppress'.")
parser.add_argument('--print-steps',
dest="print_steps",
action="store_true",
required=False,
default=argparse.SUPPRESS,
help="Print the steps the analyzers took in finding "
"the reported defect.")
parser.add_argument('--trim-path-prefix',
type=str,
nargs='*',
dest="trim_path_prefix",
required=False,
default=argparse.SUPPRESS,
help="Removes leading path from files which will be "
"printed. So if you have /a/b/c/x.cpp and "
"/a/b/c/y.cpp then by removing \"/a/b/\" prefix "
"will print files like c/x.cpp and c/y.cpp. "
"If multiple prefix is given, the longest match "
"will be removed.")
parser.add_argument('--review-status',
nargs='*',
dest="review_status",
metavar='REVIEW_STATUS',
choices=REVIEW_STATUS_VALUES,
default=["confirmed", "unreviewed"],
help="Filter results by review statuses. Valid "
"values are: {0}".format(
', '.join(REVIEW_STATUS_VALUES)))
group = parser.add_argument_group("file filter arguments")
group = group.add_mutually_exclusive_group()
group.add_argument('-i', '--ignore', '--skip',
dest="skipfile",
required=False,
default=argparse.SUPPRESS,
help="Path to the Skipfile dictating which project "
"files should be omitted from analysis. Please "
"consult the User guide on how a Skipfile "
"should be laid out.")
group.add_argument('--file',
nargs='*',
dest="files",
metavar='FILE',
required=False,
default=argparse.SUPPRESS,
help="Filter results by file path. "
"The file path can contain multiple * "
"quantifiers which matches any number of "
"characters (zero or more). So if you have "
"/a/x.cpp and /a/y.cpp then \"/a/*.cpp\" "
"selects both.")
logger.add_verbose_arguments(parser)
parser.set_defaults(
func=main, func_process_config_file=cmd_config.process_config_file)
|
def add_arguments_to_parser(parser):
"""
Add the subcommand's arguments to the given argparse.ArgumentParser.
"""
parser.add_argument('input',
type=str,
nargs='+',
metavar='file/folder',
help="The analysis result files and/or folders "
"containing analysis results which should be "
"parsed and printed.")
parser.add_argument('--config',
dest='config_file',
required=False,
help="R|Allow the configuration from an "
"explicit JSON based configuration file. "
"The value of the 'parse' key in the "
"config file will be emplaced as command "
"line arguments. The format of "
"configuration file is:\n"
"{\n"
" \"parse\": [\n"
" \"--trim-path-prefix\",\n"
" \"$HOME/workspace\"\n"
" ]\n"
"}")
parser.add_argument('-t', '--type', '--input-format',
dest="input_format",
required=False,
choices=['plist'],
default='plist',
help="Specify the format the analysis results were "
"created as.")
output_opts = parser.add_argument_group("export arguments")
output_opts.add_argument('-e', '--export',
dest="export",
required=False,
choices=EXPORT_TYPES,
help="R|Specify extra output format type.\n"
"'codeclimate' format can be used for "
"Code Climate and for GitLab integration. "
"For more information see:\n"
"https://github.com/codeclimate/platform/"
"blob/master/spec/analyzers/SPEC.md"
"#data-types\n"
"'baseline' output can be used to integrate "
"CodeChecker into your local workflow "
"without using a CodeChecker server. For "
"more information see our usage guide.")
output_opts.add_argument('-o', '--output',
dest="output_path",
default=argparse.SUPPRESS,
help="Store the output in the given file/folder. "
"Note: baseline files must have extension "
"'.baseline'.")
parser.add_argument('--suppress',
type=str,
dest="suppress",
default=argparse.SUPPRESS,
required=False,
help="Path of the suppress file to use. Records in "
"the suppress file are used to suppress the "
"display of certain results when parsing the "
"analyses' report. (Reports to an analysis "
"result can also be suppressed in the source "
"code -- please consult the manual on how to "
"do so.) NOTE: The suppress file relies on the "
"\"bug identifier\" generated by the analyzers "
"which is experimental, take care when relying "
"on it.")
parser.add_argument('--export-source-suppress',
dest="create_suppress",
action="store_true",
required=False,
default=argparse.SUPPRESS,
help="Write suppress data from the suppression "
"annotations found in the source files that were "
"analyzed earlier that created the results. "
"The suppression information will be written "
"to the parameter of '--suppress'.")
parser.add_argument('--print-steps',
dest="print_steps",
action="store_true",
required=False,
default=argparse.SUPPRESS,
help="Print the steps the analyzers took in finding "
"the reported defect.")
parser.add_argument('--trim-path-prefix',
type=str,
nargs='*',
dest="trim_path_prefix",
required=False,
default=argparse.SUPPRESS,
help="Removes leading path from files which will be "
"printed. So if you have /a/b/c/x.cpp and "
"/a/b/c/y.cpp then by removing \"/a/b/\" prefix "
"will print files like c/x.cpp and c/y.cpp. "
"If multiple prefix is given, the longest match "
"will be removed.")
parser.add_argument('--review-status',
nargs='*',
dest="review_status",
metavar='REVIEW_STATUS',
choices=REVIEW_STATUS_VALUES,
default=["confirmed", "unreviewed"],
help="Filter results by review statuses. Valid "
"values are: {0}".format(
', '.join(REVIEW_STATUS_VALUES)))
group = parser.add_argument_group("file filter arguments")
group = group.add_mutually_exclusive_group()
group.add_argument('-i', '--ignore', '--skip',
dest="skipfile",
required=False,
default=argparse.SUPPRESS,
help="Path to the Skipfile dictating which project "
"files should be omitted from analysis. Please "
"consult the User guide on how a Skipfile "
"should be laid out.")
group.add_argument('--file',
nargs='+',
dest="files",
metavar='FILE',
required=False,
default=argparse.SUPPRESS,
help="Filter results by file path. "
"The file path can contain multiple * "
"quantifiers which matches any number of "
"characters (zero or more). So if you have "
"/a/x.cpp and /a/y.cpp then \"/a/*.cpp\" "
"selects both.")
logger.add_verbose_arguments(parser)
parser.set_defaults(
func=main, func_process_config_file=cmd_config.process_config_file)
|
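The two versions of the parser above differ in the --file option's nargs; a tiny sketch of the practical difference between '*' and '+':
import argparse

p = argparse.ArgumentParser()
p.add_argument("--file", nargs="+")                      # as in the second version
print(p.parse_args(["--file", "a.cpp", "b.cpp"]).file)   # -> ['a.cpp', 'b.cpp']
# A bare "--file" is rejected with nargs='+' ("expected at least one argument"),
# whereas nargs='*' would accept it and store an empty list.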
6,210 |
def _makeComponentDict(component, setupDict, installedDict, compType, system, runitDict):
componentDict = {
'Setup': component in setupDict.get(compType, []).get(system, []),
'Installed': component in installedDict.get(compType, []).get(system, []),
'RunitStatus': 'Unknown',
'Timeup': 0,
'PID': 0,
}
compDir = system + '_' + component
if compDir in runitDict:
componentDict['RunitStatus'] = runitDict[compDir]['RunitStatus']
componentDict['Timeup'] = runitDict[compDir]['Timeup']
componentDict['PID'] = _safeInt(runitDict[compDir].get('PID', -1))
componentDict['CPU'] = _safeFloat(runitDict[compDir].get('CPU', -1))
componentDict['MEM'] = _safeFloat(runitDict[compDir].get('MEM', -1))
componentDict['RSS'] = _safeFloat(runitDict[compDir].get('RSS', -1))
componentDict['VSZ'] = _safeFloat(runitDict[compDir].get('VSZ', -1))
return componentDict
|
def _makeComponentDict(component, setupDict, installedDict, compType, system, runitDict):
componentDict = {
'Setup': component in setupDict.get(compType, []).get(system, []),
'Installed': component in installedDict.get(compType, {}).get(system, {}),
'RunitStatus': 'Unknown',
'Timeup': 0,
'PID': 0,
}
compDir = system + '_' + component
if compDir in runitDict:
componentDict['RunitStatus'] = runitDict[compDir]['RunitStatus']
componentDict['Timeup'] = runitDict[compDir]['Timeup']
componentDict['PID'] = _safeInt(runitDict[compDir].get('PID', -1))
componentDict['CPU'] = _safeFloat(runitDict[compDir].get('CPU', -1))
componentDict['MEM'] = _safeFloat(runitDict[compDir].get('MEM', -1))
componentDict['RSS'] = _safeFloat(runitDict[compDir].get('RSS', -1))
componentDict['VSZ'] = _safeFloat(runitDict[compDir].get('VSZ', -1))
return componentDict
|
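The 'Installed' lookup above chains .get() calls, so the type of the fallback matters when a key is missing; a short standalone sketch:
setupDict = {}   # hypothetical empty lookup table

# With a list fallback the chained .get() fails, because lists have no .get():
try:
    setupDict.get('Agents', []).get('MySystem', [])
except AttributeError as exc:
    print(exc)                                            # 'list' object has no attribute 'get'

# With a dict fallback the chain degrades gracefully to an empty dict:
print(setupDict.get('Agents', {}).get('MySystem', {}))    # -> {}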
41,498 |
def fit(data, pdf, init_pars=None, par_bounds=None, **kwargs):
"""
Run an unconstrained maximum likelihood fit.
.. note::
:func:`twice_nll` is the objective function given to the optimizer and
is returned evaluated at the best fit model parameters when the optional
kwarg ``return_fitted_val`` is ``True``.
Example:
>>> import pyhf
>>> pyhf.set_backend("numpy")
>>> model = pyhf.simplemodels.hepdata_like(
... signal_data=[12.0, 11.0], bkg_data=[50.0, 52.0], bkg_uncerts=[3.0, 7.0]
... )
>>> observations = [51, 48]
>>> data = pyhf.tensorlib.astensor(observations + model.config.auxdata)
>>> bestfit_pars, twice_nll = pyhf.infer.mle.fit(data, model, return_fitted_val=True)
>>> bestfit_pars
array([0. , 1.0030512 , 0.96266961])
>>> twice_nll
array([24.98393521])
>>> -2 * model.logpdf(bestfit_pars, data) == twice_nll
array([ True])
Args:
data (`tensor`): The data
pdf (~pyhf.pdf.Model): The statistical model adhering to the schema model.json
init_pars (`list`): Values to initialize the model parameters at for the fit
par_bounds (`list` of `list`\s or `tuple`\s): The extrema of values the model parameters are allowed to reach in the fit
kwargs: Keyword arguments passed through to the optimizer API
Returns:
See optimizer API
"""
_, opt = get_backend()
init_pars = init_pars or pdf.config.suggested_init()
par_bounds = par_bounds or pdf.config.suggested_bounds()
return opt.minimize(twice_nll, data, pdf, init_pars, par_bounds, **kwargs)
|
def fit(data, pdf, init_pars=None, par_bounds=None, **kwargs):
"""
Run an unconstrained maximum likelihood fit.
.. note::
:func:`twice_nll` is the objective function given to the optimizer and
is returned evaluated at the best fit model parameters when the optional
kwarg ``return_fitted_val`` is ``True``.
Example:
>>> import pyhf
>>> pyhf.set_backend("numpy")
>>> model = pyhf.simplemodels.hepdata_like(
... signal_data=[12.0, 11.0], bkg_data=[50.0, 52.0], bkg_uncerts=[3.0, 7.0]
... )
>>> observations = [51, 48]
>>> data = pyhf.tensorlib.astensor(observations + model.config.auxdata)
>>> bestfit_pars, twice_nll = pyhf.infer.mle.fit(data, model, return_fitted_val=True)
>>> bestfit_pars
array([0. , 1.0030512 , 0.96266961])
>>> twice_nll
array([24.98393521])
>>> pyhf.infer.mle.twice_nll(bestfit_pars, data, model) == twice_nll
array([ True])
Args:
data (`tensor`): The data
pdf (~pyhf.pdf.Model): The statistical model adhering to the schema model.json
init_pars (`list`): Values to initialize the model parameters at for the fit
par_bounds (`list` of `list`\s or `tuple`\s): The extrema of values the model parameters are allowed to reach in the fit
kwargs: Keyword arguments passed through to the optimizer API
Returns:
See optimizer API
"""
_, opt = get_backend()
init_pars = init_pars or pdf.config.suggested_init()
par_bounds = par_bounds or pdf.config.suggested_bounds()
return opt.minimize(twice_nll, data, pdf, init_pars, par_bounds, **kwargs)
|
30,319 |
def shutdown_agents_request(query, agent_id, group_id):
endpoint_url = 'agents/actions/shutdown'
filters = {}
if query:
filters['query'] = query
if agent_id:
filters['ids'] = agent_id
if group_id:
filters['groupIds'] = group_id
payload = {
"filter": filters,
"data": {}
}
response = http_request('POST', endpoint_url, data=json.dumps(payload))
if response.get('errors'):
return_error(response.get('errors'))
else:
return response
|
def shutdown_agents_request(query, agent_id, group_id):
endpoint_url = 'agents/actions/shutdown'
filters = {}
if query:
filters['query'] = query
if agent_id:
filters['ids'] = agent_id
if group_id:
filters['groupIds'] = group_id
payload = {
'filter': filters,
"data": {}
}
response = http_request('POST', endpoint_url, data=json.dumps(payload))
if response.get('errors'):
return_error(response.get('errors'))
else:
return response
|
20,488 |
def type_change_account_fiscal_position_zips(env):
tables = ['account_fiscal_position', 'account_fiscal_position_template']
zip_fields = ['zip_from', 'zip_to']
for table in tables:
for field in zip_fields:
openupgrade.logged_query(
env.cr,
"ALTER TABLE %(table)s "
"ALTER COLUMN %(field)s "
"TYPE varchar" % {
'table': table,
'field': field,
})
openupgrade.logged_query(
env.cr,
"UPDATE %(table)s "
"SET zip_from = NULL, zip_to = NULL "
"WHERE AND zip_from = '0' AND zip_to = '0'" % {
'table': table,
})
|
def type_change_account_fiscal_position_zips(env):
tables = ['account_fiscal_position', 'account_fiscal_position_template']
zip_fields = ['zip_from', 'zip_to']
for table in tables:
for field in zip_fields:
openupgrade.logged_query(
env.cr,
"ALTER TABLE %(table)s "
"ALTER COLUMN %(field)s "
"TYPE varchar" % {
'table': table,
'field': field,
})
openupgrade.logged_query(
env.cr,
"UPDATE %(table)s "
"SET zip_from = NULL, zip_to = NULL "
"WHERE zip_from = '0' AND zip_to = '0'" % {
'table': table,
})
|
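The UPDATE statement above is rendered with old-style %-formatting before being passed to logged_query; this is what it expands to for one of the tables (using the corrected WHERE clause):
query = (
    "UPDATE %(table)s "
    "SET zip_from = NULL, zip_to = NULL "
    "WHERE zip_from = '0' AND zip_to = '0'" % {'table': 'account_fiscal_position'}
)
print(query)
# -> UPDATE account_fiscal_position SET zip_from = NULL, zip_to = NULL WHERE zip_from = '0' AND zip_to = '0'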
58,055 |
def main():
args = arguments_handler()
ref_branch = args.branch_name
debug_mode = len(sys.argv) >= 2 and 'debug' in sys.argv[1].casefold()
if debug_mode:
enable_console_debug_logging()
gh = Github(os.getenv('CONTENTBOT_GH_ADMIN_TOKEN'), verify=False)
organization = 'demisto'
repo = 'content'
content_repo = gh.get_repo(f'{organization}/{repo}')
master_sha = get_master_commit_sha(content_repo)
if ref_branch:
update_branch(content_repo, ref_branch, master_sha)
return
contrib_base_branches = get_branch_names_with_contrib(content_repo)
print(f'updating {contrib_base_branches=}')
for branch_name in contrib_base_branches:
update_branch(content_repo, branch_name, master_sha)
|
def main():
args = arguments_handler()
ref_branch = args.branch_name
debug_mode = len(sys.argv) >= 2 and 'debug' in sys.argv[1].casefold()
if debug_mode:
enable_console_debug_logging()
gh = Github(os.getenv('CONTENTBOT_GH_ADMIN_TOKEN'), verify=False)
organization = 'demisto'
repo = 'content'
content_repo = gh.get_repo(f'{organization}/{repo}')
master_sha = get_master_commit_sha(content_repo)
if ref_branch:
# Case this flow was triggered on a specific branch
contrib_base_branches = [ref_branch]
else:
# Case we are running scheduled job - detect all contrib/ base branches.
contrib_base_branches = get_branch_names_with_contrib(content_repo)
print(f'Updating {contrib_base_branches=}')
for branch_name in contrib_base_branches:
update_branch(content_repo, branch_name, master_sha)
|
31,704 |
def get_endpoint_properties(single_endpoint):
status = 'Online' if single_endpoint.get('endpoint_status').lower() == 'connected' else 'Offline'
is_isolated = 'No' if 'unisolated' in single_endpoint.get('is_isolated', '').lower() else 'Yes'
hostname = single_endpoint['host_name'] if single_endpoint.get('host_name', '') else single_endpoint.get(
'endpoint_name')
ip = single_endpoint.get('ip')
return status, is_isolated, hostname, ip
|
def get_endpoint_properties(single_endpoint):
status = 'Online' if single_endpoint.get('endpoint_status', '').lower() == 'connected' else 'Offline'
is_isolated = 'No' if 'unisolated' in single_endpoint.get('is_isolated', '').lower() else 'Yes'
hostname = single_endpoint['host_name'] if single_endpoint.get('host_name', '') else single_endpoint.get(
'endpoint_name')
ip = single_endpoint.get('ip')
return status, is_isolated, hostname, ip
|
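A hypothetical endpoint record run through the helper above; the defaulted .get('endpoint_status', '') means a record with no status simply reads as Offline instead of calling .lower() on None:
endpoint = {
    "endpoint_status": "CONNECTED",
    "is_isolated": "AGENT_UNISOLATED",
    "endpoint_name": "host-1",    # no host_name key, so endpoint_name is used
    "ip": "10.0.0.5",
}
print(get_endpoint_properties(endpoint))   # -> ('Online', 'No', 'host-1', '10.0.0.5')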
49,321 |
def _deserialize_data(event, event_type):
"""
Sets the data of the deserialized event to a strongly typed event object if event type exists in _event_mappings.
Otherwise, sets it to None.
:param str event_type: The event_type of the EventGridEvent object or the type of the CloudEvent object.
"""
# if system event type defined, set event.data to system event object
try:
event.data = (_event_mappings[event_type]).deserialize(event.data)
except KeyError: # else, if custom event, then event.data is dict and should be set to None
event.data = None
|
def _deserialize_data(event, event_type):
"""
Sets the data of the deserialized event to a strongly typed event object if event type exists in _event_mappings.
Otherwise, sets it to None.
:param str event_type: The event_type of the EventGridEvent object or the type of the CloudEvent object.
"""
# if system event type defined, set event.data to system event object
try:
event.data = (_event_mappings[event_type]).deserialize(event.data)
except KeyError: # else, if custom event, then event.data is dict and should be set to None
event.data = None
|
25,781 |
def get_clustering_from_busmap(network, busmap, with_time=True, global_constraints=True, line_length_factor=1.0,
aggregate_generators_weighted=False, aggregate_one_ports={},
aggregate_generators_carriers=None,
scale_link_capital_costs=True,
bus_strategies=dict(), one_port_strategies=dict(),
generator_strategies=dict()):
buses, linemap, linemap_p, linemap_n, lines = get_buses_linemap_and_lines(network, busmap, line_length_factor, bus_strategies)
network_c = Network()
io.import_components_from_dataframe(network_c, buses, "Bus")
io.import_components_from_dataframe(network_c, lines, "Line")
# Carry forward global constraints to clustered network.
if global_constraints:
network_c.global_constraints = network.global_constraints
if with_time:
network_c.set_snapshots(network.snapshots)
network_c.snapshot_weightings = network.snapshot_weightings.copy()
one_port_components = network.one_port_components.copy()
if aggregate_generators_weighted:
one_port_components.remove("Generator")
generators, generators_pnl = aggregategenerators(network, busmap, with_time=with_time,
carriers=aggregate_generators_carriers,
custom_strategies=generator_strategies)
io.import_components_from_dataframe(network_c, generators, "Generator")
if with_time:
for attr, df in generators_pnl.items():
if not df.empty:
io.import_series_from_dataframe(network_c, df, "Generator", attr)
for one_port in aggregate_one_ports:
one_port_components.remove(one_port)
new_df, new_pnl = aggregateoneport(network, busmap, component=one_port, with_time=with_time,
custom_strategies=one_port_strategies.get(one_port, {}))
io.import_components_from_dataframe(network_c, new_df, one_port)
for attr, df in new_pnl.items():
io.import_series_from_dataframe(network_c, df, one_port, attr)
##
# Collect remaining one ports
for c in network.iterate_components(one_port_components):
io.import_components_from_dataframe(
network_c,
c.df.assign(bus=c.df.bus.map(busmap)).dropna(subset=['bus']),
c.name
)
if with_time:
for c in network.iterate_components(one_port_components):
for attr, df in c.pnl.items():
if not df.empty:
io.import_series_from_dataframe(network_c, df, c.name, attr)
new_links = (network.links.assign(bus0=network.links.bus0.map(busmap),
bus1=network.links.bus1.map(busmap))
.dropna(subset=['bus0', 'bus1'])
.loc[lambda df: df.bus0 != df.bus1])
new_links['length'] = np.where(
new_links.length.notnull() & (new_links.length > 0),
line_length_factor *
haversine_pts(buses.loc[new_links['bus0'], ['x', 'y']],
buses.loc[new_links['bus1'], ['x', 'y']]),
0
)
if scale_link_capital_costs:
new_links['capital_cost'] *= (new_links.length/network.links.length).fillna(1)
io.import_components_from_dataframe(network_c, new_links, "Link")
if with_time:
for attr, df in network.links_t.items():
if not df.empty:
io.import_series_from_dataframe(network_c, df, "Link", attr)
io.import_components_from_dataframe(network_c, network.carriers, "Carrier")
network_c.determine_network_topology()
return Clustering(network_c, busmap, linemap, linemap_p, linemap_n)
|
def get_clustering_from_busmap(network, busmap, with_time=True, line_length_factor=1.0,
aggregate_generators_weighted=False, aggregate_one_ports={},
aggregate_generators_carriers=None,
scale_link_capital_costs=True,
bus_strategies=dict(), one_port_strategies=dict(),
generator_strategies=dict()):
buses, linemap, linemap_p, linemap_n, lines = get_buses_linemap_and_lines(network, busmap, line_length_factor, bus_strategies)
network_c = Network()
io.import_components_from_dataframe(network_c, buses, "Bus")
io.import_components_from_dataframe(network_c, lines, "Line")
# Carry forward global constraints to clustered network.
if global_constraints:
network_c.global_constraints = network.global_constraints
if with_time:
network_c.set_snapshots(network.snapshots)
network_c.snapshot_weightings = network.snapshot_weightings.copy()
one_port_components = network.one_port_components.copy()
if aggregate_generators_weighted:
one_port_components.remove("Generator")
generators, generators_pnl = aggregategenerators(network, busmap, with_time=with_time,
carriers=aggregate_generators_carriers,
custom_strategies=generator_strategies)
io.import_components_from_dataframe(network_c, generators, "Generator")
if with_time:
for attr, df in generators_pnl.items():
if not df.empty:
io.import_series_from_dataframe(network_c, df, "Generator", attr)
for one_port in aggregate_one_ports:
one_port_components.remove(one_port)
new_df, new_pnl = aggregateoneport(network, busmap, component=one_port, with_time=with_time,
custom_strategies=one_port_strategies.get(one_port, {}))
io.import_components_from_dataframe(network_c, new_df, one_port)
for attr, df in new_pnl.items():
io.import_series_from_dataframe(network_c, df, one_port, attr)
##
# Collect remaining one ports
for c in network.iterate_components(one_port_components):
io.import_components_from_dataframe(
network_c,
c.df.assign(bus=c.df.bus.map(busmap)).dropna(subset=['bus']),
c.name
)
if with_time:
for c in network.iterate_components(one_port_components):
for attr, df in c.pnl.items():
if not df.empty:
io.import_series_from_dataframe(network_c, df, c.name, attr)
new_links = (network.links.assign(bus0=network.links.bus0.map(busmap),
bus1=network.links.bus1.map(busmap))
.dropna(subset=['bus0', 'bus1'])
.loc[lambda df: df.bus0 != df.bus1])
new_links['length'] = np.where(
new_links.length.notnull() & (new_links.length > 0),
line_length_factor *
haversine_pts(buses.loc[new_links['bus0'], ['x', 'y']],
buses.loc[new_links['bus1'], ['x', 'y']]),
0
)
if scale_link_capital_costs:
new_links['capital_cost'] *= (new_links.length/network.links.length).fillna(1)
io.import_components_from_dataframe(network_c, new_links, "Link")
if with_time:
for attr, df in network.links_t.items():
if not df.empty:
io.import_series_from_dataframe(network_c, df, "Link", attr)
io.import_components_from_dataframe(network_c, network.carriers, "Carrier")
network_c.determine_network_topology()
return Clustering(network_c, busmap, linemap, linemap_p, linemap_n)
|
21,645 |
def setup(config_options):
"""
Args:
config_options_options: The options passed to Synapse. Usually
`sys.argv[1:]`.
Returns:
HomeServer
"""
try:
config = HomeServerConfig.load_or_generate_config(
"Synapse Homeserver", config_options
)
except ConfigError as e:
sys.stderr.write("\n")
for f in format_config_error(e):
sys.stderr.write(f)
sys.stderr.write("\n")
sys.exit(1)
if not config:
# If a config isn't returned, and an exception isn't raised, we're just
# generating config files and shouldn't try to continue.
sys.exit(0)
events.USE_FROZEN_DICTS = config.use_frozen_dicts
synapse.util.caches.lrucache.TRACK_MEMORY_USAGE = config.caches.track_memory_usage
hs = SynapseHomeServer(
config.server_name,
config=config,
version_string="Synapse/" + get_version_string(synapse),
)
synapse.config.logger.setup_logging(hs, config, use_worker_options=False)
logger.info("Setting up server")
try:
hs.setup()
except IncorrectDatabaseSetup as e:
quit_with_error(str(e))
except UpgradeDatabaseException as e:
quit_with_error("Failed to upgrade database: %s" % (e,))
async def do_acme() -> bool:
"""
Reprovision an ACME certificate, if it's required.
Returns:
Whether the cert has been updated.
"""
acme = hs.get_acme_handler()
# Check how long the certificate is active for.
cert_days_remaining = hs.config.is_disk_cert_valid(allow_self_signed=False)
# We want to reprovision if cert_days_remaining is None (meaning no
# certificate exists), or the days remaining number it returns
# is less than our re-registration threshold.
provision = False
if (
cert_days_remaining is None
or cert_days_remaining < hs.config.acme_reprovision_threshold
):
provision = True
if provision:
await acme.provision_certificate()
return provision
async def reprovision_acme():
"""
Provision a certificate from ACME, if required, and reload the TLS
certificate if it's renewed.
"""
reprovisioned = await do_acme()
if reprovisioned:
_base.refresh_certificate(hs)
async def start():
# Run the ACME provisioning code, if it's enabled.
if hs.config.acme_enabled:
acme = hs.get_acme_handler()
# Start up the webservices which we will respond to ACME
# challenges with, and then provision.
await acme.start_listening()
await do_acme()
# Check if it needs to be reprovisioned every day.
hs.get_clock().looping_call(reprovision_acme, 24 * 60 * 60 * 1000)
# Load the OIDC provider metadatas, if OIDC is enabled.
if hs.config.oidc_enabled:
oidc = hs.get_oidc_handler()
# Loading the provider metadata also ensures the provider config is valid.
await oidc.load_metadata()
await _base.start(hs, config.listeners)
hs.get_datastore().db_pool.updates.start_doing_background_updates()
register_start(start)
return hs
|
def setup(config_options):
"""
Args:
config_options_options: The options passed to Synapse. Usually
`sys.argv[1:]`.
Returns:
HomeServer
"""
try:
config = HomeServerConfig.load_or_generate_config(
"Synapse Homeserver", config_options
)
except ConfigError as e:
sys.stderr.write("\n")
for f in format_config_error(e):
sys.stderr.write(f)
sys.stderr.write("\n")
sys.exit(1)
if not config:
# If a config isn't returned, and an exception isn't raised, we're just
# generating config files and shouldn't try to continue.
sys.exit(0)
events.USE_FROZEN_DICTS = config.use_frozen_dicts
synapse.util.caches.TRACK_MEMORY_USAGE = config.caches.track_memory_usage
hs = SynapseHomeServer(
config.server_name,
config=config,
version_string="Synapse/" + get_version_string(synapse),
)
synapse.config.logger.setup_logging(hs, config, use_worker_options=False)
logger.info("Setting up server")
try:
hs.setup()
except IncorrectDatabaseSetup as e:
quit_with_error(str(e))
except UpgradeDatabaseException as e:
quit_with_error("Failed to upgrade database: %s" % (e,))
async def do_acme() -> bool:
"""
Reprovision an ACME certificate, if it's required.
Returns:
Whether the cert has been updated.
"""
acme = hs.get_acme_handler()
# Check how long the certificate is active for.
cert_days_remaining = hs.config.is_disk_cert_valid(allow_self_signed=False)
# We want to reprovision if cert_days_remaining is None (meaning no
# certificate exists), or the days remaining number it returns
# is less than our re-registration threshold.
provision = False
if (
cert_days_remaining is None
or cert_days_remaining < hs.config.acme_reprovision_threshold
):
provision = True
if provision:
await acme.provision_certificate()
return provision
async def reprovision_acme():
"""
Provision a certificate from ACME, if required, and reload the TLS
certificate if it's renewed.
"""
reprovisioned = await do_acme()
if reprovisioned:
_base.refresh_certificate(hs)
async def start():
# Run the ACME provisioning code, if it's enabled.
if hs.config.acme_enabled:
acme = hs.get_acme_handler()
# Start up the webservices which we will respond to ACME
# challenges with, and then provision.
await acme.start_listening()
await do_acme()
# Check if it needs to be reprovisioned every day.
hs.get_clock().looping_call(reprovision_acme, 24 * 60 * 60 * 1000)
# Load the OIDC provider metadatas, if OIDC is enabled.
if hs.config.oidc_enabled:
oidc = hs.get_oidc_handler()
# Loading the provider metadata also ensures the provider config is valid.
await oidc.load_metadata()
await _base.start(hs, config.listeners)
hs.get_datastore().db_pool.updates.start_doing_background_updates()
register_start(start)
return hs
|
1,566 |
def confusion_matrix(y_true, y_pred, labels=None, sample_weight=None,
normalize=None):
"""Compute confusion matrix to evaluate the accuracy of a classification.
By definition a confusion matrix :math:`C` is such that :math:`C_{i, j}`
is equal to the number of observations known to be in group :math:`i` and
predicted to be in group :math:`j`.
Thus in binary classification, the count of true negatives is
:math:`C_{0,0}`, false negatives is :math:`C_{1,0}`, true positives is
:math:`C_{1,1}` and false positives is :math:`C_{0,1}`.
Read more in the :ref:`User Guide <confusion_matrix>`.
Parameters
----------
y_true : array-like of shape (n_samples,)
Ground truth (correct) target values.
y_pred : array-like of shape (n_samples,)
Estimated targets as returned by a classifier.
labels : array-like of shape (n_classes), default=None
List of labels to index the matrix. This may be used to reorder
or select a subset of labels.
If ``None`` is given, those that appear at least once
in ``y_true`` or ``y_pred`` are used in sorted order.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
normalize : {'true', 'pred', 'all'}, default=None
Normalizes confusion matrix over the true (rows), predicted (columns)
conditions or all the population. If None, confusion matrix will not be
normalized.
Returns
-------
C : ndarray of shape (n_classes, n_classes)
Confusion matrix.
References
----------
.. [1] `Wikipedia entry for the Confusion matrix
<https://en.wikipedia.org/wiki/Confusion_matrix>`_
(Wikipedia and other references may use a different
convention for axes)
Examples
--------
>>> from sklearn.metrics import confusion_matrix
>>> y_true = [2, 0, 2, 2, 0, 1]
>>> y_pred = [0, 0, 2, 2, 0, 2]
>>> confusion_matrix(y_true, y_pred)
array([[2, 0, 0],
[0, 0, 1],
[1, 0, 2]])
>>> y_true = ["cat", "ant", "cat", "cat", "ant", "bird"]
>>> y_pred = ["ant", "ant", "cat", "cat", "ant", "cat"]
>>> confusion_matrix(y_true, y_pred, labels=["ant", "bird", "cat"])
array([[2, 0, 0],
[0, 0, 1],
[1, 0, 2]])
In the binary case, we can extract true positives, etc as follows:
>>> tn, fp, fn, tp = confusion_matrix([0, 1, 0, 1], [1, 1, 1, 0]).ravel()
>>> (tn, fp, fn, tp)
(0, 2, 1, 1)
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type not in ("binary", "multiclass"):
raise ValueError("%s is not supported" % y_type)
if labels is None:
labels = unique_labels(y_true, y_pred)
else:
labels = np.asarray(labels)
if np.all([l not in y_true for l in labels]):
raise ValueError("At least one label specified must be in y_true")
if sample_weight is None:
sample_weight = np.ones(y_true.shape[0], dtype=np.int64)
else:
sample_weight = np.asarray(sample_weight)
check_consistent_length(y_true, y_pred, sample_weight)
if normalize not in {'true', 'pred', 'all', None}:
raise ValueError("normalize must be one of {'true', 'pred', "
"'all', None}")
n_labels = labels.size
label_to_ind = {y: x for x, y in enumerate(labels)}
# convert yt, yp into index
y_pred = np.array([label_to_ind.get(x, n_labels + 1) for x in y_pred])
y_true = np.array([label_to_ind.get(x, n_labels + 1) for x in y_true])
# intersect y_pred, y_true with labels, eliminate items not in labels
ind = np.logical_and(y_pred < n_labels, y_true < n_labels)
y_pred = y_pred[ind]
y_true = y_true[ind]
# also eliminate weights of eliminated items
sample_weight = sample_weight[ind]
# Choose the accumulator dtype to always have high precision
if sample_weight.dtype.kind in {'i', 'u', 'b'}:
dtype = np.int64
else:
dtype = np.float64
cm = coo_matrix((sample_weight, (y_true, y_pred)),
shape=(n_labels, n_labels), dtype=dtype,
).toarray()
with np.errstate(all='ignore'):
if normalize == 'true':
cm = cm / cm.sum(axis=1, keepdims=True)
elif normalize == 'pred':
cm = cm / cm.sum(axis=0, keepdims=True)
elif normalize == 'all':
cm = cm / cm.sum()
cm = np.nan_to_num(cm)
return cm
|
def confusion_matrix(y_true, y_pred, labels=None, sample_weight=None,
normalize=None):
"""Compute confusion matrix to evaluate the accuracy of a classification.
By definition a confusion matrix :math:`C` is such that :math:`C_{i, j}`
is equal to the number of observations known to be in group :math:`i` and
predicted to be in group :math:`j`.
Thus in binary classification, the count of true negatives is
:math:`C_{0,0}`, false negatives is :math:`C_{1,0}`, true positives is
:math:`C_{1,1}` and false positives is :math:`C_{0,1}`.
Read more in the :ref:`User Guide <confusion_matrix>`.
Parameters
----------
y_true : array-like of shape (n_samples,)
Ground truth (correct) target values.
y_pred : array-like of shape (n_samples,)
Estimated targets as returned by a classifier.
labels : array-like of shape (n_classes), default=None
List of labels to index the matrix. This may be used to reorder
or select a subset of labels.
If ``None`` is given, those that appear at least once
in ``y_true`` or ``y_pred`` are used in sorted order.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
normalize : {'true', 'pred', 'all'}, default=None
Normalizes confusion matrix over the true (rows), predicted (columns)
conditions or all the population. If None, confusion matrix will not be
normalized.
Returns
-------
C : ndarray of shape (n_classes, n_classes)
Confusion matrix.
References
----------
.. [1] `Wikipedia entry for the Confusion matrix
<https://en.wikipedia.org/wiki/Confusion_matrix>`_
(Wikipedia and other references may use a different
convention for axes)
Examples
--------
>>> from sklearn.metrics import confusion_matrix
>>> y_true = [2, 0, 2, 2, 0, 1]
>>> y_pred = [0, 0, 2, 2, 0, 2]
>>> confusion_matrix(y_true, y_pred)
array([[2, 0, 0],
[0, 0, 1],
[1, 0, 2]])
>>> y_true = ["cat", "ant", "cat", "cat", "ant", "bird"]
>>> y_pred = ["ant", "ant", "cat", "cat", "ant", "cat"]
>>> confusion_matrix(y_true, y_pred, labels=["ant", "bird", "cat"])
array([[2, 0, 0],
[0, 0, 1],
[1, 0, 2]])
In the binary case, we can extract true positives, etc as follows:
>>> tn, fp, fn, tp = confusion_matrix([0, 1, 0, 1], [1, 1, 1, 0]).ravel()
>>> (tn, fp, fn, tp)
(0, 2, 1, 1)
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type not in ("binary", "multiclass"):
raise ValueError("%s is not supported" % y_type)
if labels is None:
labels = unique_labels(y_true, y_pred)
else:
labels = np.asarray(labels)
if np.all([l not in y_true for l in labels]):
raise ValueError("At least one label specified must be in y_true")
if sample_weight is None:
sample_weight = np.ones(y_true.shape[0], dtype=np.int64)
else:
sample_weight = np.asarray(sample_weight)
check_consistent_length(y_true, y_pred, sample_weight)
if normalize not in ['true', 'pred', 'all', None]:
raise ValueError("normalize must be one of {'true', 'pred', "
"'all', None}")
n_labels = labels.size
label_to_ind = {y: x for x, y in enumerate(labels)}
# convert yt, yp into index
y_pred = np.array([label_to_ind.get(x, n_labels + 1) for x in y_pred])
y_true = np.array([label_to_ind.get(x, n_labels + 1) for x in y_true])
# intersect y_pred, y_true with labels, eliminate items not in labels
ind = np.logical_and(y_pred < n_labels, y_true < n_labels)
y_pred = y_pred[ind]
y_true = y_true[ind]
# also eliminate weights of eliminated items
sample_weight = sample_weight[ind]
# Choose the accumulator dtype to always have high precision
if sample_weight.dtype.kind in {'i', 'u', 'b'}:
dtype = np.int64
else:
dtype = np.float64
cm = coo_matrix((sample_weight, (y_true, y_pred)),
shape=(n_labels, n_labels), dtype=dtype,
).toarray()
with np.errstate(all='ignore'):
if normalize == 'true':
cm = cm / cm.sum(axis=1, keepdims=True)
elif normalize == 'pred':
cm = cm / cm.sum(axis=0, keepdims=True)
elif normalize == 'all':
cm = cm / cm.sum()
cm = np.nan_to_num(cm)
return cm
|
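The `normalize` parameter documented above rescales the raw counts: 'true' divides each row by its row sum, 'pred' divides each column by its column sum, and 'all' divides by the total weight. A minimal usage sketch (assuming scikit-learn is installed; the inputs reuse the docstring's own example):

import numpy as np
from sklearn.metrics import confusion_matrix

y_true = [2, 0, 2, 2, 0, 1]
y_pred = [0, 0, 2, 2, 0, 2]

# Raw counts: rows are true labels, columns are predicted labels.
cm = confusion_matrix(y_true, y_pred)

# normalize='true' divides each row by its row sum, so every row sums to 1.
cm_true = confusion_matrix(y_true, y_pred, normalize='true')
assert np.allclose(cm_true.sum(axis=1), 1.0)

# normalize='all' divides by the total number of (weighted) samples.
cm_all = confusion_matrix(y_true, y_pred, normalize='all')
assert np.isclose(cm_all.sum(), 1.0)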
57,848 |
def main():
install_logging('Prepare_Content_Packs_For_Testing.log')
option = option_handler()
packs_artifacts_path = option.artifacts_path
extract_destination_path = option.extract_path
storage_bucket_name = option.bucket_name
service_account = option.service_account
target_packs = option.pack_names if option.pack_names else ""
build_number = option.ci_build_number if option.ci_build_number else str(uuid.uuid4())
override_all_packs = option.override_all_packs
signature_key = option.key_string
packs_dependencies_mapping = load_json(option.pack_dependencies) if option.pack_dependencies else {}
storage_base_path = option.storage_base_path
remove_test_playbooks = option.remove_test_playbooks
is_bucket_upload_flow = option.bucket_upload
private_bucket_name = option.private_bucket_name
ci_branch = option.ci_branch
force_upload = option.force_upload
# google cloud storage client initialized
storage_client = init_storage_client(service_account)
storage_bucket = storage_client.bucket(storage_bucket_name)
if storage_base_path:
GCPConfig.STORAGE_BASE_PATH = storage_base_path
# Relevant when triggering test upload flow
if storage_bucket_name:
GCPConfig.PRODUCTION_BUCKET = storage_bucket_name
# download and extract index from public bucket
index_folder_path, index_blob, index_generation = download_and_extract_index(storage_bucket,
extract_destination_path)
# content repo client initialized
content_repo = get_content_git_client(CONTENT_ROOT_PATH)
current_commit_hash, previous_commit_hash = get_recent_commits_data(content_repo, index_folder_path,
is_bucket_upload_flow, ci_branch)
# detect packs to upload
pack_names = get_packs_names(target_packs, previous_commit_hash)
extract_packs_artifacts(packs_artifacts_path, extract_destination_path)
packs_list = [Pack(pack_name, os.path.join(extract_destination_path, pack_name)) for pack_name in pack_names
if os.path.exists(os.path.join(extract_destination_path, pack_name))]
diff_files_list = content_repo.commit(current_commit_hash).diff(content_repo.commit(previous_commit_hash))
# taking care of private packs
is_private_content_updated, private_packs, updated_private_packs_ids = handle_private_content(
index_folder_path, private_bucket_name, extract_destination_path, storage_client, pack_names
)
if not option.override_all_packs:
check_if_index_is_updated(index_folder_path, content_repo, current_commit_hash, previous_commit_hash,
storage_bucket, is_private_content_updated)
# initiate the statistics handler for marketplace packs
statistics_handler = StatisticsHandler(service_account, index_folder_path)
# clean index and gcs from non existing or invalid packs
clean_non_existing_packs(index_folder_path, private_packs, storage_bucket)
# Packages that depend on new packs that are not in the previous index.json
packs_missing_dependencies = []
# starting iteration over packs
for pack in packs_list:
task_status, user_metadata = pack.load_user_metadata()
if not task_status:
pack.status = PackStatus.FAILED_LOADING_USER_METADATA.value
pack.cleanup()
continue
task_status = pack.collect_content_items()
if not task_status:
pack.status = PackStatus.FAILED_COLLECT_ITEMS.name
pack.cleanup()
continue
task_status = pack.upload_integration_images(storage_bucket, diff_files_list, True)
if not task_status:
pack.status = PackStatus.FAILED_IMAGES_UPLOAD.name
pack.cleanup()
continue
task_status = pack.upload_author_image(storage_bucket, diff_files_list, True)
if not task_status:
pack.status = PackStatus.FAILED_AUTHOR_IMAGE_UPLOAD.name
pack.cleanup()
continue
task_status, modified_pack_files_paths, pack_was_modified = pack.detect_modified(
content_repo, index_folder_path, current_commit_hash, previous_commit_hash)
if not task_status:
pack.status = PackStatus.FAILED_DETECTING_MODIFIED_FILES.name
pack.cleanup()
continue
task_status = pack.format_metadata(user_metadata, index_folder_path, packs_dependencies_mapping, build_number,
current_commit_hash, pack_was_modified, statistics_handler, pack_names)
if pack.is_missing_dependencies:
# If the pack is dependent on a new pack (which is not yet in the index.json)
# we will note that it is missing dependencies.
# And finally after updating all the packages in index.json.
# We will go over the pack again to add what was missing
packs_missing_dependencies.append(pack)
if not task_status:
pack.status = PackStatus.FAILED_METADATA_PARSING.name
pack.cleanup()
continue
task_status, not_updated_build = pack.prepare_release_notes(index_folder_path, build_number, pack_was_modified,
modified_pack_files_paths)
if not task_status:
pack.status = PackStatus.FAILED_RELEASE_NOTES.name
pack.cleanup()
continue
if not_updated_build:
pack.status = PackStatus.PACK_IS_NOT_UPDATED_IN_RUNNING_BUILD.name
pack.cleanup()
continue
task_status = pack.remove_unwanted_files(remove_test_playbooks)
if not task_status:
pack.status = PackStatus.FAILED_REMOVING_PACK_SKIPPED_FOLDERS.name
pack.cleanup()
continue
task_status = pack.sign_pack(signature_key)
if not task_status:
pack.status = PackStatus.FAILED_SIGNING_PACKS.name
pack.cleanup()
continue
task_status, zip_pack_path = pack.zip_pack()
if not task_status:
pack.status = PackStatus.FAILED_ZIPPING_PACK_ARTIFACTS.name
pack.cleanup()
continue
task_status, skipped_upload, _ = pack.upload_to_storage(zip_pack_path, pack.latest_version, storage_bucket,
override_all_packs or pack_was_modified)
if not task_status:
pack.status = PackStatus.FAILED_UPLOADING_PACK.name
pack.cleanup()
continue
task_status, exists_in_index = pack.check_if_exists_in_index(index_folder_path)
if not task_status:
pack.status = PackStatus.FAILED_SEARCHING_PACK_IN_INDEX.name
pack.cleanup()
continue
task_status = pack.prepare_for_index_upload()
if not task_status:
pack.status = PackStatus.FAILED_PREPARING_INDEX_FOLDER.name
pack.cleanup()
continue
task_status = update_index_folder(index_folder_path=index_folder_path, pack_name=pack.name, pack_path=pack.path,
pack_version=pack.latest_version, hidden_pack=pack.hidden)
if not task_status:
pack.status = PackStatus.FAILED_UPDATING_INDEX_FOLDER.name
pack.cleanup()
continue
# in case that pack already exist at cloud storage path and in index, don't show that the pack was changed
if skipped_upload and exists_in_index:
logging.info(f"{pack.name} pack status is {PackStatus.PACK_ALREADY_EXISTS.name}")
pack.status = PackStatus.PACK_ALREADY_EXISTS.name
pack.cleanup()
continue
pack.status = PackStatus.SUCCESS.name
logging.info(f"packs_missing_dependencies: {packs_missing_dependencies}")
# go over all the packs that were marked as missing dependencies and update them with the new index.json
for pack in packs_missing_dependencies:
task_status = pack.format_metadata(user_metadata, index_folder_path, packs_dependencies_mapping, build_number,
current_commit_hash, pack_was_modified, statistics_handler, pack_names, True)
if not task_status:
pack.status = PackStatus.FAILED_METADATA_PARSING.name
pack.cleanup()
continue
task_status = update_index_folder(index_folder_path=index_folder_path, pack_name=pack.name, pack_path=pack.path,
pack_version=pack.latest_version, hidden_pack=pack.hidden)
if not task_status:
pack.status = PackStatus.FAILED_UPDATING_INDEX_FOLDER.name
pack.cleanup()
continue
pack.status = PackStatus.SUCCESS.name
# upload core packs json to bucket
create_corepacks_config(storage_bucket, build_number, index_folder_path,
artifacts_dir=os.path.dirname(packs_artifacts_path))
# finished iteration over content packs
upload_index_to_storage(index_folder_path=index_folder_path, extract_destination_path=extract_destination_path,
index_blob=index_blob, build_number=build_number, private_packs=private_packs,
current_commit_hash=current_commit_hash, index_generation=index_generation,
force_upload=force_upload, previous_commit_hash=previous_commit_hash,
landing_page_sections=statistics_handler.landing_page_sections,
artifacts_dir=os.path.dirname(packs_artifacts_path),
storage_bucket=storage_bucket,
)
# get the lists of packs divided by their status
successful_packs, skipped_packs, failed_packs = get_packs_summary(packs_list)
# Store successful and failed packs list in CircleCI artifacts - to be used in Upload Packs To Marketplace job
packs_results_file_path = os.path.join(os.path.dirname(packs_artifacts_path), BucketUploadFlow.PACKS_RESULTS_FILE)
store_successful_and_failed_packs_in_ci_artifacts(
packs_results_file_path, BucketUploadFlow.PREPARE_CONTENT_FOR_TESTING, successful_packs, failed_packs,
updated_private_packs_ids, images_data=get_images_data(packs_list)
)
# summary of packs status
print_packs_summary(successful_packs, skipped_packs, failed_packs, not is_bucket_upload_flow)
|
def main():
install_logging('Prepare_Content_Packs_For_Testing.log')
option = option_handler()
packs_artifacts_path = option.artifacts_path
extract_destination_path = option.extract_path
storage_bucket_name = option.bucket_name
service_account = option.service_account
target_packs = option.pack_names if option.pack_names else ""
build_number = option.ci_build_number if option.ci_build_number else str(uuid.uuid4())
override_all_packs = option.override_all_packs
signature_key = option.key_string
packs_dependencies_mapping = load_json(option.pack_dependencies) if option.pack_dependencies else {}
storage_base_path = option.storage_base_path
remove_test_playbooks = option.remove_test_playbooks
is_bucket_upload_flow = option.bucket_upload
private_bucket_name = option.private_bucket_name
ci_branch = option.ci_branch
force_upload = option.force_upload
# google cloud storage client initialized
storage_client = init_storage_client(service_account)
storage_bucket = storage_client.bucket(storage_bucket_name)
if storage_base_path:
GCPConfig.STORAGE_BASE_PATH = storage_base_path
# Relevant when triggering test upload flow
if storage_bucket_name:
GCPConfig.PRODUCTION_BUCKET = storage_bucket_name
# download and extract index from public bucket
index_folder_path, index_blob, index_generation = download_and_extract_index(storage_bucket,
extract_destination_path)
# content repo client initialized
content_repo = get_content_git_client(CONTENT_ROOT_PATH)
current_commit_hash, previous_commit_hash = get_recent_commits_data(content_repo, index_folder_path,
is_bucket_upload_flow, ci_branch)
# detect packs to upload
pack_names = get_packs_names(target_packs, previous_commit_hash)
extract_packs_artifacts(packs_artifacts_path, extract_destination_path)
packs_list = [Pack(pack_name, os.path.join(extract_destination_path, pack_name)) for pack_name in pack_names
if os.path.exists(os.path.join(extract_destination_path, pack_name))]
diff_files_list = content_repo.commit(current_commit_hash).diff(content_repo.commit(previous_commit_hash))
# taking care of private packs
is_private_content_updated, private_packs, updated_private_packs_ids = handle_private_content(
index_folder_path, private_bucket_name, extract_destination_path, storage_client, pack_names
)
if not option.override_all_packs:
check_if_index_is_updated(index_folder_path, content_repo, current_commit_hash, previous_commit_hash,
storage_bucket, is_private_content_updated)
# initiate the statistics handler for marketplace packs
statistics_handler = StatisticsHandler(service_account, index_folder_path)
# clean index and gcs from non existing or invalid packs
clean_non_existing_packs(index_folder_path, private_packs, storage_bucket)
# Packages that depend on new packs that are not in the previous index.json
packs_missing_dependencies = []
# starting iteration over packs
for pack in packs_list:
task_status, user_metadata = pack.load_user_metadata()
if not task_status:
pack.status = PackStatus.FAILED_LOADING_USER_METADATA.value
pack.cleanup()
continue
task_status = pack.collect_content_items()
if not task_status:
pack.status = PackStatus.FAILED_COLLECT_ITEMS.name
pack.cleanup()
continue
task_status = pack.upload_integration_images(storage_bucket, diff_files_list, True)
if not task_status:
pack.status = PackStatus.FAILED_IMAGES_UPLOAD.name
pack.cleanup()
continue
task_status = pack.upload_author_image(storage_bucket, diff_files_list, True)
if not task_status:
pack.status = PackStatus.FAILED_AUTHOR_IMAGE_UPLOAD.name
pack.cleanup()
continue
task_status, modified_pack_files_paths, pack_was_modified = pack.detect_modified(
content_repo, index_folder_path, current_commit_hash, previous_commit_hash)
if not task_status:
pack.status = PackStatus.FAILED_DETECTING_MODIFIED_FILES.name
pack.cleanup()
continue
task_status = pack.format_metadata(user_metadata, index_folder_path, packs_dependencies_mapping, build_number,
current_commit_hash, pack_was_modified, statistics_handler, pack_names)
if pack.is_missing_dependencies:
# If the pack is dependent on a new pack (which is not yet in the index.zip as it might not have been iterated yet)
# we will note that it is missing dependencies.
# And finally after updating all the packages in index.zip - i.e. the new pack exists now.
# We will go over the pack again to add what was missing.
# See issue #37290
packs_missing_dependencies.append(pack)
if not task_status:
pack.status = PackStatus.FAILED_METADATA_PARSING.name
pack.cleanup()
continue
task_status, not_updated_build = pack.prepare_release_notes(index_folder_path, build_number, pack_was_modified,
modified_pack_files_paths)
if not task_status:
pack.status = PackStatus.FAILED_RELEASE_NOTES.name
pack.cleanup()
continue
if not_updated_build:
pack.status = PackStatus.PACK_IS_NOT_UPDATED_IN_RUNNING_BUILD.name
pack.cleanup()
continue
task_status = pack.remove_unwanted_files(remove_test_playbooks)
if not task_status:
pack.status = PackStatus.FAILED_REMOVING_PACK_SKIPPED_FOLDERS.name
pack.cleanup()
continue
task_status = pack.sign_pack(signature_key)
if not task_status:
pack.status = PackStatus.FAILED_SIGNING_PACKS.name
pack.cleanup()
continue
task_status, zip_pack_path = pack.zip_pack()
if not task_status:
pack.status = PackStatus.FAILED_ZIPPING_PACK_ARTIFACTS.name
pack.cleanup()
continue
task_status, skipped_upload, _ = pack.upload_to_storage(zip_pack_path, pack.latest_version, storage_bucket,
override_all_packs or pack_was_modified)
if not task_status:
pack.status = PackStatus.FAILED_UPLOADING_PACK.name
pack.cleanup()
continue
task_status, exists_in_index = pack.check_if_exists_in_index(index_folder_path)
if not task_status:
pack.status = PackStatus.FAILED_SEARCHING_PACK_IN_INDEX.name
pack.cleanup()
continue
task_status = pack.prepare_for_index_upload()
if not task_status:
pack.status = PackStatus.FAILED_PREPARING_INDEX_FOLDER.name
pack.cleanup()
continue
task_status = update_index_folder(index_folder_path=index_folder_path, pack_name=pack.name, pack_path=pack.path,
pack_version=pack.latest_version, hidden_pack=pack.hidden)
if not task_status:
pack.status = PackStatus.FAILED_UPDATING_INDEX_FOLDER.name
pack.cleanup()
continue
# in case that pack already exist at cloud storage path and in index, don't show that the pack was changed
if skipped_upload and exists_in_index:
logging.info(f"{pack.name} pack status is {PackStatus.PACK_ALREADY_EXISTS.name}")
pack.status = PackStatus.PACK_ALREADY_EXISTS.name
pack.cleanup()
continue
pack.status = PackStatus.SUCCESS.name
logging.info(f"packs_missing_dependencies: {packs_missing_dependencies}")
# go over all the packs that were marked as missing dependencies and update them with the new index.json
for pack in packs_missing_dependencies:
task_status = pack.format_metadata(user_metadata, index_folder_path, packs_dependencies_mapping, build_number,
current_commit_hash, pack_was_modified, statistics_handler, pack_names, True)
if not task_status:
pack.status = PackStatus.FAILED_METADATA_PARSING.name
pack.cleanup()
continue
task_status = update_index_folder(index_folder_path=index_folder_path, pack_name=pack.name, pack_path=pack.path,
pack_version=pack.latest_version, hidden_pack=pack.hidden)
if not task_status:
pack.status = PackStatus.FAILED_UPDATING_INDEX_FOLDER.name
pack.cleanup()
continue
pack.status = PackStatus.SUCCESS.name
# upload core packs json to bucket
create_corepacks_config(storage_bucket, build_number, index_folder_path,
artifacts_dir=os.path.dirname(packs_artifacts_path))
# finished iteration over content packs
upload_index_to_storage(index_folder_path=index_folder_path, extract_destination_path=extract_destination_path,
index_blob=index_blob, build_number=build_number, private_packs=private_packs,
current_commit_hash=current_commit_hash, index_generation=index_generation,
force_upload=force_upload, previous_commit_hash=previous_commit_hash,
landing_page_sections=statistics_handler.landing_page_sections,
artifacts_dir=os.path.dirname(packs_artifacts_path),
storage_bucket=storage_bucket,
)
# get the lists of packs divided by their status
successful_packs, skipped_packs, failed_packs = get_packs_summary(packs_list)
# Store successful and failed packs list in CircleCI artifacts - to be used in Upload Packs To Marketplace job
packs_results_file_path = os.path.join(os.path.dirname(packs_artifacts_path), BucketUploadFlow.PACKS_RESULTS_FILE)
store_successful_and_failed_packs_in_ci_artifacts(
packs_results_file_path, BucketUploadFlow.PREPARE_CONTENT_FOR_TESTING, successful_packs, failed_packs,
updated_private_packs_ids, images_data=get_images_data(packs_list)
)
# summary of packs status
print_packs_summary(successful_packs, skipped_packs, failed_packs, not is_bucket_upload_flow)
|
50,445 |
def get_pages_and_resources_url(course_module):
"""
Gets course authoring microfrontend URL for Pages and Resources view.
"""
if ENABLE_PAGES_AND_RESOURCES_MICROFRONTEND.is_enabled(course_module.id):
mfe_base_url = get_course_authoring_url(course_module)
if mfe_base_url:
return f'{mfe_base_url}/course/{course_module.id}/pages-and-resources'
return None
|
def get_pages_and_resources_url(course_module):
"""
Gets course authoring microfrontend URL for Pages and Resources view.
"""
pages_and_resources_url = None
if ENABLE_PAGES_AND_RESOURCES_MICROFRONTEND.is_enabled(course_module.id):
mfe_base_url = get_course_authoring_url(course_module)
if mfe_base_url:
pages_and_resources_url = f'{mfe_base_url}/course/{course_module.id}/pages-and-resources'
return pages_and_resources_url
|
21,221 |
def init(path, apps_path=None, no_procfile=False, no_backups=False,
frappe_path=None, frappe_branch=None, verbose=False, clone_from=None,
skip_redis_config_generation=False, clone_without_update=False, clone_full=False,
ignore_exist=False, skip_assets=False,
python='python3'):
"""Initialize a new bench directory"""
from bench.app import get_app, install_apps_from_path
from bench.config import redis
from bench.config.common_site_config import make_config
from bench.config.procfile import setup_procfile
from bench.patches import set_all_patches_executed
if os.path.exists(path) and not ignore_exist:
log('Path {path} already exists!'.format(path=path))
sys.exit(0)
elif not os.path.exists(path):
# only create dir if it does not exist
os.makedirs(path)
for dirname in folders_in_bench:
try:
os.makedirs(os.path.join(path, dirname))
except OSError as e:
if e.errno == errno.EEXIST:
pass
setup_logging(bench_path=path)
setup_env(bench_path=path, python=python)
extra_config = None
if clone_full:
extra_config = {
'shallow_clone': False
}
make_config(path, extra_config)
if clone_from:
clone_apps_from(bench_path=path, clone_from=clone_from, update_app=not clone_without_update)
else:
if not frappe_path:
frappe_path = 'https://github.com/frappe/frappe.git'
get_app(frappe_path, branch=frappe_branch, bench_path=path, skip_assets=True, verbose=verbose)
if apps_path:
install_apps_from_path(apps_path, bench_path=path)
bench.set_frappe_version(bench_path=path)
if bench.FRAPPE_VERSION > 5:
if not skip_assets:
update_node_packages(bench_path=path)
set_all_patches_executed(bench_path=path)
if not skip_assets:
build_assets(bench_path=path)
if not skip_redis_config_generation:
redis.generate_config(path)
if not no_procfile:
setup_procfile(path, skip_redis=skip_redis_config_generation)
if not no_backups:
setup_backups(bench_path=path)
copy_patches_txt(path)
|
def init(path, apps_path=None, no_procfile=False, no_backups=False,
frappe_path=None, frappe_branch=None, verbose=False, clone_from=None,
skip_redis_config_generation=False, clone_without_update=False, clone_full=False,
ignore_exist=False, skip_assets=False,
python='python3'):
"""Initialize a new bench directory"""
from bench.app import get_app, install_apps_from_path
from bench.config import redis
from bench.config.common_site_config import make_config
from bench.config.procfile import setup_procfile
from bench.patches import set_all_patches_executed
if os.path.exists(path) and not ignore_exist:
log('Path {path} already exists!'.format(path=path))
sys.exit(0)
elif not os.path.exists(path):
# only create dir if it does not exist
os.makedirs(path)
for dirname in folders_in_bench:
try:
os.makedirs(os.path.join(path, dirname))
except OSError as e:
if e.errno == errno.EEXIST:
pass
setup_logging(bench_path=path)
setup_env(bench_path=path, python=python)
make_config(path, shallow_clone=not clone_full)
if clone_from:
clone_apps_from(bench_path=path, clone_from=clone_from, update_app=not clone_without_update)
else:
if not frappe_path:
frappe_path = 'https://github.com/frappe/frappe.git'
get_app(frappe_path, branch=frappe_branch, bench_path=path, skip_assets=True, verbose=verbose)
if apps_path:
install_apps_from_path(apps_path, bench_path=path)
bench.set_frappe_version(bench_path=path)
if bench.FRAPPE_VERSION > 5:
if not skip_assets:
update_node_packages(bench_path=path)
set_all_patches_executed(bench_path=path)
if not skip_assets:
build_assets(bench_path=path)
if not skip_redis_config_generation:
redis.generate_config(path)
if not no_procfile:
setup_procfile(path, skip_redis=skip_redis_config_generation)
if not no_backups:
setup_backups(bench_path=path)
copy_patches_txt(path)
|
10,510 |
def _remove_values_conditions(value, no_log_strings, deferred_removals):
"""
Helper function for :meth:`remove_values`.
:arg value: The value to check for strings that need to be stripped
:arg no_log_strings: set of strings which must be stripped out of any values
:arg deferred_removals: List which holds information about nested
containers that have to be iterated for removals. It is passed into
this function so that more entries can be added to it if value is
a container type. The format of each entry is a 2-tuple where the first
element is the ``value`` parameter and the second value is a new
container to copy the elements of ``value`` into once iterated.
:returns: if ``value`` is a scalar, returns ``value`` with two exceptions:
1. :class:`~datetime.datetime` objects which are changed into a string representation.
2. objects which are in no_log_strings are replaced with a placeholder
so that no sensitive data is leaked.
If ``value`` is a container type, returns a new empty container.
``deferred_removals`` is added to as a side-effect of this function.
.. warning:: It is up to the caller to make sure the order in which value
is passed in is correct. For instance, higher level containers need
to be passed in before lower level containers. For example, given
``{'level1': {'level2': {'level3': [True]}}}`` first pass in the
dictionary for ``level1``, then the dict for ``level2``, and finally
the list for ``level3``.
"""
if isinstance(value, (text_type, binary_type)):
# Need native str type
native_str_value = value
if isinstance(value, text_type):
value_is_text = True
if PY2:
native_str_value = to_bytes(value, errors='surrogate_or_strict')
elif isinstance(value, binary_type):
value_is_text = False
if PY3:
native_str_value = to_text(value, errors='surrogate_or_strict')
if native_str_value in no_log_strings:
return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
for omit_me in no_log_strings:
native_str_value = native_str_value.replace(omit_me, '*' * 8)
if value_is_text and isinstance(native_str_value, binary_type):
value = to_text(native_str_value, encoding='utf-8', errors='surrogate_then_replace')
elif not value_is_text and isinstance(native_str_value, text_type):
value = to_bytes(native_str_value, encoding='utf-8', errors='surrogate_then_replace')
else:
value = native_str_value
elif isinstance(value, Sequence):
if isinstance(value, MutableSequence):
new_value = type(value)()
else:
new_value = [] # Need a mutable value
deferred_removals.append((value, new_value))
value = new_value
elif isinstance(value, Set):
if isinstance(value, MutableSet):
new_value = type(value)()
else:
new_value = set() # Need a mutable value
deferred_removals.append((value, new_value))
value = new_value
elif isinstance(value, Mapping):
if isinstance(value, MutableMapping):
new_value = type(value)()
else:
new_value = {} # Need a mutable value
deferred_removals.append((value, new_value))
value = new_value
elif isinstance(value, tuple(chain(integer_types, (float, bool, NoneType)))):
stringy_value = to_native(value, encoding='utf-8', errors='surrogate_or_strict')
if stringy_value in no_log_strings:
return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
for omit_me in no_log_strings:
if omit_me in stringy_value:
return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
elif isinstance(value, datetime.datetime) or isinstance(value, datetime.date):
value = value.isoformat()
else:
raise TypeError('Value of unknown type: %s, %s' % (type(value), value))
return value
|
def _remove_values_conditions(value, no_log_strings, deferred_removals):
"""
Helper function for :meth:`remove_values`.
:arg value: The value to check for strings that need to be stripped
:arg no_log_strings: set of strings which must be stripped out of any values
:arg deferred_removals: List which holds information about nested
containers that have to be iterated for removals. It is passed into
this function so that more entries can be added to it if value is
a container type. The format of each entry is a 2-tuple where the first
element is the ``value`` parameter and the second value is a new
container to copy the elements of ``value`` into once iterated.
:returns: if ``value`` is a scalar, returns ``value`` with two exceptions:
1. :class:`~datetime.datetime` objects which are changed into a string representation.
2. objects which are in no_log_strings are replaced with a placeholder
so that no sensitive data is leaked.
If ``value`` is a container type, returns a new empty container.
``deferred_removals`` is added to as a side-effect of this function.
.. warning:: It is up to the caller to make sure the order in which value
is passed in is correct. For instance, higher level containers need
to be passed in before lower level containers. For example, given
``{'level1': {'level2': {'level3': [True]}}}`` first pass in the
dictionary for ``level1``, then the dict for ``level2``, and finally
the list for ``level3``.
"""
if isinstance(value, (text_type, binary_type)):
# Need native str type
native_str_value = value
if isinstance(value, text_type):
value_is_text = True
if PY2:
native_str_value = to_bytes(value, errors='surrogate_or_strict')
elif isinstance(value, binary_type):
value_is_text = False
if PY3:
native_str_value = to_text(value, errors='surrogate_or_strict')
if native_str_value in no_log_strings:
return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
for omit_me in no_log_strings:
native_str_value = native_str_value.replace(omit_me, '*' * 8)
if value_is_text and isinstance(native_str_value, binary_type):
value = to_text(native_str_value, encoding='utf-8', errors='surrogate_then_replace')
elif not value_is_text and isinstance(native_str_value, text_type):
value = to_bytes(native_str_value, encoding='utf-8', errors='surrogate_then_replace')
else:
value = native_str_value
elif isinstance(value, Sequence):
if isinstance(value, MutableSequence):
new_value = type(value)()
else:
new_value = [] # Need a mutable value
deferred_removals.append((value, new_value))
value = new_value
elif isinstance(value, Set):
if isinstance(value, MutableSet):
new_value = type(value)()
else:
new_value = set() # Need a mutable value
deferred_removals.append((value, new_value))
value = new_value
elif isinstance(value, Mapping):
if isinstance(value, MutableMapping):
new_value = type(value)()
else:
new_value = {} # Need a mutable value
deferred_removals.append((value, new_value))
value = new_value
elif isinstance(value, tuple(chain(integer_types, (float, bool, NoneType)))):
stringy_value = to_native(value, encoding='utf-8', errors='surrogate_or_strict')
if stringy_value in no_log_strings:
return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
for omit_me in no_log_strings:
if omit_me in stringy_value:
return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
elif isinstance(value, (datetime.datetime, datetime.date)):
value = value.isoformat()
else:
raise TypeError('Value of unknown type: %s, %s' % (type(value), value))
return value
|
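The docstring above describes a breadth-first scrubbing pattern: scalars are cleaned immediately, while containers are swapped for empty copies and queued on `deferred_removals` so the caller can fill them in level by level. A simplified, hypothetical driver loop for that pattern (a stand-in for the real caller, with a much-reduced scrubber in place of `_remove_values_conditions`; tuples and sets are collapsed to lists here for brevity):

from collections import deque

def _scrub(value, secrets, deferred):
    # Simplified stand-in: scrub scalar strings, queue containers as
    # (original, new_empty_container) pairs for later filling.
    if isinstance(value, str):
        return '********' if value in secrets else value
    if isinstance(value, dict):
        new = {}
        deferred.append((value, new))
        return new
    if isinstance(value, (list, tuple, set)):
        new = []
        deferred.append((value, new))
        return new
    return value

def remove_values(value, secrets):
    deferred = deque()
    new_value = _scrub(value, secrets, deferred)
    # Higher-level containers are processed before the ones nested inside them,
    # matching the ordering requirement described in the docstring above.
    while deferred:
        old, new = deferred.popleft()
        if isinstance(new, dict):
            for k, v in old.items():
                new[k] = _scrub(v, secrets, deferred)
        else:
            for item in old:
                new.append(_scrub(item, secrets, deferred))
    return new_value

print(remove_values({'level1': {'level2': {'level3': ['token']}}}, {'token'}))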
31,991 |
def main() -> None:
params: Dict[str, Any] = demisto.params()
args: Dict[str, Any] = demisto.args()
url = params.get('url')
verify_certificate: bool = not params.get('insecure', False)
proxy = params.get('proxy', False)
headers: Dict[str, str] = {}
command = demisto.command()
demisto.debug(f'Command being called is {command}')
try:
global SERVICE_ID
requests.packages.urllib3.disable_warnings()
client: Client = Client(urljoin(url, ''), verify_certificate, proxy, headers=headers, auth={})
client.get_token(params.get('credentials'))
SERVICE_ID = params.get('service_id')
fetch_time = params.get('fetch_time', '1 days')
fetch_limit = params.get('fetch_limit', '100')
commands = {
'rsa-nw-list-incidents': list_incidents_command,
'rsa-nw-update-incident': update_incident_command,
'rsa-nw-remove-incident': remove_incident_command,
'rsa-nw-incident-add-journal-entry': incident_add_journal_entry_command,
'rsa-nw-incident-list-alerts': incident_list_alerts_command,
'rsa-nw-services-list': services_list_command,
'rsa-nw-hosts-list': hosts_list_command,
'endpoint': endpoint_command,
'rsa-nw-snapshots-list-for-host': snapshots_list_for_host_command,
'rsa-nw-snapshot-details-get': snapshot_details_get_command,
'rsa-nw-files-list': files_list_command,
'rsa-nw-scan-request': scan_request_command,
'rsa-nw-scan-stop-request': scan_stop_request_command,
'rsa-nw-host-alerts-list': host_alerts_list_command,
'rsa-nw-file-alerts-list': file_alerts_list_command,
'rsa-nw-file-download': file_download_command,
'rsa-nw-mft-download-request': mft_download_request_command,
'rsa-nw-system-dump-download-request': system_dump_download_request_command,
'rsa-nw-process-dump-download-request': process_dump_download_request_command,
'rsa-nw-endpoint-isolate-from-network': endpoint_isolate_from_network_command,
'rsa-nw-endpoint-update-exclusions': endpoint_update_exclusions_command,
'rsa-nw-endpoint-isolation-remove': endpoint_isolation_remove_command,
}
if command == 'test-module':
test_module(client, params)
elif command == 'fetch-incidents':
incidents = fetch_incidents(client, fetch_time, fetch_limit)
demisto.incidents(incidents)
elif command == 'endpoint':
return_results(endpoint_command(client, args))
elif command in commands:
return_results(commands[command](client, args))
else:
raise NotImplementedError(f'{command} command is not implemented.')
except Exception as e:
return_error(str(e))
|
def main() -> None:
params: Dict[str, Any] = demisto.params()
args: Dict[str, Any] = demisto.args()
url = params.get('url')
verify_certificate: bool = not params.get('insecure', False)
proxy = params.get('proxy', False)
headers: Dict[str, str] = {}
command = demisto.command()
demisto.debug(f'Command being called is {command}')
try:
global SERVICE_ID
requests.packages.urllib3.disable_warnings()
client: Client = Client(url, verify_certificate, proxy, headers=headers)
client.get_token(params.get('credentials'))
SERVICE_ID = params.get('service_id')
fetch_time = params.get('fetch_time', '1 days')
fetch_limit = params.get('fetch_limit', '100')
commands = {
'rsa-nw-list-incidents': list_incidents_command,
'rsa-nw-update-incident': update_incident_command,
'rsa-nw-remove-incident': remove_incident_command,
'rsa-nw-incident-add-journal-entry': incident_add_journal_entry_command,
'rsa-nw-incident-list-alerts': incident_list_alerts_command,
'rsa-nw-services-list': services_list_command,
'rsa-nw-hosts-list': hosts_list_command,
'endpoint': endpoint_command,
'rsa-nw-snapshots-list-for-host': snapshots_list_for_host_command,
'rsa-nw-snapshot-details-get': snapshot_details_get_command,
'rsa-nw-files-list': files_list_command,
'rsa-nw-scan-request': scan_request_command,
'rsa-nw-scan-stop-request': scan_stop_request_command,
'rsa-nw-host-alerts-list': host_alerts_list_command,
'rsa-nw-file-alerts-list': file_alerts_list_command,
'rsa-nw-file-download': file_download_command,
'rsa-nw-mft-download-request': mft_download_request_command,
'rsa-nw-system-dump-download-request': system_dump_download_request_command,
'rsa-nw-process-dump-download-request': process_dump_download_request_command,
'rsa-nw-endpoint-isolate-from-network': endpoint_isolate_from_network_command,
'rsa-nw-endpoint-update-exclusions': endpoint_update_exclusions_command,
'rsa-nw-endpoint-isolation-remove': endpoint_isolation_remove_command,
}
if command == 'test-module':
test_module(client, params)
elif command == 'fetch-incidents':
incidents = fetch_incidents(client, fetch_time, fetch_limit)
demisto.incidents(incidents)
elif command == 'endpoint':
return_results(endpoint_command(client, args))
elif command in commands:
return_results(commands[command](client, args))
else:
raise NotImplementedError(f'{command} command is not implemented.')
except Exception as e:
return_error(str(e))
|
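The `commands` dictionary above is a plain dispatch table: each command name maps to a handler taking `(client, args)`, and unknown names fall through to `NotImplementedError`. A stripped-down sketch of the same pattern with made-up handler names:

def say_hello(client, args):
    return f"hello {args.get('name', 'world')}"

def say_bye(client, args):
    return f"bye {args.get('name', 'world')}"

commands = {
    'hello': say_hello,
    'bye': say_bye,
}

def dispatch(command, client, args):
    # Mirror the NotImplementedError branch above for unknown commands.
    if command not in commands:
        raise NotImplementedError(f'{command} command is not implemented.')
    return commands[command](client, args)

print(dispatch('hello', client=None, args={'name': 'analyst'}))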
14,348 |
def _curve_line_intersections_t(curve, line):
aligned_curve = _alignment_transformation(line).transformPoints(curve)
if len(curve) == 3:
a, b, c = calcQuadraticParameters(*aligned_curve)
intersections = solveQuadratic(a[0], b[0], c[0])
intersections.extend(solveQuadratic(a[1], b[1], c[1]))
elif len(curve) == 4:
a, b, c, d = calcCubicParameters(*aligned_curve)
intersections = solveCubic(a[0], b[0], c[0], d[0])
intersections.extend(solveCubic(a[1], b[1], c[1], d[1]))
else:
raise ValueError("Unknown curve degree")
return sorted([i for i in intersections if (0.0 <= i <= 1)])
|
def _curve_line_intersections_t(curve, line):
aligned_curve = _alignment_transformation(line).transformPoints(curve)
if len(curve) == 3:
a, b, c = calcQuadraticParameters(*aligned_curve)
intersections = solveQuadratic(a[0], b[0], c[0])
intersections.extend(solveQuadratic(a[1], b[1], c[1]))
elif len(curve) == 4:
a, b, c, d = calcCubicParameters(*aligned_curve)
intersections = solveCubic(a[0], b[0], c[0], d[0])
intersections.extend(solveCubic(a[1], b[1], c[1], d[1]))
else:
raise ValueError("Unknown curve degree")
return sorted(i for i in intersections if 0.0 <= i <= 1)
|
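The helper above reduces curve/line intersection to one-dimensional root finding: after the curve is aligned so the line lies on the x-axis, the parameters where the aligned curve's y-polynomial vanishes are the intersection parameters. A standalone sketch of that root-finding step with fontTools (the control points are arbitrary illustration values):

from fontTools.misc.bezierTools import calcCubicParameters, solveCubic

# A cubic Bezier segment given by four (x, y) control points.
curve = ((0, -10), (30, 40), (70, -40), (100, 10))

# calcCubicParameters returns the power-basis coefficients (a, b, c, d) such
# that point(t) = a*t**3 + b*t**2 + c*t + d, componentwise.
a, b, c, d = calcCubicParameters(*curve)

# Roots of the y-polynomial are the parameters where the curve crosses y=0,
# i.e. where it intersects the x-axis (the "aligned line" in the code above).
roots = [t for t in solveCubic(a[1], b[1], c[1], d[1]) if 0.0 <= t <= 1.0]
print(roots)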
2,293 |
def test_sparse_input_for_predict():
# Test to make sure sparse inputs are accepted for fit_predict
# (non-regression test for issue #20049)
af = AffinityPropagation(affinity="euclidean", random_state=42)
af.fit(X)
labels = af.predict(csr_matrix((2, 2)))
assert_array_equal(labels, (2, 2))
|
def test_sparse_input_for_predict():
# Test to make sure sparse inputs are accepted for predict
# (non-regression test for issue #20049)
af = AffinityPropagation(affinity="euclidean", random_state=42)
af.fit(X)
labels = af.predict(csr_matrix((2, 2)))
assert_array_equal(labels, (2, 2))
|
52,036 |
def set_org_name(required):
"""Generate a callback that enforces the 'required' rule as required"""
# could be generalized to work for any mutex pair (or list) but no obvious need
def callback(ctx, param, value):
"""Callback which enforces mutex and 'required' behaviour (if required)."""
prev_value = ctx.params.get("org_name")
if value and prev_value and prev_value != value:
raise click.UsageError(
f"Either ORGNAME or --org ORGNAME should be supplied, not both ({value}, {prev_value})"
)
ctx.params.setdefault("org_name", value)
if required and not ctx.params.get("org_name"):
raise click.UsageError("Missing argument 'ORGNAME'")
return callback
|
def set_org_name(required):
"""Generate a callback for processing the `org_name` option or argument
`required` is a boolean for whether org_name is required
"""
# could be generalized to work for any mutex pair (or list) but no obvious need
def callback(ctx, param, value):
"""Callback which enforces mutex and 'required' behaviour (if required)."""
prev_value = ctx.params.get("org_name")
if value and prev_value and prev_value != value:
raise click.UsageError(
f"Either ORGNAME or --org ORGNAME should be supplied, not both ({value}, {prev_value})"
)
ctx.params.setdefault("org_name", value)
if required and not ctx.params.get("org_name"):
raise click.UsageError("Missing argument 'ORGNAME'")
return callback
|
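The callback above is meant to be shared by a positional ORGNAME argument and an `--org` option so that either spelling lands in the same `ctx.params['org_name']` slot. A hypothetical wiring sketch with click; the command and parameter names are illustrative, not the project's actual CLI definitions:

import click

def set_org_name(required):
    # Same callback factory as above, repeated so the sketch is self-contained.
    def callback(ctx, param, value):
        prev_value = ctx.params.get("org_name")
        if value and prev_value and prev_value != value:
            raise click.UsageError(
                f"Either ORGNAME or --org ORGNAME should be supplied, not both ({value}, {prev_value})"
            )
        ctx.params.setdefault("org_name", value)
        if required and not ctx.params.get("org_name"):
            raise click.UsageError("Missing argument 'ORGNAME'")
    return callback

@click.command()
@click.argument("org_name_arg", required=False, expose_value=False,
                callback=set_org_name(required=True))
@click.option("--org", "org_name_opt", expose_value=False,
              callback=set_org_name(required=True))
def org_info(org_name):
    # org_name arrives via ctx.params, filled in by whichever callback received a value.
    # click processes parameters that appear on the command line first, so the
    # supplied value wins the setdefault over the unsupplied parameter's None.
    click.echo(f"Selected org: {org_name}")

if __name__ == "__main__":
    org_info()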
43,931 |
def _hermite_coulomb(t, u, v, n, p, dr):
"""Evaluate Hermite integral needed to compute the nuclear attraction and electron repulsion
integrals.
These integrals are computed recursively starting from the Boys function
[`Helgaker (1995) p817 <https://www.worldscientific.com/doi/abs/10.1142/9789812832115_0001>`_]:
.. math::
R_{000}^n = (-2p)^n F_n(pR_{CP}^2),
where :math:`F_n` is the Boys function, :math:`p` is computed from the exponents of the two
Gaussian functions as :math:`p = \alpha + \beta`, and :math:`R_{CP}` is the distance between the
center of the composite Gaussian centered at :math:`P` and the electrostatic potential at
:math:`C`. The following recursive equations are used to evaluate the higher order
Hermite integrals
.. math::
R_{t+1, u, v}^n = t R_{t-1, u, v}^{n+1} + x R_{t, u, v}^{n+1}
R_{t, u+1, v}^n = u R_{t, u-1, v}^{n+1} + y R_{t, u, v}^{n+1}
R_{t, u, v+1}^n = v R_{t, u, v-1}^{n+1} + z R_{t, u, v}^{n+1}
where :math:`x`, :math:`y` and :math:`z` are the Cartesian components of :math:`R_{CP}`.
Args:
t (integer): order of Hermite derivative in x
u (integer): order of Hermite derivative in y
v (float): order of Hermite derivative in z
n (integer): order of the Boys function
p (float): sum of the Gaussian exponents
dr (array[float]): distance between the center of the composite Gaussian and the nucleus
Returns:
array[float]: value of the Hermite integral
"""
x, y, z = dr[0], dr[1], dr[2]
T = p * (dr ** 2).sum(axis=0)
r = 0
if t == u == v == 0:
f = []
for term in T.flatten():
f.append(_boys(n, term))
return ((-2 * p) ** n) * anp.array(f).reshape(T.shape)
if t == u == 0:
if v > 1:
r = r + (v - 1) * _hermite_coulomb(t, u, v - 2, n + 1, p, dr)
r = r + z * _hermite_coulomb(t, u, v - 1, n + 1, p, dr)
return r
if t == 0:
if u > 1:
r = r + (u - 1) * _hermite_coulomb(t, u - 2, v, n + 1, p, dr)
r = r + y * _hermite_coulomb(t, u - 1, v, n + 1, p, dr)
return r
if t > 1:
r = r + (t - 1) * _hermite_coulomb(t - 2, u, v, n + 1, p, dr)
r = r + x * _hermite_coulomb(t - 1, u, v, n + 1, p, dr)
return r
|
def _hermite_coulomb(t, u, v, n, p, dr):
"""Evaluate the Hermite integral needed to compute the nuclear attraction and electron repulsion
integrals.
These integrals are computed recursively starting from the Boys function
[`Helgaker (1995) p817 <https://www.worldscientific.com/doi/abs/10.1142/9789812832115_0001>`_]:
.. math::
R_{000}^n = (-2p)^n F_n(pR_{CP}^2),
where :math:`F_n` is the Boys function, :math:`p` is computed from the exponents of the two
Gaussian functions as :math:`p = \alpha + \beta`, and :math:`R_{CP}` is the distance between the
center of the composite Gaussian centered at :math:`P` and the electrostatic potential at
:math:`C`. The following recursive equations are used to evaluate the higher order
Hermite integrals
.. math::
R_{t+1, u, v}^n = t R_{t-1, u, v}^{n+1} + x R_{t, u, v}^{n+1}
R_{t, u+1, v}^n = u R_{t, u-1, v}^{n+1} + y R_{t, u, v}^{n+1}
R_{t, u, v+1}^n = v R_{t, u, v-1}^{n+1} + z R_{t, u, v}^{n+1}
where :math:`x`, :math:`y` and :math:`z` are the Cartesian components of :math:`R_{CP}`.
Args:
t (integer): order of Hermite derivative in x
u (integer): order of Hermite derivative in y
v (float): order of Hermite derivative in z
n (integer): order of the Boys function
p (float): sum of the Gaussian exponents
dr (array[float]): distance between the center of the composite Gaussian and the nucleus
Returns:
array[float]: value of the Hermite integral
"""
x, y, z = dr[0], dr[1], dr[2]
T = p * (dr ** 2).sum(axis=0)
r = 0
if t == u == v == 0:
f = []
for term in T.flatten():
f.append(_boys(n, term))
return ((-2 * p) ** n) * anp.array(f).reshape(T.shape)
if t == u == 0:
if v > 1:
r = r + (v - 1) * _hermite_coulomb(t, u, v - 2, n + 1, p, dr)
r = r + z * _hermite_coulomb(t, u, v - 1, n + 1, p, dr)
return r
if t == 0:
if u > 1:
r = r + (u - 1) * _hermite_coulomb(t, u - 2, v, n + 1, p, dr)
r = r + y * _hermite_coulomb(t, u - 1, v, n + 1, p, dr)
return r
if t > 1:
r = r + (t - 1) * _hermite_coulomb(t - 2, u, v, n + 1, p, dr)
r = r + x * _hermite_coulomb(t - 1, u, v, n + 1, p, dr)
return r
|
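The recursion above bottoms out in `_boys`, the Boys function F_n(T) = integral from 0 to 1 of u**(2n) * exp(-T*u**2) du. As a standalone illustration (not the library's implementation), the same quantity can be written in closed form through the regularized lower incomplete gamma function, with the T -> 0 limit 1/(2n + 1):

import numpy as np
from scipy.special import gamma, gammainc  # gammainc is the regularized lower incomplete gamma
from scipy.integrate import quad

def boys(n, t):
    """Boys function F_n(t) = integral_0^1 u**(2n) * exp(-t*u**2) du."""
    t = np.asarray(t, dtype=float)
    small = t < 1e-12
    # Avoid dividing by zero; the t -> 0 limit is F_n(0) = 1 / (2n + 1).
    safe_t = np.where(small, 1.0, t)
    val = gamma(n + 0.5) * gammainc(n + 0.5, safe_t) / (2.0 * safe_t ** (n + 0.5))
    return np.where(small, 1.0 / (2 * n + 1), val)

# Quick sanity check against direct numerical quadrature.
n, t = 2, 0.75
ref, _ = quad(lambda u: u ** (2 * n) * np.exp(-t * u * u), 0.0, 1.0)
print(boys(n, t), ref)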
53,509 |
def foo(x, y):
return {x: y}
|
def print_point(x, y):
print(f"Point is located at {x},{y}")
|
1,665 |
def lasso_path(X, y, eps=1e-3, n_alphas=100, alphas=None,
precompute='auto', Xy=None, copy_X=True, coef_init=None,
verbose=False, return_n_iter=False, positive=False, **params):
"""Compute Lasso path with coordinate descent
The Lasso optimization function varies for mono and multi-outputs.
For mono-output tasks it is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
For multi-output tasks it is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
Where::
||W||_21 = \\sum_i \\sqrt{\\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
X : array-like or sparse matrix of shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication. If ``y`` is mono-output then ``X``
can be sparse.
y : ndarray of shape (n_samples,), or (n_samples, n_outputs)
Target values
eps : float, default=1e-3
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, default=100
Number of alphas along the regularization path
alphas : ndarray, default=None
List of alphas where to compute the models.
If ``None`` alphas are set automatically
precompute : True | False | 'auto' | array-like, default='auto'
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
Xy : array-like, default=None
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
copy_X : bool, default=True
If ``True``, X will be copied; else, it may be overwritten.
coef_init : array of shape (n_features, ), default=None
The initial values of the coefficients.
verbose : bool or int, default=False
Amount of verbosity.
return_n_iter : bool, default=False
whether to return the number of iterations or not.
positive : bool, default=False
If set to True, forces coefficients to be positive.
(Only allowed when ``y.ndim == 1``).
**params : kwargs
keyword arguments passed to the coordinate descent solver.
Returns
-------
alphas : array of shape (n_alphas,)
The alphas along the path where models are computed.
coefs : array of shape (n_features, n_alphas) or \
(n_outputs, n_features, n_alphas)
Coefficients along the path.
dual_gaps : array of shape (n_alphas,)
The dual gaps at the end of the optimization for each alpha.
n_iters : array-like of shape (n_alphas,)
The number of iterations taken by the coordinate descent optimizer to
reach the specified tolerance for each alpha.
Notes
-----
For an example, see
:ref:`examples/linear_model/plot_lasso_coordinate_descent_path.py
<sphx_glr_auto_examples_linear_model_plot_lasso_coordinate_descent_path.py>`.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
Note that in certain cases, the Lars solver may be significantly
faster to implement this functionality. In particular, linear
interpolation can be used to retrieve model coefficients between the
values output by lars_path
Examples
--------
Comparing lasso_path and lars_path with interpolation:
>>> X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
>>> y = np.array([1, 2, 3.1])
>>> # Use lasso_path to compute a coefficient path
>>> _, coef_path, _ = lasso_path(X, y, alphas=[5., 1., .5])
>>> print(coef_path)
[[0. 0. 0.46874778]
[0.2159048 0.4425765 0.23689075]]
>>> # Now use lars_path and 1D linear interpolation to compute the
>>> # same path
>>> from sklearn.linear_model import lars_path
>>> alphas, active, coef_path_lars = lars_path(X, y, method='lasso')
>>> from scipy import interpolate
>>> coef_path_continuous = interpolate.interp1d(alphas[::-1],
... coef_path_lars[:, ::-1])
>>> print(coef_path_continuous([5., 1., .5]))
[[0. 0. 0.46915237]
[0.2159048 0.4425765 0.23668876]]
See also
--------
lars_path
Lasso
LassoLars
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
"""
return enet_path(X, y, l1_ratio=1., eps=eps, n_alphas=n_alphas,
alphas=alphas, precompute=precompute, Xy=Xy,
copy_X=copy_X, coef_init=coef_init, verbose=verbose,
positive=positive, return_n_iter=return_n_iter, **params)
|
def lasso_path(X, y, eps=1e-3, n_alphas=100, alphas=None,
precompute='auto', Xy=None, copy_X=True, coef_init=None,
verbose=False, return_n_iter=False, positive=False, **params):
"""Compute Lasso path with coordinate descent
The Lasso optimization function varies for mono and multi-outputs.
For mono-output tasks it is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
For multi-output tasks it is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
Where::
||W||_21 = \\sum_i \\sqrt{\\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
X : array-like or sparse matrix of shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication. If ``y`` is mono-output then ``X``
can be sparse.
y : ndarray of shape (n_samples,) or (n_samples, n_outputs)
Target values
eps : float, default=1e-3
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, default=100
Number of alphas along the regularization path
alphas : ndarray, default=None
List of alphas where to compute the models.
If ``None`` alphas are set automatically
precompute : True | False | 'auto' | array-like, default='auto'
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
Xy : array-like, default=None
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
copy_X : bool, default=True
If ``True``, X will be copied; else, it may be overwritten.
coef_init : array of shape (n_features, ), default=None
The initial values of the coefficients.
verbose : bool or int, default=False
Amount of verbosity.
return_n_iter : bool, default=False
whether to return the number of iterations or not.
positive : bool, default=False
If set to True, forces coefficients to be positive.
(Only allowed when ``y.ndim == 1``).
**params : kwargs
keyword arguments passed to the coordinate descent solver.
Returns
-------
alphas : array of shape (n_alphas,)
The alphas along the path where models are computed.
coefs : array of shape (n_features, n_alphas) or \
(n_outputs, n_features, n_alphas)
Coefficients along the path.
dual_gaps : array of shape (n_alphas,)
The dual gaps at the end of the optimization for each alpha.
n_iters : array-like of shape (n_alphas,)
The number of iterations taken by the coordinate descent optimizer to
reach the specified tolerance for each alpha.
Notes
-----
For an example, see
:ref:`examples/linear_model/plot_lasso_coordinate_descent_path.py
<sphx_glr_auto_examples_linear_model_plot_lasso_coordinate_descent_path.py>`.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
Note that in certain cases, the Lars solver may be significantly
faster to implement this functionality. In particular, linear
interpolation can be used to retrieve model coefficients between the
values output by lars_path
Examples
--------
Comparing lasso_path and lars_path with interpolation:
>>> X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
>>> y = np.array([1, 2, 3.1])
>>> # Use lasso_path to compute a coefficient path
>>> _, coef_path, _ = lasso_path(X, y, alphas=[5., 1., .5])
>>> print(coef_path)
[[0. 0. 0.46874778]
[0.2159048 0.4425765 0.23689075]]
>>> # Now use lars_path and 1D linear interpolation to compute the
>>> # same path
>>> from sklearn.linear_model import lars_path
>>> alphas, active, coef_path_lars = lars_path(X, y, method='lasso')
>>> from scipy import interpolate
>>> coef_path_continuous = interpolate.interp1d(alphas[::-1],
... coef_path_lars[:, ::-1])
>>> print(coef_path_continuous([5., 1., .5]))
[[0. 0. 0.46915237]
[0.2159048 0.4425765 0.23668876]]
See also
--------
lars_path
Lasso
LassoLars
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
"""
return enet_path(X, y, l1_ratio=1., eps=eps, n_alphas=n_alphas,
alphas=alphas, precompute=precompute, Xy=Xy,
copy_X=copy_X, coef_init=coef_init, verbose=verbose,
positive=positive, return_n_iter=return_n_iter, **params)
|
53,612 |
def test_class_method_inherited() -> None:
"""Tests for class methods that are inherited from a superclass.
Based on https://github.com/PyCQA/astroid/issues/1008.
"""
nodes_ = builder.extract_node(
"""
class A:
@classmethod
def method(cls):
return cls
class B(A):
pass
A().method() #@
A.method() #@
B().method() #@
B.method() #@
"""
)
expecteds = ["A", "A", "B", "B"]
for node, expected in zip(nodes_, expecteds):
assert isinstance(node, nodes.NodeNG)
inferred = node.inferred()
assert len(inferred) == 1
assert isinstance(inferred[0], nodes.ClassDef)
assert inferred[0].name == expected
|
def test_class_method_inherited() -> None:
"""Tests for class methods that are inherited from a superclass.
Based on https://github.com/PyCQA/astroid/issues/1008.
"""
nodes_ = builder.extract_node(
"""
class A:
@classmethod
def method(cls):
return cls
class B(A):
pass
A().method() #@
A.method() #@
B().method() #@
B.method() #@
"""
)
expected_names = ["A", "A", "B", "B"]
for node, expected in zip(nodes_, expected_names):
assert isinstance(node, nodes.NodeNG)
inferred = node.inferred()
assert len(inferred) == 1
assert isinstance(inferred[0], nodes.ClassDef)
assert inferred[0].name == expected
|
31,850 |
def tableToMarkdown(name, t, headers=None, headerTransform=None, removeNull=False, metadata=None, url_keys=None,
date_fields=None):
"""
Converts a demisto table in JSON form to a Markdown table
:type name: ``str``
:param name: The name of the table (required)
:type t: ``dict`` or ``list``
:param t: The JSON table - List of dictionaries with the same keys or a single dictionary (required)
:type headers: ``list`` or ``string``
:param headers: A list of headers to be presented in the output table (by order). If string will be passed
then table will have single header. Default will include all available headers.
:type headerTransform: ``function``
:param headerTransform: A function that formats the original data headers (optional)
:type removeNull: ``bool``
:param removeNull: Remove empty columns from the table. Default is False
:type metadata: ``str``
:param metadata: Metadata about the table contents
:type url_keys: ``list``
       :param url_keys: a list of keys in the given JSON table that should be turned into clickable links
:return: A string representation of the markdown table
:rtype: ``str``
"""
# Turning the urls in the table to clickable
if url_keys:
t = url_to_clickable_markdown(t, url_keys)
mdResult = ''
if name:
mdResult = '### ' + name + '\n'
if metadata:
mdResult += metadata + '\n'
if not t or len(t) == 0:
mdResult += '**No entries.**\n'
return mdResult
if not headers and isinstance(t, dict) and len(t.keys()) == 1:
# in case of a single key, create a column table where each element is in a different row.
headers = list(t.keys())
t = list(t.values())[0]
if not isinstance(t, list):
t = [t]
if headers and isinstance(headers, STRING_TYPES):
headers = [headers]
if not isinstance(t[0], dict):
# the table contains only simple objects (strings, numbers)
# should be only one header
if headers and len(headers) > 0:
header = headers[0]
t = [{header: item} for item in t]
else:
raise Exception("Missing headers param for tableToMarkdown. Example: headers=['Some Header']")
    # in case headers were not provided (backward compatibility)
if not headers:
headers = list(t[0].keys())
headers.sort()
if removeNull:
headers_aux = headers[:]
for header in headers:
if all(obj.get(header) in ('', None, [], {}) for obj in t):
headers_aux.remove(header)
headers = headers_aux
if t and len(headers) > 0:
newHeaders = []
if headerTransform is None: # noqa
def headerTransform(s): return stringEscapeMD(s, True, True) # noqa
for header in headers:
newHeaders.append(headerTransform(header))
mdResult += '|'
if len(newHeaders) == 1:
mdResult += newHeaders[0]
else:
mdResult += '|'.join(newHeaders)
mdResult += '|\n'
sep = '---'
mdResult += '|' + '|'.join([sep] * len(headers)) + '|\n'
for entry in t:
entry_copy = entry.copy()
if date_fields:
for field in date_fields:
try:
entry_copy[field] = datetime.fromtimestamp(int(entry_copy[field])/1000).strftime('%Y-%m-%d %H:%M:%S')
except BaseException:
pass
vals = [stringEscapeMD((formatCell(entry_copy.get(h, ''), False) if entry_copy.get(h) is not None else ''),
True, True) for h in headers]
# this pipe is optional
mdResult += '| '
try:
mdResult += ' | '.join(vals)
except UnicodeDecodeError:
vals = [str(v) for v in vals]
mdResult += ' | '.join(vals)
mdResult += ' |\n'
else:
mdResult += '**No entries.**\n'
return mdResult
|
def tableToMarkdown(name, t, headers=None, headerTransform=None, removeNull=False, metadata=None, url_keys=None,
date_fields=None):
"""
Converts a demisto table in JSON form to a Markdown table
:type name: ``str``
:param name: The name of the table (required)
:type t: ``dict`` or ``list``
:param t: The JSON table - List of dictionaries with the same keys or a single dictionary (required)
:type headers: ``list`` or ``string``
:param headers: A list of headers to be presented in the output table (by order). If string will be passed
then table will have single header. Default will include all available headers.
:type headerTransform: ``function``
:param headerTransform: A function that formats the original data headers (optional)
:type removeNull: ``bool``
:param removeNull: Remove empty columns from the table. Default is False
:type metadata: ``str``
:param metadata: Metadata about the table contents
:type url_keys: ``list``
       :param url_keys: a list of keys in the given JSON table that should be turned into clickable links
:return: A string representation of the markdown table
:rtype: ``str``
"""
# Turning the urls in the table to clickable
if url_keys:
t = url_to_clickable_markdown(t, url_keys)
mdResult = ''
if name:
mdResult = '### ' + name + '\n'
if metadata:
mdResult += metadata + '\n'
if not t or len(t) == 0:
mdResult += '**No entries.**\n'
return mdResult
if not headers and isinstance(t, dict) and len(t.keys()) == 1:
# in case of a single key, create a column table where each element is in a different row.
headers = list(t.keys())
t = list(t.values())[0]
if not isinstance(t, list):
t = [t]
if headers and isinstance(headers, STRING_TYPES):
headers = [headers]
if not isinstance(t[0], dict):
# the table contains only simple objects (strings, numbers)
# should be only one header
if headers and len(headers) > 0:
header = headers[0]
t = [{header: item} for item in t]
else:
raise Exception("Missing headers param for tableToMarkdown. Example: headers=['Some Header']")
    # in case headers were not provided (backward compatibility)
if not headers:
headers = list(t[0].keys())
headers.sort()
if removeNull:
headers_aux = headers[:]
for header in headers:
if all(obj.get(header) in ('', None, [], {}) for obj in t):
headers_aux.remove(header)
headers = headers_aux
if t and len(headers) > 0:
newHeaders = []
if headerTransform is None: # noqa
def headerTransform(s): return stringEscapeMD(s, True, True) # noqa
for header in headers:
newHeaders.append(headerTransform(header))
mdResult += '|'
if len(newHeaders) == 1:
mdResult += newHeaders[0]
else:
mdResult += '|'.join(newHeaders)
mdResult += '|\n'
sep = '---'
mdResult += '|' + '|'.join([sep] * len(headers)) + '|\n'
for entry in t:
entry_copy = entry.copy()
if date_fields:
for field in date_fields:
try:
entry_copy[field] = datetime.fromtimestamp(int(entry_copy[field])/1000).strftime('%Y-%m-%d %H:%M:%S')
except Exception:
pass
vals = [stringEscapeMD((formatCell(entry_copy.get(h, ''), False) if entry_copy.get(h) is not None else ''),
True, True) for h in headers]
# this pipe is optional
mdResult += '| '
try:
mdResult += ' | '.join(vals)
except UnicodeDecodeError:
vals = [str(v) for v in vals]
mdResult += ' | '.join(vals)
mdResult += ' |\n'
else:
mdResult += '**No entries.**\n'
return mdResult
|
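A small, hypothetical call to the tableToMarkdown defined above; the row data and header names are invented, and the expected output in the comments assumes the helpers (formatCell, stringEscapeMD) pass plain strings through unchanged.

# Illustrative usage only; helpers come from the surrounding CommonServerPython module.
rows = [
    {'Name': 'host-1', 'IP': '10.0.0.1', 'Notes': ''},
    {'Name': 'host-2', 'IP': '10.0.0.2', 'Notes': None},
]
md = tableToMarkdown('Assets', rows, headers=['Name', 'IP', 'Notes'], removeNull=True)
print(md)
# ### Assets
# |Name|IP|
# |---|---|
# | host-1 | 10.0.0.1 |
# | host-2 | 10.0.0.2 |
# (the all-empty 'Notes' column is dropped because removeNull=True)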
17,895 |
def get_variation_from_env(envvar_value):
"""Return a tuple with variation data from the specific environment
variable key.
Raise an exception if the passed in envvar doesn't look something
like: '0 100'
"""
try:
# We want to create a tuple of integers from a string containing
# integers. Anything else should throw.
rv = tuple(int(x) for x in envvar_value.strip().split(' '))
        if len(rv) != 2:
raise Exception('The format is incorrect. Expected "{int} {int}"?')
except Exception as e:
print("Something went wrong: {0}".format(e))
raise e
return rv
|
def get_variation_from_env(envvar_value):
"""Return a tuple with variation data from the specific environment
variable key.
Raise an exception if the passed in envvar doesn't look something
like: '0 100'
"""
try:
# We want to create a tuple of integers from a string containing
# integers. Anything else should throw.
rv = tuple(int(x) for x in envvar_value.strip().split())
        if len(rv) != 2:
raise Exception('The format is incorrect. Expected "{int} {int}"?')
except Exception as e:
print("Something went wrong: {0}".format(e))
raise e
return rv
|
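A quick round trip through get_variation_from_env above; the environment variable name is made up for illustration.

import os

# Happy path: a well-formed "min max" pair parses into a tuple of ints.
os.environ['EXPERIMENT_VARIATION'] = '0 100'
assert get_variation_from_env(os.environ['EXPERIMENT_VARIATION']) == (0, 100)

# Failure path: anything that does not split into exactly two integers raises.
try:
    get_variation_from_env('0 50 100')
except Exception as exc:
    print('rejected:', exc)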
23,611 |
def singlediode(photocurrent, saturation_current, resistance_series,
resistance_shunt, nNsVth, ivcurve_pnts=None,
method='lambertw'):
r"""
Solve the single-diode model to obtain a photovoltaic IV curve.
Singlediode solves the single diode equation [1]_
.. math::
I = I_L -
I_0 \left[
\exp \left(\frac{V+I R_s}{n N_s V_{th}} \right)-1
\right] -
\frac{V + I R_s}{R_{sh}}
for :math:`I` and :math:`V` when given :math:`I_L, I_0, R_s, R_{sh},` and
:math:`n N_s V_{th}` which are described later. Returns a DataFrame
which contains the 5 points on the I-V curve specified in
SAND2004-3535 [3]_. If all :math:`I_L, I_0, R_s, R_{sh},` and
:math:`n N_s V_{th}` are scalar, a single curve will be returned, if any
are Series (of the same length), multiple IV curves will be calculated.
The input parameters can be calculated using
:py:func:`~pvlib.pvsystem.calcparams_desoto` from meteorological data.
Parameters
----------
photocurrent : numeric
Light-generated current :math:`I_L` (photocurrent) under desired
IV curve conditions. ``0 <= photocurrent``. [A]
saturation_current : numeric
Diode saturation :math:`I_0` current under desired IV curve
conditions. ``0 < saturation_current``. [A]
resistance_series : numeric
Series resistance :math:`R_s` under desired IV curve conditions.
``0 <= resistance_series < numpy.inf``. [ohms]
resistance_shunt : numeric
Shunt resistance :math:`R_{sh}` under desired IV curve conditions.
``0 < resistance_shunt <= numpy.inf``. [ohms]
nNsVth : numeric
The product of three components. 1) The usual diode ideal factor
:math:`n`, 2) the number of cells in series :math:`N_s`, and 3)
the cell thermal voltage under the desired IV curve conditions
:math:`V_{th}`. The thermal voltage of the cell (in volts) may be
calculated as :math:`k_B T_c / q`, where :math:`k_B` is
Boltzmann's constant (J/K), :math:`T_c` is the temperature of the p-n
junction in Kelvin, and :math:`q` is the charge of an electron
(coulombs). ``0 < nNsVth``. [V]
ivcurve_pnts : None or int, default None
Number of points in the desired IV curve. If None or 0, no
IV curves will be produced.
method : str, default 'lambertw'
Determines the method used to calculate points on the IV curve. The
options are ``'lambertw'``, ``'newton'``, or ``'brentq'``.
Returns
-------
OrderedDict or DataFrame
The returned dict-like object always contains the keys/columns:
* i_sc - short circuit current in amperes.
* v_oc - open circuit voltage in volts.
* i_mp - current at maximum power point in amperes.
* v_mp - voltage at maximum power point in volts.
* p_mp - power at maximum power point in watts.
* i_x - current, in amperes, at ``v = 0.5*v_oc``.
* i_xx - current, in amperes, at ``V = 0.5*(v_oc+v_mp)``.
If ivcurve_pnts is greater than 0, the output dictionary will also
include the keys:
* i - IV curve current in amperes.
* v - IV curve voltage in volts.
The output will be an OrderedDict if photocurrent is a scalar,
array, or ivcurve_pnts is not None.
The output will be a DataFrame if photocurrent is a Series and
ivcurve_pnts is None.
Notes
-----
If the method is ``'lambertw'`` then the solution employed to solve the
implicit diode equation utilizes the Lambert W function to obtain an
explicit function of :math:`V=f(I)` and :math:`I=f(V)` as shown in [2]_.
If the method is ``'newton'`` then the root-finding Newton-Raphson method
is used. It should be safe for well behaved IV-curves, but the ``'brentq'``
method is recommended for reliability.
If the method is ``'brentq'`` then Brent's bisection search method is used
that guarantees convergence by bounding the voltage between zero and
open-circuit.
If the method is either ``'newton'`` or ``'brentq'`` and ``ivcurve_pnts``
are indicated, then :func:`pvlib.singlediode.bishop88` [4]_ is used to
calculate the points on the IV curve points at diode voltages from zero to
open-circuit voltage with a log spacing that gets closer as voltage
increases. If the method is ``'lambertw'`` then the calculated points on
the IV curve are linearly spaced.
References
----------
.. [1] S.R. Wenham, M.A. Green, M.E. Watt, "Applied Photovoltaics" ISBN
0 86758 909 4
.. [2] A. Jain, A. Kapoor, "Exact analytical solutions of the
parameters of real solar cells using Lambert W-function", Solar
Energy Materials and Solar Cells, 81 (2004) 269-277.
.. [3] D. King et al, "Sandia Photovoltaic Array Performance Model",
SAND2004-3535, Sandia National Laboratories, Albuquerque, NM
.. [4] "Computer simulation of the effects of electrical mismatches in
photovoltaic cell interconnection circuits" JW Bishop, Solar Cell (1988)
https://doi.org/10.1016/0379-6787(88)90059-2
See also
--------
sapm
calcparams_desoto
pvlib.singlediode.bishop88
"""
# Calculate points on the IV curve using the LambertW solution to the
# single diode equation
if method.lower() == 'lambertw':
out = _singlediode._lambertw(
photocurrent, saturation_current, resistance_series,
resistance_shunt, nNsVth, ivcurve_pnts
)
i_sc, v_oc, i_mp, v_mp, p_mp, i_x, i_xx = out[:7]
if ivcurve_pnts:
ivcurve_i, ivcurve_v = out[7:]
else:
# Calculate points on the IV curve using either 'newton' or 'brentq'
# methods. Voltages are determined by first solving the single diode
# equation for the diode voltage V_d then backing out voltage
args = (photocurrent, saturation_current, resistance_series,
resistance_shunt, nNsVth) # collect args
v_oc = _singlediode.bishop88_v_from_i(
0.0, *args, method=method.lower()
)
i_mp, v_mp, p_mp = _singlediode.bishop88_mpp(
*args, method=method.lower()
)
i_sc = _singlediode.bishop88_i_from_v(
0.0, *args, method=method.lower()
)
i_x = _singlediode.bishop88_i_from_v(
v_oc / 2.0, *args, method=method.lower()
)
i_xx = _singlediode.bishop88_i_from_v(
(v_oc + v_mp) / 2.0, *args, method=method.lower()
)
# calculate the IV curve if requested using bishop88
if ivcurve_pnts:
vd = v_oc * (
(11.0 - np.logspace(np.log10(11.0), 0.0,
ivcurve_pnts)) / 10.0
)
ivcurve_i, ivcurve_v, _ = _singlediode.bishop88(vd, *args)
out = OrderedDict()
out['i_sc'] = i_sc
out['v_oc'] = v_oc
out['i_mp'] = i_mp
out['v_mp'] = v_mp
out['p_mp'] = p_mp
out['i_x'] = i_x
out['i_xx'] = i_xx
if ivcurve_pnts:
out['v'] = ivcurve_v
out['i'] = ivcurve_i
if isinstance(photocurrent, pd.Series) and not ivcurve_pnts:
out = pd.DataFrame(out, index=photocurrent.index)
return out
|
def singlediode(photocurrent, saturation_current, resistance_series,
resistance_shunt, nNsVth, ivcurve_pnts=None,
method='lambertw'):
r"""
Solve the single-diode model to obtain a photovoltaic IV curve.
Singlediode solves the single diode equation [1]_
.. math::
I = I_L -
I_0 \left[
\exp \left(\frac{V+I R_s}{n N_s V_{th}} \right)-1
\right] -
\frac{V + I R_s}{R_{sh}}
for :math:`I` and :math:`V` when given :math:`I_L, I_0, R_s, R_{sh},` and
:math:`n N_s V_{th}` which are described later. Returns a DataFrame
which contains the 5 points on the I-V curve specified in
SAND2004-3535 [3]_. If all :math:`I_L, I_0, R_s, R_{sh},` and
:math:`n N_s V_{th}` are scalar, a single curve will be returned, if any
are Series (of the same length), multiple IV curves will be calculated.
The input parameters can be calculated using
:py:func:`~pvlib.pvsystem.calcparams_desoto` from meteorological data.
Parameters
----------
photocurrent : numeric
Light-generated current :math:`I_L` (photocurrent) under desired
IV curve conditions. ``0 <= photocurrent``. [A]
saturation_current : numeric
Diode saturation :math:`I_0` current under desired IV curve
conditions. ``0 < saturation_current``. [A]
resistance_series : numeric
Series resistance :math:`R_s` under desired IV curve conditions.
``0 <= resistance_series < numpy.inf``. [ohms]
resistance_shunt : numeric
Shunt resistance :math:`R_{sh}` under desired IV curve conditions.
``0 < resistance_shunt <= numpy.inf``. [ohms]
nNsVth : numeric
The product of three components. 1) The usual diode ideal factor
:math:`n`, 2) the number of cells in series :math:`N_s`, and 3)
the cell thermal voltage
:math:`V_{th}`. The thermal voltage of the cell (in volts) may be
calculated as :math:`k_B T_c / q`, where :math:`k_B` is
Boltzmann's constant (J/K), :math:`T_c` is the temperature of the p-n
junction in Kelvin, and :math:`q` is the charge of an electron
(coulombs). ``0 < nNsVth``. [V]
ivcurve_pnts : None or int, default None
Number of points in the desired IV curve. If None or 0, no
IV curves will be produced.
method : str, default 'lambertw'
Determines the method used to calculate points on the IV curve. The
options are ``'lambertw'``, ``'newton'``, or ``'brentq'``.
Returns
-------
OrderedDict or DataFrame
The returned dict-like object always contains the keys/columns:
* i_sc - short circuit current in amperes.
* v_oc - open circuit voltage in volts.
* i_mp - current at maximum power point in amperes.
* v_mp - voltage at maximum power point in volts.
* p_mp - power at maximum power point in watts.
* i_x - current, in amperes, at ``v = 0.5*v_oc``.
* i_xx - current, in amperes, at ``V = 0.5*(v_oc+v_mp)``.
If ivcurve_pnts is greater than 0, the output dictionary will also
include the keys:
* i - IV curve current in amperes.
* v - IV curve voltage in volts.
The output will be an OrderedDict if photocurrent is a scalar,
array, or ivcurve_pnts is not None.
The output will be a DataFrame if photocurrent is a Series and
ivcurve_pnts is None.
Notes
-----
If the method is ``'lambertw'`` then the solution employed to solve the
implicit diode equation utilizes the Lambert W function to obtain an
explicit function of :math:`V=f(I)` and :math:`I=f(V)` as shown in [2]_.
If the method is ``'newton'`` then the root-finding Newton-Raphson method
is used. It should be safe for well behaved IV-curves, but the ``'brentq'``
method is recommended for reliability.
If the method is ``'brentq'`` then Brent's bisection search method is used
that guarantees convergence by bounding the voltage between zero and
open-circuit.
If the method is either ``'newton'`` or ``'brentq'`` and ``ivcurve_pnts``
are indicated, then :func:`pvlib.singlediode.bishop88` [4]_ is used to
calculate the points on the IV curve points at diode voltages from zero to
open-circuit voltage with a log spacing that gets closer as voltage
increases. If the method is ``'lambertw'`` then the calculated points on
the IV curve are linearly spaced.
References
----------
.. [1] S.R. Wenham, M.A. Green, M.E. Watt, "Applied Photovoltaics" ISBN
0 86758 909 4
.. [2] A. Jain, A. Kapoor, "Exact analytical solutions of the
parameters of real solar cells using Lambert W-function", Solar
Energy Materials and Solar Cells, 81 (2004) 269-277.
.. [3] D. King et al, "Sandia Photovoltaic Array Performance Model",
SAND2004-3535, Sandia National Laboratories, Albuquerque, NM
.. [4] "Computer simulation of the effects of electrical mismatches in
photovoltaic cell interconnection circuits" JW Bishop, Solar Cell (1988)
https://doi.org/10.1016/0379-6787(88)90059-2
See also
--------
sapm
calcparams_desoto
pvlib.singlediode.bishop88
"""
# Calculate points on the IV curve using the LambertW solution to the
# single diode equation
if method.lower() == 'lambertw':
out = _singlediode._lambertw(
photocurrent, saturation_current, resistance_series,
resistance_shunt, nNsVth, ivcurve_pnts
)
i_sc, v_oc, i_mp, v_mp, p_mp, i_x, i_xx = out[:7]
if ivcurve_pnts:
ivcurve_i, ivcurve_v = out[7:]
else:
# Calculate points on the IV curve using either 'newton' or 'brentq'
# methods. Voltages are determined by first solving the single diode
# equation for the diode voltage V_d then backing out voltage
args = (photocurrent, saturation_current, resistance_series,
resistance_shunt, nNsVth) # collect args
v_oc = _singlediode.bishop88_v_from_i(
0.0, *args, method=method.lower()
)
i_mp, v_mp, p_mp = _singlediode.bishop88_mpp(
*args, method=method.lower()
)
i_sc = _singlediode.bishop88_i_from_v(
0.0, *args, method=method.lower()
)
i_x = _singlediode.bishop88_i_from_v(
v_oc / 2.0, *args, method=method.lower()
)
i_xx = _singlediode.bishop88_i_from_v(
(v_oc + v_mp) / 2.0, *args, method=method.lower()
)
# calculate the IV curve if requested using bishop88
if ivcurve_pnts:
vd = v_oc * (
(11.0 - np.logspace(np.log10(11.0), 0.0,
ivcurve_pnts)) / 10.0
)
ivcurve_i, ivcurve_v, _ = _singlediode.bishop88(vd, *args)
out = OrderedDict()
out['i_sc'] = i_sc
out['v_oc'] = v_oc
out['i_mp'] = i_mp
out['v_mp'] = v_mp
out['p_mp'] = p_mp
out['i_x'] = i_x
out['i_xx'] = i_xx
if ivcurve_pnts:
out['v'] = ivcurve_v
out['i'] = ivcurve_i
if isinstance(photocurrent, pd.Series) and not ivcurve_pnts:
out = pd.DataFrame(out, index=photocurrent.index)
return out
|
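A minimal sketch of calling the singlediode function above through pvlib.pvsystem; the numeric inputs are illustrative placeholders, not calibrated module parameters.

import pvlib

# Scalar inputs produce a single OrderedDict with i_sc, v_oc, i_mp, v_mp, p_mp, i_x, i_xx.
out = pvlib.pvsystem.singlediode(
    photocurrent=7.0,          # I_L [A]
    saturation_current=6e-10,  # I_0 [A]
    resistance_series=0.5,     # R_s [ohm]
    resistance_shunt=300.0,    # R_sh [ohm]
    nNsVth=1.5,                # n * N_s * V_th [V]
    ivcurve_pnts=5,            # also return 5 points along the I-V curve
    method='lambertw',
)
print(out['p_mp'], out['v_oc'])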
54,474 |
def main():
# Settings
# set number of Optuna trials
TRIALS = 20
# set number of CV folds
FOLDS = 5
# the name of the validation part of each fold,
# passed to *both* 'eval_names' arg of .fit()
# and to 'valid_name' arg of LightGBMPruningCallback()
CV_EVAL_NAME = "valid"
# whether to use early stopping
USE_STOPPING = True
# fix number of boosting rounds
# note: will be overridden if USE_STOPPING is True
FIXED_ROUNDS = -1
if not USE_STOPPING:
FIXED_ROUNDS = 500
# set model objective
OBJECTIVE = "binary"
# OBJECTIVE = "regression"
# set metric for model training and validation
# as well as its optimization direction
if OBJECTIVE in ["binary", "classification"]:
METRIC = "auc"
OPT_DIR = "maximize"
elif OBJECTIVE in ["regression"]:
METRIC = "rmse"
OPT_DIR = "minimize"
# whether to make Optuna experiment reproducible
# (with a repeatable set of metrics for each set of seeds)
# at the cost of performance (single worker)
MAKE_REPRODUCIBLE = True
# define the number of threads
# to be used by lightgbm
# in each of the workers
LGBM_NUM_THREADS = 2
# set number of Optuna workers
WORKERS_NUM = 2
if MAKE_REPRODUCIBLE:
# turn off multiprocessing in Optuna samplers to ensure reproducible results
# (https://optuna.readthedocs.io/en/stable/faq.html#how-can-i-obtain-reproducible-optimization-results)
WORKERS_NUM = 1
# fix seed for data partitioning (train_test_split())
    # and model training (lgbm.fit())
SEED = 123
# select sampler for Optuna:
# (see more samplers at: https://optuna.readthedocs.io/en/stable/reference/samplers.html)
# - TPE (Tree-structured Parzen Estimator) sampling algo;
# note we fix sampler seed to make the sampling process deterministic
SAMPLER_SEED = 123
OPTUNA_SAMPLER = TPESampler(seed=SAMPLER_SEED)
# whether to prune unpromising trials to speed up studies
USE_PRUNING = True
# select pruner for Optuna:
# (see more pruners at: https://optuna.readthedocs.io/en/latest/reference/pruners.html)
# - median pruner
OPTUNA_PRUNER = MedianPruner()
# set verbosity level for lightgbm
VERBOSE_LGBM = -1
# Experiment
# Load data
train_x_df, _, train_y_df, _ = load_sklearn_toy_data(
objective=OBJECTIVE, test_size=0.01, as_df=True, seed=SEED
)
# Optimize params using Optuna
optuna_best_metrics = {}
optuna_best_params = {}
# Instantiate the custom function
objective = Objective_custom(
train_x_df,
train_y_df,
objective=OBJECTIVE,
eval_metric=METRIC,
eval_name=CV_EVAL_NAME,
folds=FOLDS,
fixed_rounds=FIXED_ROUNDS,
use_stopping=USE_STOPPING,
use_pruning=USE_PRUNING,
n_jobs=LGBM_NUM_THREADS,
seed=SEED,
verbosity=VERBOSE_LGBM,
)
study = optuna.create_study(sampler=OPTUNA_SAMPLER, pruner=OPTUNA_PRUNER, direction=OPT_DIR)
start_time = datetime.now()
# run Optuna optimization over the specified number of trials
study.optimize(objective, n_trials=TRIALS, n_jobs=WORKERS_NUM)
optim_time_custom = datetime.now() - start_time
print(
"\nOptuna+custom fun. study with %d trials took %.2f s. Time per trial: %.2f s."
% (TRIALS, optim_time_custom.total_seconds(), optim_time_custom.total_seconds() / TRIALS)
)
optuna_best_metrics["custom"] = study.best_value
best_trial = study.best_trial
optimized_best_params = best_trial.params
# append static parameters not returned by Optuna
static_params = {
"boosting_type": "gbdt",
"metric": METRIC,
"objective": OBJECTIVE,
"n_jobs": LGBM_NUM_THREADS,
"seed": SEED,
"verbosity": VERBOSE_LGBM,
}
all_best_params = {**optimized_best_params, **static_params}
optuna_best_params["custom"] = all_best_params
print(
"\nBest %s metric for optuna+custom fun. (CV models mean reported by Optuna): %.5f\n"
% (METRIC, optuna_best_metrics["custom"])
)
print("Optuna-optimized best hyperparameters: ")
pprint(optuna_best_params["custom"])
|
def main():
# Settings
# set number of Optuna trials
TRIALS = 20
# set number of CV folds
FOLDS = 5
# the name of the validation part of each fold,
# passed to *both* 'eval_names' arg of .fit()
# and to 'valid_name' arg of LightGBMPruningCallback()
CV_EVAL_NAME = "valid"
# whether to use early stopping
USE_STOPPING = True
# fix number of boosting rounds
# note: will be overridden if USE_STOPPING is True
FIXED_ROUNDS = -1
if not USE_STOPPING:
FIXED_ROUNDS = 500
# set model objective
OBJECTIVE = "binary"
# OBJECTIVE = "regression"
# set metric for model training and validation
# as well as its optimization direction
if OBJECTIVE in ["binary", "classification"]:
METRIC = "auc"
OPT_DIR = "maximize"
elif OBJECTIVE in ["regression"]:
METRIC = "rmse"
OPT_DIR = "minimize"
# whether to make Optuna experiment reproducible
# (with a repeatable set of metrics for each set of seeds)
# at the cost of performance (single worker)
MAKE_REPRODUCIBLE = True
# define the number of threads
# to be used by lightgbm
# in each of the workers
LGBM_NUM_THREADS = 2
# set number of Optuna workers
WORKERS_NUM = 2
if MAKE_REPRODUCIBLE:
# turn off multiprocessing in Optuna samplers to ensure reproducible results
# (https://optuna.readthedocs.io/en/stable/faq.html#how-can-i-obtain-reproducible-optimization-results)
WORKERS_NUM = 1
# fix seed for data partitioning (train_test_split())
    # and model training (lgbm.fit())
SEED = 123
# select sampler for Optuna:
# (see more samplers at: https://optuna.readthedocs.io/en/stable/reference/samplers.html)
# - TPE (Tree-structured Parzen Estimator) sampling algo;
# note we fix sampler seed to make the sampling process deterministic
SAMPLER_SEED = 123
OPTUNA_SAMPLER = TPESampler(seed=SAMPLER_SEED)
# whether to prune unpromising trials to speed up studies
USE_PRUNING = True
# select pruner for Optuna:
# (see more pruners at: https://optuna.readthedocs.io/en/latest/reference/pruners.html)
# - median pruner
OPTUNA_PRUNER = MedianPruner()
# set verbosity level for lightgbm
VERBOSE_LGBM = -1
# Experiment
# Load data
train_x_df, _, train_y_df, _ = load_sklearn_toy_data(
objective=OBJECTIVE, test_size=0.01, as_df=True, seed=SEED
)
# Optimize params using Optuna
optuna_best_metrics = {}
optuna_best_params = {}
# Instantiate the custom function
objective = Objective_custom(
train_x_df,
train_y_df,
objective=OBJECTIVE,
eval_metric=METRIC,
eval_name=CV_EVAL_NAME,
folds=FOLDS,
fixed_rounds=FIXED_ROUNDS,
use_stopping=USE_STOPPING,
use_pruning=USE_PRUNING,
n_jobs=LGBM_NUM_THREADS,
seed=SEED,
verbosity=VERBOSE_LGBM,
)
study = optuna.create_study(sampler=OPTUNA_SAMPLER, pruner=OPTUNA_PRUNER, direction=OPT_DIR)
start_time = datetime.now()
# run Optuna optimization over the specified number of trials
study.optimize(objective, n_trials=TRIALS, n_jobs=WORKERS_NUM)
optim_time_custom = datetime.now() - start_time
print(
"\nOptuna+custom fun. study with %d trials took %.2f s. Time per trial: %.2f s."
% (TRIALS, optim_time_custom.total_seconds(), optim_time_custom.total_seconds() / TRIALS)
)
optuna_best_metrics["custom"] = study.best_value
best_trial = study.best_trial
optimized_best_params = best_trial.params
# append static parameters not returned by Optuna
static_params = {
"boosting_type": "gbdt",
"metric": METRIC,
"objective": OBJECTIVE,
"n_jobs": LGBM_NUM_THREADS,
"seed": SEED,
"verbosity": VERBOSE_LGBM,
}
all_best_params = {**optimized_best_params, **static_params}
optuna_best_params["custom"] = all_best_params
print(
"\nBest {} metric for optuna+custom fun. (CV models mean reported by Optuna): {:.5f}\n".format(
METRIC, optuna_best_metrics["custom"]
)
)
print("Optuna-optimized best hyperparameters: ")
pprint(optuna_best_params["custom"])
|
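A stripped-down sketch of the Optuna setup used in main() above; the toy objective below stands in for Objective_custom, which is defined elsewhere and not shown here.

import optuna
from optuna.samplers import TPESampler
from optuna.pruners import MedianPruner


def toy_objective(trial):
    # Placeholder objective: tune one hyperparameter and return a score to maximize.
    x = trial.suggest_float("learning_rate", 1e-3, 0.3, log=True)
    return 1.0 - abs(x - 0.05)


study = optuna.create_study(
    sampler=TPESampler(seed=123),  # fixed sampler seed -> deterministic sampling
    pruner=MedianPruner(),
    direction="maximize",
)
study.optimize(toy_objective, n_trials=20, n_jobs=1)  # single worker for reproducibility
print(study.best_value, study.best_trial.params)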
23,145 |
def store(
sources: Array | Collection[Array],
targets: Array | Collection[Array],
lock: bool | Lock = True,
regions: tuple[slice, ...] | Collection[tuple[slice, ...]] | None = None,
compute: bool = True,
return_stored: bool = False,
**kwargs,
):
"""Store dask arrays in array-like objects, overwrite data in target
    This stores dask arrays into objects that support numpy-style setitem
indexing. It stores values chunk by chunk so that it does not have to
fill up memory. For best performance you can align the block size of
the storage target with the block size of your array.
If your data fits in memory then you may prefer calling
``np.array(myarray)`` instead.
Parameters
----------
sources: Array or collection of Arrays
targets: array-like or Delayed or collection of array-likes and/or Delayeds
These should support setitem syntax ``target[10:20] = ...``
lock: boolean or threading.Lock, optional
Whether or not to lock the data stores while storing.
Pass True (lock each file individually), False (don't lock) or a
particular :class:`threading.Lock` object to be shared among all writes.
regions: tuple of slices or collection of tuples of slices
Each ``region`` tuple in ``regions`` should be such that
``target[region].shape = source.shape``
for the corresponding source and target in sources and targets,
respectively. If this is a tuple, the contents will be assumed to be
slices, so do not provide a tuple of tuples.
compute: boolean, optional
If true compute immediately; return :class:`dask.delayed.Delayed` otherwise.
return_stored: boolean, optional
Optionally return the stored result (default False).
kwargs:
Parameters passed to compute/persist (only used if compute=True)
Returns
-------
If return_stored=True
tuple of Arrays
If return_stored=False and compute=True
None
If return_stored=False and compute=False
Delayed
Examples
--------
>>> import h5py # doctest: +SKIP
>>> f = h5py.File('myfile.hdf5', mode='a') # doctest: +SKIP
>>> dset = f.create_dataset('/data', shape=x.shape,
... chunks=x.chunks,
... dtype='f8') # doctest: +SKIP
>>> store(x, dset) # doctest: +SKIP
Alternatively store many arrays at the same time
>>> store([x, y, z], [dset1, dset2, dset3]) # doctest: +SKIP
"""
if isinstance(sources, Array):
sources = [sources]
targets = [targets] # type: ignore
if any(not isinstance(s, Array) for s in sources):
raise ValueError("All sources must be dask array objects")
if len(sources) != len(targets):
raise ValueError(
"Different number of sources [%d] and targets [%d]"
% (len(sources), len(targets))
)
if isinstance(regions, tuple) or regions is None:
regions = [regions] # type: ignore
if len(sources) > 1 and len(regions) == 1:
regions *= len(sources) # type: ignore
if len(sources) != len(regions):
raise ValueError(
"Different number of sources [%d] and targets [%d] than regions [%d]"
% (len(sources), len(targets), len(regions))
)
# Optimize all sources together
sources_hlg = HighLevelGraph.merge(*[e.__dask_graph__() for e in sources])
sources_layer = Array.__dask_optimize__(
sources_hlg, list(core.flatten([e.__dask_keys__() for e in sources]))
)
sources_name = "store-sources-" + tokenize(sources)
layers = {sources_name: sources_layer}
dependencies: dict[str, set] = {sources_name: set()}
# Optimize all targets together
targets_keys = []
targets_dsks = []
for t in targets:
if isinstance(t, Delayed):
targets_keys.append(t.key)
targets_dsks.append(t.__dask_graph__())
elif is_dask_collection(t):
raise TypeError("Targets must be either Delayed objects or array-likes")
if targets_dsks:
targets_hlg = HighLevelGraph.merge(*targets_dsks)
targets_layer = Delayed.__dask_optimize__(targets_hlg, targets_keys)
targets_name = "store-targets-" + tokenize(targets_keys)
layers[targets_name] = targets_layer
dependencies[targets_name] = set()
load_stored = return_stored and not compute
map_names = [
"store-map-" + tokenize(s, t if isinstance(t, Delayed) else id(t), r)
for s, t, r in zip(sources, targets, regions)
]
map_keys: list = []
for s, t, n, r in zip(sources, targets, map_names, regions):
map_layer = insert_to_ooc(
keys=s.__dask_keys__(),
chunks=s.chunks,
out=t.key if isinstance(t, Delayed) else t,
name=n,
lock=lock,
region=r,
return_stored=return_stored,
load_stored=load_stored,
)
layers[n] = map_layer
if isinstance(t, Delayed):
dependencies[n] = {sources_name, targets_name}
else:
dependencies[n] = {sources_name}
map_keys += map_layer.keys()
if return_stored:
store_dsk = HighLevelGraph(layers, dependencies)
load_store_dsk: HighLevelGraph | Mapping = store_dsk
if compute:
store_dlyds = [Delayed(k, store_dsk, layer=k[0]) for k in map_keys]
store_dlyds = persist(*store_dlyds, **kwargs)
store_dsk_2 = HighLevelGraph.merge(*[e.dask for e in store_dlyds])
load_store_dsk = retrieve_from_ooc(map_keys, store_dsk, store_dsk_2)
map_names = ["load-" + n for n in map_names]
return tuple(
Array(load_store_dsk, n, s.chunks, meta=s)
for s, n in zip(sources, map_names)
)
elif compute:
store_dsk = HighLevelGraph(layers, dependencies)
compute_as_if_collection(Array, store_dsk, map_keys, **kwargs)
return None
else:
key = "store-" + tokenize(map_names)
layers[key] = {key: map_keys}
dependencies[key] = set(map_names)
store_dsk = HighLevelGraph(layers, dependencies)
return Delayed(key, store_dsk)
|
def store(
sources: Array | Collection[Array],
targets: Array | Collection[Array],
lock: bool | Lock = True,
regions: tuple[slice, ...] | Collection[tuple[slice, ...]] | None = None,
compute: bool = True,
return_stored: bool = False,
**kwargs,
):
"""Store dask arrays in array-like objects, overwrite data in target
    This stores dask arrays into objects that support numpy-style setitem
indexing. It stores values chunk by chunk so that it does not have to
fill up memory. For best performance you can align the block size of
the storage target with the block size of your array.
If your data fits in memory then you may prefer calling
``np.array(myarray)`` instead.
Parameters
----------
sources: Array or collection of Arrays
targets: array-like or Delayed or collection of array-likes and/or Delayeds
These should support setitem syntax ``target[10:20] = ...``
lock: boolean or threading.Lock, optional
Whether or not to lock the data stores while storing.
Pass True (lock each file individually), False (don't lock) or a
particular :class:`threading.Lock` object to be shared among all writes.
regions: tuple of slices or collection of tuples of slices
Each ``region`` tuple in ``regions`` should be such that
``target[region].shape = source.shape``
for the corresponding source and target in sources and targets,
respectively. If this is a tuple, the contents will be assumed to be
slices, so do not provide a tuple of tuples.
compute: boolean, optional
If true compute immediately; return :class:`dask.delayed.Delayed` otherwise.
return_stored: boolean, optional
Optionally return the stored result (default False).
kwargs:
Parameters passed to compute/persist (only used if compute=True)
Returns
-------
If return_stored=True
tuple of Arrays
If return_stored=False and compute=True
None
If return_stored=False and compute=False
Delayed
Examples
--------
>>> import h5py # doctest: +SKIP
>>> f = h5py.File('myfile.hdf5', mode='a') # doctest: +SKIP
>>> dset = f.create_dataset('/data', shape=x.shape,
... chunks=x.chunks,
... dtype='f8') # doctest: +SKIP
>>> store(x, dset) # doctest: +SKIP
Alternatively store many arrays at the same time
>>> store([x, y, z], [dset1, dset2, dset3]) # doctest: +SKIP
"""
if isinstance(sources, Array):
sources = [sources]
targets = [targets] # type: ignore
if any(not isinstance(s, Array) for s in sources):
raise ValueError("All sources must be dask array objects")
if len(sources) != len(targets):
raise ValueError(
"Different number of sources [%d] and targets [%d]"
% (len(sources), len(targets))
)
if isinstance(regions, tuple) or regions is None:
regions = [regions] # type: ignore
if len(sources) > 1 and len(regions) == 1:
regions *= len(sources) # type: ignore
if len(sources) != len(regions):
raise ValueError(
"Different number of sources [%d] and targets [%d] than regions [%d]"
% (len(sources), len(targets), len(regions))
)
# Optimize all sources together
sources_hlg = HighLevelGraph.merge(*[e.__dask_graph__() for e in sources])
sources_layer = Array.__dask_optimize__(
sources_hlg, list(core.flatten([e.__dask_keys__() for e in sources]))
)
sources_name = "store-sources-" + tokenize(sources)
layers = {sources_name: sources_layer}
dependencies: dict[str, set] = {sources_name: set()}
# Optimize all targets together
targets_keys = []
targets_dsks = []
for t in targets:
if isinstance(t, Delayed):
targets_keys.append(t.key)
targets_dsks.append(t.__dask_graph__())
elif is_dask_collection(t):
raise TypeError("Targets must be either Delayed objects or array-likes")
if targets_dsks:
targets_hlg = HighLevelGraph.merge(*targets_dsks)
targets_layer = Delayed.__dask_optimize__(targets_hlg, targets_keys)
targets_name = "store-targets-" + tokenize(targets_keys)
layers[targets_name] = targets_layer
dependencies[targets_name] = set()
load_stored = return_stored and not compute
map_names = [
"store-map-" + tokenize(s, t if isinstance(t, Delayed) else id(t), r)
for s, t, r in zip(sources, targets, regions)
]
map_keys: list = []
for s, t, n, r in zip(sources, targets, map_names, regions):
map_layer = insert_to_ooc(
keys=s.__dask_keys__(),
chunks=s.chunks,
out=t.key if isinstance(t, Delayed) else t,
name=n,
lock=lock,
region=r,
return_stored=return_stored,
load_stored=load_stored,
)
layers[n] = map_layer
if isinstance(t, Delayed):
dependencies[n] = {sources_name, targets_name}
else:
dependencies[n] = {sources_name}
map_keys += map_layer.keys()
if return_stored:
store_dsk = HighLevelGraph(layers, dependencies)
load_store_dsk: HighLevelGraph | dict = store_dsk
if compute:
store_dlyds = [Delayed(k, store_dsk, layer=k[0]) for k in map_keys]
store_dlyds = persist(*store_dlyds, **kwargs)
store_dsk_2 = HighLevelGraph.merge(*[e.dask for e in store_dlyds])
load_store_dsk = retrieve_from_ooc(map_keys, store_dsk, store_dsk_2)
map_names = ["load-" + n for n in map_names]
return tuple(
Array(load_store_dsk, n, s.chunks, meta=s)
for s, n in zip(sources, map_names)
)
elif compute:
store_dsk = HighLevelGraph(layers, dependencies)
compute_as_if_collection(Array, store_dsk, map_keys, **kwargs)
return None
else:
key = "store-" + tokenize(map_names)
layers[key] = {key: map_keys}
dependencies[key] = set(map_names)
store_dsk = HighLevelGraph(layers, dependencies)
return Delayed(key, store_dsk)
|
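A self-contained example of store() writing a dask array into a preallocated NumPy target, which supports the setitem protocol the docstring requires; the array sizes are arbitrary.

import numpy as np
import dask.array as da

x = da.arange(12, chunks=4)
target = np.zeros(12, dtype=x.dtype)  # any object supporting target[slice] = values works

# Eager store: writes chunk by chunk and returns None.
da.store(x, target)
assert (target == np.arange(12)).all()

# Lazy store: compute=False returns a Delayed that performs the writes when computed.
delayed_write = da.store(x + 1, target, compute=False)
delayed_write.compute()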
25,351 |
def strikethrough(text: str, escape_formatting: bool = True) -> str:
"""Get the given text with a strikethrough.
Note: This escapes text prior to applying a strikethrough
Parameters
----------
text : str
The text to be marked up.
escape_formatting : `bool`, optional
Set to :code:`False` to not escape markdown formatting in the text.
Returns
-------
str
The marked up text.
"""
text = escape(text, formatting=escape_formatting)
return "~~{}~~".format(text)
|
def strikethrough(text: str, escape_formatting: bool = True) -> str:
"""Get the given text with a strikethrough.
Note: By default, this function will escape ``text`` prior to applying a strikethrough.
Parameters
----------
text : str
The text to be marked up.
escape_formatting : `bool`, optional
Set to :code:`False` to not escape markdown formatting in the text.
Returns
-------
str
The marked up text.
"""
text = escape(text, formatting=escape_formatting)
return "~~{}~~".format(text)
|
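A one-line usage note for strikethrough(); the expected output assumes escape() leaves the text untouched when formatting escaping is disabled.

print(strikethrough("done", escape_formatting=False))  # -> ~~done~~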
11,067 |
def replace_metacharacters(pattern):
r"""
Replace metacharacters in regex patterns that should not be present in
simplified patterns. E.g.,
1. ^/\b<var>\B/ ==> /<var>/
2. /<slug>/$ ==> /<slug>/
"""
pattern = re.sub(
r"(\\b|\\B|[\^\$\?])",
"",
pattern
)
return pattern
|
def replace_metacharacters(pattern):
r"""
Replace metacharacters in regex patterns that should not be present in
simplified patterns. E.g.,
1. ^/\b<var>\B/ ==> /<var>/
2. /<slug>/$ ==> /<slug>/
"""
return re.sub(r'(\\[ABZb]|[?$^])', '', pattern)
|
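The two docstring cases of replace_metacharacters, exercised directly; nothing beyond the re module already imported by the function is needed.

# Anchors and word-boundary escapes are stripped; the simplified pattern remains.
assert replace_metacharacters(r"^/\b<var>\B/") == "/<var>/"
assert replace_metacharacters(r"/<slug>/$") == "/<slug>/"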
32,365 |
def main():
user_profile = None
params = demisto.params()
base_url = urljoin(params['url'].strip('/'), '/api/v1/')
token = params.get('apitoken')
mapper_in = params.get('mapper-in')
mapper_out = params.get('mapper-out')
verify_certificate = not params.get('insecure', False)
proxy = params.get('proxy', False)
command = demisto.command()
args = demisto.args()
is_create_enabled = params.get("create-user-enabled")
is_enable_enabled = params.get("enable-user-enabled")
is_disable_enabled = params.get("disable-user-enabled")
is_update_enabled = demisto.params().get("update-user-enabled")
create_if_not_exists = demisto.params().get("create-if-not-exists")
is_fetch = params.get('isFetch')
first_fetch_str = params.get('first_fetch')
fetch_limit = int(params.get('max_fetch', 1))
auto_generate_query_filter = params.get('auto_generate_query_filter')
fetch_query_filter = params.get('fetch_query_filter')
context = demisto.getIntegrationContext()
headers = {
'Content-Type': 'application/json',
'Accept': 'application/json',
'Authorization': f'SSWS {token}'
}
client = Client(
base_url=base_url,
verify=verify_certificate,
proxy=proxy,
headers=headers,
ok_codes=(200,)
)
demisto.debug(f'Command being called is {command}')
if command == 'iam-get-user':
user_profile = get_user_command(client, args, mapper_in, mapper_out)
elif command == 'iam-create-user':
user_profile = create_user_command(client, args, mapper_out, is_create_enabled,
is_update_enabled, is_enable_enabled)
elif command == 'iam-update-user':
user_profile = update_user_command(client, args, mapper_out, is_update_enabled, is_enable_enabled,
is_create_enabled, create_if_not_exists)
elif command == 'iam-disable-user':
user_profile = disable_user_command(client, args, is_disable_enabled, mapper_out)
if user_profile:
return_results(user_profile)
try:
if command == 'test-module':
test_module(client, is_fetch, fetch_query_filter, auto_generate_query_filter, context, first_fetch_str)
elif command == 'get-mapping-fields':
return_results(get_mapping_fields_command(client))
elif command == 'okta-get-app-user-assignment':
return_results(get_app_user_assignment_command(client, args))
elif command == 'okta-iam-list-applications':
return_results(list_apps_command(client, args))
elif command == 'okta-iam-list-user-applications':
return_results(list_user_apps_command(client, args))
elif command == 'okta-iam-get-configuration':
return_results(get_configuration(context))
elif command == 'okta-iam-set-configuration':
context = set_configuration(args)
demisto.setIntegrationContext(context)
elif command == 'iam-get-group':
return_results(get_group_command(client, args))
elif command == 'okta-get-logs':
return_results(get_logs_command(client, args))
elif command == 'fetch-incidents':
last_run = demisto.getLastRun()
context = demisto.getIntegrationContext()
incidents, next_run = fetch_incidents(client, last_run, first_fetch_str, fetch_limit,
fetch_query_filter, auto_generate_query_filter, context)
demisto.incidents(incidents)
demisto.setLastRun(next_run)
except Exception:
# For any other integration command exception, return an error
return_error(f'Failed to execute {command} command.')
|
def main():
user_profile = None
params = demisto.params()
base_url = urljoin(params['url'].strip('/'), '/api/v1/')
token = params.get('apitoken')
mapper_in = params.get('mapper-in')
mapper_out = params.get('mapper-out')
verify_certificate = not params.get('insecure', False)
proxy = params.get('proxy', False)
command = demisto.command()
args = demisto.args()
is_create_enabled = params.get("create-user-enabled")
is_enable_enabled = params.get("enable-user-enabled")
is_disable_enabled = params.get("disable-user-enabled")
is_update_enabled = demisto.params().get("update-user-enabled")
create_if_not_exists = demisto.params().get("create-if-not-exists")
is_fetch = params.get('isFetch')
first_fetch_str = params.get('first_fetch')
fetch_limit = int(params.get('max_fetch', 1))
auto_generate_query_filter = params.get('auto_generate_query_filter')
fetch_query_filter = params.get('fetch_query_filter')
context = demisto.getIntegrationContext()
headers = {
'Content-Type': 'application/json',
'Accept': 'application/json',
'Authorization': f'SSWS {token}'
}
client = Client(
base_url=base_url,
verify=verify_certificate,
proxy=proxy,
headers=headers,
ok_codes=(200,)
)
demisto.debug(f'Command being called is {command}')
if command == 'iam-get-user':
user_profile = get_user_command(client, args, mapper_in, mapper_out)
elif command == 'iam-create-user':
user_profile = create_user_command(client, args, mapper_out, is_create_enabled,
is_update_enabled, is_enable_enabled)
elif command == 'iam-update-user':
user_profile = update_user_command(client, args, mapper_out, is_update_enabled, is_enable_enabled,
is_create_enabled, create_if_not_exists)
elif command == 'iam-disable-user':
user_profile = disable_user_command(client, args, is_disable_enabled, mapper_out)
if user_profile:
return_results(user_profile)
try:
if command == 'test-module':
test_module(client, is_fetch, fetch_query_filter, auto_generate_query_filter, context, first_fetch_str)
elif command == 'get-mapping-fields':
return_results(get_mapping_fields_command(client))
elif command == 'okta-get-app-user-assignment':
return_results(get_app_user_assignment_command(client, args))
elif command == 'okta-iam-list-applications':
return_results(list_apps_command(client, args))
elif command == 'okta-iam-list-user-applications':
return_results(list_user_apps_command(client, args))
elif command == 'okta-iam-get-configuration':
return_results(get_configuration(context))
elif command == 'okta-iam-set-configuration':
context = set_configuration(args)
demisto.setIntegrationContext(context)
elif command == 'iam-get-group':
return_results(get_group_command(client, args))
elif command == 'okta-get-logs':
return_results(get_logs_command(client, args))
elif command == 'fetch-incidents':
last_run = demisto.getLastRun()
context = demisto.getIntegrationContext()
incidents, next_run = fetch_incidents(client, last_run, first_fetch_str, fetch_limit,
fetch_query_filter, auto_generate_query_filter, context)
demisto.incidents(incidents)
demisto.setLastRun(next_run)
    except Exception as e:
# For any other integration command exception, return an error
return_error(f'Failed to execute {command} command. Error: {e}')
|
38,409 |
def _read_part_file_descriptor(fname: Union[str, Path]):
"""
    Read a file descriptor and return the array of the fields found.
"""
# Mapping
mapping_list = [
("position_x", "particle_position_x"),
("position_y", "particle_position_y"),
("position_z", "particle_position_z"),
("velocity_x", "particle_velocity_x"),
("velocity_y", "particle_velocity_y"),
("velocity_z", "particle_velocity_z"),
("mass", "particle_mass"),
("identity", "particle_identity"),
("levelp", "particle_level"),
("family", "particle_family"),
("tag", "particle_tag"),
]
# Convert to dictionary
mapping = {k: v for k, v in mapping_list}
with open(fname) as f:
line = f.readline()
tmp = VERSION_RE.match(line)
mylog.debug("Reading part file descriptor %s.", fname)
if not tmp:
raise YTParticleOutputFormatNotImplemented()
version = int(tmp.group(1))
if version == 1:
# Skip one line (containing the headers)
line = f.readline()
fields = []
for i, line in enumerate(f.readlines()):
tmp = VAR_DESC_RE.match(line)
if not tmp:
raise YTFileNotParseable(fname, i + 1)
# ivar = tmp.group(1)
varname = tmp.group(2)
dtype = tmp.group(3)
if varname in mapping:
varname = mapping[varname]
else:
varname = f"particle_{varname}"
fields.append((varname, dtype))
else:
raise YTParticleOutputFormatNotImplemented()
return fields
|
def _read_part_file_descriptor(fname: Union[str, "os.PathLike[str]"]):
"""
    Read a file descriptor and return the array of the fields found.
"""
# Mapping
mapping_list = [
("position_x", "particle_position_x"),
("position_y", "particle_position_y"),
("position_z", "particle_position_z"),
("velocity_x", "particle_velocity_x"),
("velocity_y", "particle_velocity_y"),
("velocity_z", "particle_velocity_z"),
("mass", "particle_mass"),
("identity", "particle_identity"),
("levelp", "particle_level"),
("family", "particle_family"),
("tag", "particle_tag"),
]
# Convert to dictionary
mapping = {k: v for k, v in mapping_list}
with open(fname) as f:
line = f.readline()
tmp = VERSION_RE.match(line)
mylog.debug("Reading part file descriptor %s.", fname)
if not tmp:
raise YTParticleOutputFormatNotImplemented()
version = int(tmp.group(1))
if version == 1:
# Skip one line (containing the headers)
line = f.readline()
fields = []
for i, line in enumerate(f.readlines()):
tmp = VAR_DESC_RE.match(line)
if not tmp:
raise YTFileNotParseable(fname, i + 1)
# ivar = tmp.group(1)
varname = tmp.group(2)
dtype = tmp.group(3)
if varname in mapping:
varname = mapping[varname]
else:
varname = f"particle_{varname}"
fields.append((varname, dtype))
else:
raise YTParticleOutputFormatNotImplemented()
return fields
|
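A standalone illustration of the name-mapping fallback used by _read_part_file_descriptor above; the raw variable names are assumed examples, not an exhaustive RAMSES field list.

mapping = {"position_x": "particle_position_x", "mass": "particle_mass"}
for raw in ("position_x", "mass", "birth_time"):
    # Known names are remapped, unknown ones get a "particle_" prefix.
    print(mapping.get(raw, f"particle_{raw}"))
# particle_position_x
# particle_mass
# particle_birth_time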
31,710 |
def group_entry(group_object, custom_attributes):
# create an endpoint entry from a group object
group = {
'Type': 'AD',
'ID': group_object.get('dn'),
'Name': group_object.get('name'),
'Groups': group_object.get('memberOf')
}
lower_cased_person_object_keys = {
person_object_key.lower(): person_object_key for person_object_key in group_object.keys()
}
for attr in custom_attributes:
if attr == '*':
continue
try:
group[attr] = group_object[attr]
except KeyError as e:
lower_cased_custom_attr = attr.lower()
if lower_cased_custom_attr in lower_cased_person_object_keys:
cased_custom_attr = lower_cased_person_object_keys.get(lower_cased_custom_attr, '')
group[cased_custom_attr] = group_object[cased_custom_attr]
else:
demisto.error(f'Failed parsing custom attribute {attr}, error: {e}')
return group
|
def group_entry(group_object, custom_attributes):
# create a group entry from a group object
group = {
'Type': 'AD',
'ID': group_object.get('dn'),
'Name': group_object.get('name'),
'Groups': group_object.get('memberOf')
}
lower_cased_person_object_keys = {
person_object_key.lower(): person_object_key for person_object_key in group_object.keys()
}
for attr in custom_attributes:
if attr == '*':
continue
try:
group[attr] = group_object[attr]
except KeyError as e:
lower_cased_custom_attr = attr.lower()
if lower_cased_custom_attr in lower_cased_person_object_keys:
cased_custom_attr = lower_cased_person_object_keys.get(lower_cased_custom_attr, '')
group[cased_custom_attr] = group_object[cased_custom_attr]
else:
demisto.error(f'Failed parsing custom attribute {attr}, error: {e}')
return group
|
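An illustrative call to group_entry with a fabricated LDAP-style group object; the case-insensitive fallback resolves 'Description' to the lower-cased key, so demisto.error is never reached in this sample.

group_object = {
    'dn': 'CN=Admins,OU=Groups,DC=example,DC=com',
    'name': 'Admins',
    'memberOf': ['CN=IT,OU=Groups,DC=example,DC=com'],
    'description': 'Domain administrators',
}
entry = group_entry(group_object, custom_attributes=['Description'])
print(entry['description'])  # 'Domain administrators'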
7,539 |
def test_qtable_column_conversion():
"""
Ensures that a QTable that gets assigned a unit switches to be Quantity-y
"""
qtab = table.QTable([[1, 2], [3, 4.2]], names=['i', 'f'])
assert isinstance(qtab['i'], table.column.Column)
assert isinstance(qtab['f'], table.column.Column)
with pytest.warns(UserWarning, match="dtype is converted to float"):
qtab['i'].unit = 'km/s'
assert isinstance(qtab['i'], u.Quantity)
assert isinstance(qtab['f'], table.column.Column)
# should follow from the above, but good to make sure as a #4497 regression test
assert isinstance(qtab['i'][0], u.Quantity)
assert isinstance(qtab[0]['i'], u.Quantity)
assert not isinstance(qtab['f'][0], u.Quantity)
assert not isinstance(qtab[0]['f'], u.Quantity)
# Regression test for #5342: if a function unit is assigned, the column
# should become the appropriate FunctionQuantity subclass.
qtab['f'].unit = u.dex(u.cm / u.s**2)
assert isinstance(qtab['f'], u.Dex)
|
def test_qtable_column_conversion():
"""
Ensures that a QTable that gets assigned a unit switches to be Quantity-y
"""
qtab = table.QTable([[1, 2], [3, 4.2]], names=['i', 'f'])
assert isinstance(qtab['i'], table.column.Column)
assert isinstance(qtab['f'], table.column.Column)
with pytest.warns(AstropyUserWarning, match="dtype is converted to float"):
qtab['i'].unit = 'km/s'
assert isinstance(qtab['i'], u.Quantity)
assert isinstance(qtab['f'], table.column.Column)
# should follow from the above, but good to make sure as a #4497 regression test
assert isinstance(qtab['i'][0], u.Quantity)
assert isinstance(qtab[0]['i'], u.Quantity)
assert not isinstance(qtab['f'][0], u.Quantity)
assert not isinstance(qtab[0]['f'], u.Quantity)
# Regression test for #5342: if a function unit is assigned, the column
# should become the appropriate FunctionQuantity subclass.
qtab['f'].unit = u.dex(u.cm / u.s**2)
assert isinstance(qtab['f'], u.Dex)
|
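A condensed, interactive version of the behaviour that test checks, assuming astropy is installed; float columns are used so no dtype-conversion warning is raised.

import astropy.units as u
from astropy import table

qtab = table.QTable([[1.0, 2.0], [3.0, 4.2]], names=['i', 'f'])
qtab['i'].unit = 'km/s'               # plain Column -> Quantity column
print(type(qtab['i']))                # astropy.units.quantity.Quantity
qtab['f'].unit = u.dex(u.cm / u.s**2)
print(type(qtab['f']))                # a Dex (FunctionQuantity) column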
14,783 |
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the sensor."""
name = config.get(CONF_NAME)
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
try:
client = AtomeClient(username, password)
except PyAtomeError as exp:
_LOGGER.error(exp)
return False
# finally:
# client.close_session()
add_entities([AtomeSensor(name, client)])
return True
|
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the sensor."""
name = config.get(CONF_NAME)
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
try:
client = AtomeClient(username, password)
except PyAtomeError as exp:
_LOGGER.error(exp)
return False
# finally:
# client.close_session()
add_entities([AtomeSensor(name, client)], True)
return True
|
43,712 |
def one_particle(t_matrix_elements, core=None, active=None, cutoff=1.0e-12):
r"""Generates the table of matrix elements of a given one-particle operator
required to build many-body qubit observables.
Second quantized one-particle operators are expanded in the basis of single-particle
states as
.. math::
\hat{T} = \sum_{\alpha, \beta} \langle \alpha \vert \hat{t} \vert \beta \rangle
[\hat{c}_{\alpha\uparrow}^\dagger \hat{c}_{\beta\uparrow} +
\hat{c}_{\alpha\downarrow}^\dagger \hat{c}_{\beta\downarrow}].
In the equation above the indices :math:`\alpha, \beta` run over the basis of spatial
orbitals :math:`\vert \alpha \rangle = \phi_\alpha(r)`. Since the operator :math:`t`
acts only on the spatial coordinates, the spin quantum numbers are indicated explicitly
with the up/down arrows. The operators :math:`\hat{c}^\dagger` and :math:`\hat{c}` are the
particle creation and annihilation operators, respectively, and
:math:`\langle \alpha \vert \hat{t} \vert \beta \rangle` denotes the matrix elements of
the operator :math:`\hat{t}`
.. math::
\langle \alpha \vert \hat{t} \vert \beta \rangle = \int dr ~ \phi_\alpha^*(r)
\hat{t}(r) \phi_\beta(r).
If an active space is defined (see :func:`~.active_space`), the summation indices
run over the active orbitals and the contribution due to core orbitals is computed as
:math:`T_\mathrm{core} = 2 \sum_{\alpha\in \mathrm{core}}
\langle \alpha \vert \hat{t} \vert \beta \rangle`.
Args:
t_matrix_elements (array[float]): 2D NumPy array with the matrix elements
:math:`\langle \alpha \vert \hat{t} \vert \beta \rangle`
core (list): indices of core orbitals, i.e., the orbitals that are
not correlated in the many-body wave function
active (list): indices of active orbitals, i.e., the orbitals used to
build the correlated many-body wave function
cutoff (float): Cutoff value for including matrix elements. The
matrix elements with absolute value less than ``cutoff`` are neglected.
Returns:
tuple: Table of indices and matrix elements of the one-particle operator
and the contribution due to core orbitals. The returned table is a 2D Numpy
array where each row contains three elements, the *spin*-orbital indices
:math:`\alpha, \beta` and the matrix element
:math:`\langle \alpha \vert \hat{t} \vert \beta \rangle`.
**Example**
>>> t_matrix_elements = np.array([[-4.72739313e+00, -1.05499666e-01, -1.66961416e-01,
... 6.18014041e-16, 2.86964662e-16, -3.46772026e-02],
... [-1.05499666e-01, -1.49264622e+00, 3.28928073e-02,
... -2.20398308e-16, 1.93277291e-16, 5.27078882e-02],
... [-1.66961416e-01, 3.28928073e-02, -1.12554473e+00,
... -2.82912389e-17, 2.55224784e-16, -3.04455743e-02],
... [ 6.18014041e-16, -2.20398308e-16, -2.82912389e-17,
... -1.13579985e+00, -1.94289029e-16, -2.36158697e-16],
... [ 2.86964662e-16, 1.93277291e-16, 2.55224784e-16,
... -2.77555756e-16, -1.13579985e+00, 2.06665432e-16],
... [-3.46772026e-02, 5.27078882e-02, -3.04455743e-02,
... -2.36158697e-16, 2.06665432e-16, -9.50966595e-01]]
>>> t_table, t_core = one_particle(t_matrix_elements, core=[0], active=[1, 2])
>>> print(t_table)
[[ 0. 0. -1.49264622]
[ 1. 1. -1.49264622]
[ 0. 2. 0.03289281]
[ 1. 3. 0.03289281]
[ 2. 0. 0.03289281]
[ 3. 1. 0.03289281]
[ 2. 2. -1.12554473]
[ 3. 3. -1.12554473]]
>>> print(t_core)
-9.45478626
"""
orbitals = t_matrix_elements.shape[0]
if t_matrix_elements.ndim != 2:
raise ValueError(
"'t_matrix_elements' must be a 2D array; got t_matrix_elements.ndim = {}".format(
t_matrix_elements.ndim
)
)
if core is None:
t_core = 0
else:
if True in [i > orbitals - 1 or i < 0 for i in core]:
raise ValueError(
"Indices of core orbitals must be between 0 and {}; got core = {}".format(
orbitals, core
)
)
# Compute contribution due to core orbitals
t_core = 2 * sum([t_matrix_elements[alpha, alpha] for alpha in core])
if active is None:
if core is None:
active = list(range(orbitals))
else:
active = [i for i in range(orbitals) if i not in core]
if True in [i > orbitals - 1 or i < 0 for i in active]:
raise ValueError(
"Indices of active orbitals must be between 0 and {}; got active = {}".format(
orbitals, active
)
)
# Indices of the matrix elements with absolute values >= cutoff
indices = np.nonzero(np.abs(t_matrix_elements) >= cutoff)
# Single out the indices of active orbitals
num_indices = len(indices[0])
pairs = [
[indices[0][i], indices[1][i]]
for i in range(num_indices)
if all(indices[j][i] in active for j in range(len(indices)))
]
# Building the table of indices and matrix elements
t_table = np.zeros((2 * len(pairs), 3))
for i, pair in enumerate(pairs):
alpha, beta = pair
element = t_matrix_elements[alpha, beta]
# spin-up term
t_table[2 * i, 0] = 2 * active.index(alpha)
t_table[2 * i, 1] = 2 * active.index(beta)
t_table[2 * i, 2] = element
# spin-down term
t_table[2 * i + 1, 0] = 2 * active.index(alpha) + 1
t_table[2 * i + 1, 1] = 2 * active.index(beta) + 1
t_table[2 * i + 1, 2] = element
return t_table, t_core
|
def one_particle(t_matrix_elements, core=None, active=None, cutoff=1.0e-12):
r"""Generates the table of matrix elements of a given one-particle operator
required to build many-body qubit observables.
Second quantized one-particle operators are expanded in the basis of single-particle
states as
.. math::
\hat{T} = \sum_{\alpha, \beta} \langle \alpha \vert \hat{t} \vert \beta \rangle
[\hat{c}_{\alpha\uparrow}^\dagger \hat{c}_{\beta\uparrow} +
\hat{c}_{\alpha\downarrow}^\dagger \hat{c}_{\beta\downarrow}].
In the equation above the indices :math:`\alpha, \beta` run over the basis of spatial
orbitals :math:`\vert \alpha \rangle = \phi_\alpha(r)`. Since the operator :math:`t`
acts only on the spatial coordinates, the spin quantum numbers are indicated explicitly
with the up/down arrows. The operators :math:`\hat{c}^\dagger` and :math:`\hat{c}` are the
particle creation and annihilation operators, respectively, and
:math:`\langle \alpha \vert \hat{t} \vert \beta \rangle` denotes the matrix elements of
the operator :math:`\hat{t}`
.. math::
\langle \alpha \vert \hat{t} \vert \beta \rangle = \int dr ~ \phi_\alpha^*(r)
\hat{t}(r) \phi_\beta(r).
If an active space is defined (see :func:`~.active_space`), the summation indices
run over the active orbitals and the contribution due to core orbitals is computed as
:math:`T_\mathrm{core} = 2 \sum_{\alpha\in \mathrm{core}}
\langle \alpha \vert \hat{t} \vert \beta \rangle`.
Args:
t_matrix_elements (array[float]): 2D NumPy array with the matrix elements
:math:`\langle \alpha \vert \hat{t} \vert \beta \rangle`
core (list): indices of core orbitals, i.e., the orbitals that are
not correlated in the many-body wave function
active (list): indices of active orbitals, i.e., the orbitals used to
build the correlated many-body wave function
cutoff (float): Cutoff value for including matrix elements. The
matrix elements with absolute value less than ``cutoff`` are neglected.
Returns:
tuple: Table of indices and matrix elements of the one-particle operator
and the contribution due to core orbitals. The returned table is a 2D Numpy
array where each row contains three elements, the *spin*-orbital indices
:math:`\alpha, \beta` and the matrix element
:math:`\langle \alpha \vert \hat{t} \vert \beta \rangle`.
**Example**
>>> t_matrix_elements = np.array([[-4.72739313e+00, -1.05499666e-01, -1.66961416e-01,
... 6.18014041e-16, 2.86964662e-16, -3.46772026e-02],
... [-1.05499666e-01, -1.49264622e+00, 3.28928073e-02,
... -2.20398308e-16, 1.93277291e-16, 5.27078882e-02],
... [-1.66961416e-01, 3.28928073e-02, -1.12554473e+00,
... -2.82912389e-17, 2.55224784e-16, -3.04455743e-02],
... [ 6.18014041e-16, -2.20398308e-16, -2.82912389e-17,
... -1.13579985e+00, -1.94289029e-16, -2.36158697e-16],
... [ 2.86964662e-16, 1.93277291e-16, 2.55224784e-16,
... -2.77555756e-16, -1.13579985e+00, 2.06665432e-16],
... [-3.46772026e-02, 5.27078882e-02, -3.04455743e-02,
... -2.36158697e-16, 2.06665432e-16, -9.50966595e-01]]
>>> table, t_core = one_particle(matrix_elements, core=[0], active=[1, 2])
>>> print(t_table)
[[ 0. 0. -1.49264622]
[ 1. 1. -1.49264622]
[ 0. 2. 0.03289281]
[ 1. 3. 0.03289281]
[ 2. 0. 0.03289281]
[ 3. 1. 0.03289281]
[ 2. 2. -1.12554473]
[ 3. 3. -1.12554473]]
>>> print(t_core)
-9.45478626
"""
orbitals = t_matrix_elements.shape[0]
if t_matrix_elements.ndim != 2:
raise ValueError(
"'t_matrix_elements' must be a 2D array; got t_matrix_elements.ndim = {}".format(
t_matrix_elements.ndim
)
)
if core is None:
t_core = 0
else:
if True in [i > orbitals - 1 or i < 0 for i in core]:
raise ValueError(
"Indices of core orbitals must be between 0 and {}; got core = {}".format(
orbitals, core
)
)
# Compute contribution due to core orbitals
t_core = 2 * sum([t_matrix_elements[alpha, alpha] for alpha in core])
if active is None:
if core is None:
active = list(range(orbitals))
else:
active = [i for i in range(orbitals) if i not in core]
if True in [i > orbitals - 1 or i < 0 for i in active]:
raise ValueError(
"Indices of active orbitals must be between 0 and {}; got active = {}".format(
orbitals, active
)
)
# Indices of the matrix elements with absolute values >= cutoff
indices = np.nonzero(np.abs(t_matrix_elements) >= cutoff)
# Single out the indices of active orbitals
num_indices = len(indices[0])
pairs = [
[indices[0][i], indices[1][i]]
for i in range(num_indices)
if all(indices[j][i] in active for j in range(len(indices)))
]
# Building the table of indices and matrix elements
t_table = np.zeros((2 * len(pairs), 3))
for i, pair in enumerate(pairs):
alpha, beta = pair
element = t_matrix_elements[alpha, beta]
# spin-up term
t_table[2 * i, 0] = 2 * active.index(alpha)
t_table[2 * i, 1] = 2 * active.index(beta)
t_table[2 * i, 2] = element
# spin-down term
t_table[2 * i + 1, 0] = 2 * active.index(alpha) + 1
t_table[2 * i + 1, 1] = 2 * active.index(beta) + 1
t_table[2 * i + 1, 2] = element
return t_table, t_core
|
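A minimal, self-contained sketch of the spin-orbital index mapping described in the one_particle entry above: each spatial orbital p in the active space contributes a spin-up index 2*p and a spin-down index 2*p + 1, and every matrix element kept by the cutoff is duplicated for both spin channels. The 2x2 matrix below is illustrative only, not the data from the docstring example.

import numpy as np

# Hypothetical 2x2 one-particle matrix over two active spatial orbitals.
t = np.array([[-1.5, 0.03],
              [0.03, -1.1]])

rows = []
for a in range(2):
    for b in range(2):
        if abs(t[a, b]) < 1e-12:
            continue
        # spin-up copy: spin-orbital indices 2*a, 2*b
        rows.append([2 * a, 2 * b, t[a, b]])
        # spin-down copy: spin-orbital indices 2*a + 1, 2*b + 1
        rows.append([2 * a + 1, 2 * b + 1, t[a, b]])

print(np.array(rows))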
17,503 |
def open_dataarray(
filename_or_obj: str | os.PathLike,
*,
engine: T_Engine = None,
chunks: T_Chunks = None,
cache: bool | None = None,
decode_cf: bool | None = None,
mask_and_scale: bool | None = None,
decode_times: bool | None = None,
decode_timedelta: bool | None = None,
use_cftime: bool | None = None,
concat_characters: bool | None = None,
decode_coords: Literal["coordinates", "all"] | bool | None = None,
drop_variables: str | Iterable[str] | None = None,
inline_array: bool = False,
backend_kwargs: dict[str, Any] | None = None,
**kwargs,
) -> DataArray:
"""Open an DataArray from a file or file-like object containing a single
data variable.
This is designed to read netCDF files with only one data variable. If
multiple variables are present then a ValueError is raised.
Parameters
----------
filename_or_obj : str, Path, file-like or DataStore
Strings and Path objects are interpreted as a path to a netCDF file
or an OpenDAP URL and opened with python-netCDF4, unless the filename
ends with .gz, in which case the file is gunzipped and opened with
scipy.io.netcdf (only netCDF3 supported). Byte-strings or file-like
objects are opened by scipy.io.netcdf (netCDF3) or h5py (netCDF4/HDF).
engine : {"netcdf4", "scipy", "pydap", "h5netcdf", "pynio", "cfgrib", \
"pseudonetcdf", "zarr", None}, installed backenend \
or subclass of xarray.backends.BackendEntrypoint, optional
Engine to use when reading files. If not provided, the default engine
is chosen based on available dependencies, with a preference for
"netcdf4".
chunks : int, dict, 'auto' or None, optional
If chunks is provided, it is used to load the new dataset into dask
arrays. ``chunks=-1`` loads the dataset with dask using a single
chunk for all arrays. `chunks={}`` loads the dataset with dask using
engine preferred chunks if exposed by the backend, otherwise with
a single chunk for all arrays.
``chunks='auto'`` will use dask ``auto`` chunking taking into account the
engine preferred chunks. See dask chunking for more details.
cache : bool, optional
If True, cache data loaded from the underlying datastore in memory as
NumPy arrays when accessed to avoid reading from the underlying data-
store multiple times. Defaults to True unless you specify the `chunks`
argument to use dask, in which case it defaults to False. Does not
change the behavior of coordinates corresponding to dimensions, which
always load their data from disk into a ``pandas.Index``.
decode_cf : bool, optional
Whether to decode these variables, assuming they were saved according
to CF conventions.
mask_and_scale : bool, optional
If True, replace array values equal to `_FillValue` with NA and scale
values according to the formula `original_values * scale_factor +
add_offset`, where `_FillValue`, `scale_factor` and `add_offset` are
taken from variable attributes (if they exist). If the `_FillValue` or
`missing_value` attribute contains multiple values a warning will be
issued and all array values matching one of the multiple values will
be replaced by NA. mask_and_scale defaults to True except for the
pseudonetcdf backend. This keyword may not be supported by all the backends.
decode_times : bool, optional
If True, decode times encoded in the standard NetCDF datetime format
into datetime objects. Otherwise, leave them encoded as numbers.
This keyword may not be supported by all the backends.
decode_timedelta : bool, optional
If True, decode variables and coordinates with time units in
{"days", "hours", "minutes", "seconds", "milliseconds", "microseconds"}
into timedelta objects. If False, leave them encoded as numbers.
If None (default), assume the same value of decode_time.
This keyword may not be supported by all the backends.
use_cftime: bool, optional
Only relevant if encoded dates come from a standard calendar
(e.g. "gregorian", "proleptic_gregorian", "standard", or not
specified). If None (default), attempt to decode times to
``np.datetime64[ns]`` objects; if this is not possible, decode times to
``cftime.datetime`` objects. If True, always decode times to
``cftime.datetime`` objects, regardless of whether or not they can be
represented using ``np.datetime64[ns]`` objects. If False, always
decode times to ``np.datetime64[ns]`` objects; if this is not possible
raise an error. This keyword may not be supported by all the backends.
concat_characters : bool, optional
If True, concatenate along the last dimension of character arrays to
form string arrays. Dimensions will only be concatenated over (and
removed) if they have no corresponding variable and if they are only
used as the last dimension of character arrays.
This keyword may not be supported by all the backends.
decode_coords : bool or {"coordinates", "all"}, optional
Controls which variables are set as coordinate variables:
- "coordinates" or True: Set variables referred to in the
``'coordinates'`` attribute of the datasets or individual variables
as coordinate variables.
- "all": Set variables referred to in ``'grid_mapping'``, ``'bounds'`` and
other attributes as coordinate variables.
drop_variables: str or iterable of str, optional
A variable or list of variables to exclude from being parsed from the
dataset. This may be useful to drop variables with problems or
inconsistent values.
inline_array: bool, default: False
How to include the array in the dask task graph.
By default(``inline_array=False``) the array is included in a task by
itself, and each chunk refers to that task by its key. With
``inline_array=True``, Dask will instead inline the array directly
in the values of the task graph. See :py:func:`dask.array.from_array`.
backend_kwargs: dict
Additional keyword arguments passed on to the engine open function,
equivalent to `**kwargs`.
**kwargs: dict
Additional keyword arguments passed on to the engine open function.
For example:
- 'group': path to the netCDF4 group in the given file to open given as
a str,supported by "netcdf4", "h5netcdf", "zarr".
- 'lock': resource lock to use when reading data from disk. Only
relevant when using dask or another form of parallelism. By default,
appropriate locks are chosen to safely read and write files with the
currently active dask scheduler. Supported by "netcdf4", "h5netcdf",
"scipy", "pynio", "pseudonetcdf", "cfgrib".
See engine open function for kwargs accepted by each specific engine.
Notes
-----
This is designed to be fully compatible with `DataArray.to_netcdf`. Saving
using `DataArray.to_netcdf` and then loading with this function will
produce an identical result.
All parameters are passed directly to `xarray.open_dataset`. See that
documentation for further details.
See also
--------
open_dataset
"""
dataset = open_dataset(
filename_or_obj,
decode_cf=decode_cf,
mask_and_scale=mask_and_scale,
decode_times=decode_times,
concat_characters=concat_characters,
decode_coords=decode_coords,
engine=engine,
chunks=chunks,
cache=cache,
drop_variables=drop_variables,
inline_array=inline_array,
backend_kwargs=backend_kwargs,
use_cftime=use_cftime,
decode_timedelta=decode_timedelta,
**kwargs,
)
if len(dataset.data_vars) != 1:
raise ValueError(
"Given file dataset contains more than one data "
"variable. Please read with xarray.open_dataset and "
"then select the variable you want."
)
else:
(data_array,) = dataset.data_vars.values()
data_array.set_close(dataset._close)
# Reset names if they were changed during saving
# to ensure that we can 'roundtrip' perfectly
if DATAARRAY_NAME in dataset.attrs:
data_array.name = dataset.attrs[DATAARRAY_NAME]
del dataset.attrs[DATAARRAY_NAME]
if data_array.name == DATAARRAY_VARIABLE:
data_array.name = None
return data_array
|
def open_dataarray(
filename_or_obj: str | os.PathLike,
*,
engine: T_Engine = None,
chunks: T_Chunks = None,
cache: bool | None = None,
decode_cf: bool | None = None,
mask_and_scale: bool | None = None,
decode_times: bool | None = None,
decode_timedelta: bool | None = None,
use_cftime: bool | None = None,
concat_characters: bool | None = None,
decode_coords: Literal["coordinates", "all"] | bool | None = None,
drop_variables: str | Iterable[str] | None = None,
inline_array: bool = False,
backend_kwargs: dict[str, Any] | None = None,
**kwargs,
) -> DataArray:
"""Open an DataArray from a file or file-like object containing a single
data variable.
This is designed to read netCDF files with only one data variable. If
multiple variables are present then a ValueError is raised.
Parameters
----------
filename_or_obj : str, Path, file-like or DataStore
Strings and Path objects are interpreted as a path to a netCDF file
or an OpenDAP URL and opened with python-netCDF4, unless the filename
ends with .gz, in which case the file is gunzipped and opened with
scipy.io.netcdf (only netCDF3 supported). Byte-strings or file-like
objects are opened by scipy.io.netcdf (netCDF3) or h5py (netCDF4/HDF).
engine : {"netcdf4", "scipy", "pydap", "h5netcdf", "pynio", "cfgrib", \
"pseudonetcdf", "zarr", None}, installed backend \
or subclass of xarray.backends.BackendEntrypoint, optional
Engine to use when reading files. If not provided, the default engine
is chosen based on available dependencies, with a preference for
"netcdf4".
chunks : int, dict, 'auto' or None, optional
If chunks is provided, it is used to load the new dataset into dask
arrays. ``chunks=-1`` loads the dataset with dask using a single
chunk for all arrays. `chunks={}`` loads the dataset with dask using
engine preferred chunks if exposed by the backend, otherwise with
a single chunk for all arrays.
``chunks='auto'`` will use dask ``auto`` chunking taking into account the
engine preferred chunks. See dask chunking for more details.
cache : bool, optional
If True, cache data loaded from the underlying datastore in memory as
NumPy arrays when accessed to avoid reading from the underlying data-
store multiple times. Defaults to True unless you specify the `chunks`
argument to use dask, in which case it defaults to False. Does not
change the behavior of coordinates corresponding to dimensions, which
always load their data from disk into a ``pandas.Index``.
decode_cf : bool, optional
Whether to decode these variables, assuming they were saved according
to CF conventions.
mask_and_scale : bool, optional
If True, replace array values equal to `_FillValue` with NA and scale
values according to the formula `original_values * scale_factor +
add_offset`, where `_FillValue`, `scale_factor` and `add_offset` are
taken from variable attributes (if they exist). If the `_FillValue` or
`missing_value` attribute contains multiple values a warning will be
issued and all array values matching one of the multiple values will
be replaced by NA. mask_and_scale defaults to True except for the
pseudonetcdf backend. This keyword may not be supported by all the backends.
decode_times : bool, optional
If True, decode times encoded in the standard NetCDF datetime format
into datetime objects. Otherwise, leave them encoded as numbers.
This keyword may not be supported by all the backends.
decode_timedelta : bool, optional
If True, decode variables and coordinates with time units in
{"days", "hours", "minutes", "seconds", "milliseconds", "microseconds"}
into timedelta objects. If False, leave them encoded as numbers.
If None (default), assume the same value of decode_time.
This keyword may not be supported by all the backends.
use_cftime: bool, optional
Only relevant if encoded dates come from a standard calendar
(e.g. "gregorian", "proleptic_gregorian", "standard", or not
specified). If None (default), attempt to decode times to
``np.datetime64[ns]`` objects; if this is not possible, decode times to
``cftime.datetime`` objects. If True, always decode times to
``cftime.datetime`` objects, regardless of whether or not they can be
represented using ``np.datetime64[ns]`` objects. If False, always
decode times to ``np.datetime64[ns]`` objects; if this is not possible
raise an error. This keyword may not be supported by all the backends.
concat_characters : bool, optional
If True, concatenate along the last dimension of character arrays to
form string arrays. Dimensions will only be concatenated over (and
removed) if they have no corresponding variable and if they are only
used as the last dimension of character arrays.
This keyword may not be supported by all the backends.
decode_coords : bool or {"coordinates", "all"}, optional
Controls which variables are set as coordinate variables:
- "coordinates" or True: Set variables referred to in the
``'coordinates'`` attribute of the datasets or individual variables
as coordinate variables.
- "all": Set variables referred to in ``'grid_mapping'``, ``'bounds'`` and
other attributes as coordinate variables.
drop_variables: str or iterable of str, optional
A variable or list of variables to exclude from being parsed from the
dataset. This may be useful to drop variables with problems or
inconsistent values.
inline_array: bool, default: False
How to include the array in the dask task graph.
By default(``inline_array=False``) the array is included in a task by
itself, and each chunk refers to that task by its key. With
``inline_array=True``, Dask will instead inline the array directly
in the values of the task graph. See :py:func:`dask.array.from_array`.
backend_kwargs: dict
Additional keyword arguments passed on to the engine open function,
equivalent to `**kwargs`.
**kwargs: dict
Additional keyword arguments passed on to the engine open function.
For example:
- 'group': path to the netCDF4 group in the given file to open given as
a str,supported by "netcdf4", "h5netcdf", "zarr".
- 'lock': resource lock to use when reading data from disk. Only
relevant when using dask or another form of parallelism. By default,
appropriate locks are chosen to safely read and write files with the
currently active dask scheduler. Supported by "netcdf4", "h5netcdf",
"scipy", "pynio", "pseudonetcdf", "cfgrib".
See engine open function for kwargs accepted by each specific engine.
Notes
-----
This is designed to be fully compatible with `DataArray.to_netcdf`. Saving
using `DataArray.to_netcdf` and then loading with this function will
produce an identical result.
All parameters are passed directly to `xarray.open_dataset`. See that
documentation for further details.
See also
--------
open_dataset
"""
dataset = open_dataset(
filename_or_obj,
decode_cf=decode_cf,
mask_and_scale=mask_and_scale,
decode_times=decode_times,
concat_characters=concat_characters,
decode_coords=decode_coords,
engine=engine,
chunks=chunks,
cache=cache,
drop_variables=drop_variables,
inline_array=inline_array,
backend_kwargs=backend_kwargs,
use_cftime=use_cftime,
decode_timedelta=decode_timedelta,
**kwargs,
)
if len(dataset.data_vars) != 1:
raise ValueError(
"Given file dataset contains more than one data "
"variable. Please read with xarray.open_dataset and "
"then select the variable you want."
)
else:
(data_array,) = dataset.data_vars.values()
data_array.set_close(dataset._close)
# Reset names if they were changed during saving
# to ensure that we can 'roundtrip' perfectly
if DATAARRAY_NAME in dataset.attrs:
data_array.name = dataset.attrs[DATAARRAY_NAME]
del dataset.attrs[DATAARRAY_NAME]
if data_array.name == DATAARRAY_VARIABLE:
data_array.name = None
return data_array
|
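A short roundtrip sketch for the open_dataarray entry above: save a single-variable DataArray with to_netcdf and read it back. It assumes xarray with a netCDF backend (for example netcdf4 or scipy) is installed, and the output path is illustrative.

import numpy as np
import xarray as xr

# A single-variable DataArray; open_dataarray is designed for exactly this case.
da = xr.DataArray(np.arange(6).reshape(2, 3),
                  dims=("x", "y"),
                  name="temperature")
da.to_netcdf("example_single_var.nc")  # hypothetical output path

roundtripped = xr.open_dataarray("example_single_var.nc")
assert roundtripped.name == "temperature"
print(roundtripped)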
43,819 |
def qfunc_transform(tape_transform):
"""Converts a single tape transform to a quantum function (qfunc) transform.
Args:
tape_transform (single_tape_transform): the single tape transform
to convert into the qfunc transform.
Returns:
function: A qfunc transform, that acts on any qfunc returning a *new*
qfunc as per the tape transform.
**Example**
Given a single tape transform ``my_transform(tape, x, y)``, you can use
this function to convert it into a qfunc transform:
>>> my_qfunc_transform = qfunc_transform(my_transform)
It can then be used to transform an existing qfunc:
>>> new_qfunc = my_qfunc_transform(old_qfunc, 0.6, 0.7)
It can also be used as a decorator:
.. code-block:: python
@qml.qfunc_transform
@qml.single_tape_transform
def my_transform(tape, x, y):
for op in tape.operations + tape.measurements:
if op.name == "CRX":
wires = op.wires
param = op.parameters[0]
qml.RX(x * param, wires=wires[1])
qml.RY(y * qml.math.sqrt(param), wires=wires[1])
qml.CZ(wires=[wires[1], wires[0]])
else:
op.queue()
@my_transform(0.6, 0.1)
def qfunc(x):
qml.Hadamard(wires=0)
qml.CRX(x, wires=[0, 1])
Let's use this qfunc to create a QNode, so that we can execute it on a quantum
device:
>>> dev = qml.device("default.qubit", wires=2)
>>> qnode = qml.QNode(qfunc, dev)
>>> print(qml.draw(qnode)(2.5))
0: ──H───────────────────╭Z──┤
1: ──RX(1.5)──RY(0.158)──╰C──┤
The not only is the transformed qfunc fully differentiable, but the qfunc transform
parameters *themselves* are differentiable:
.. code-block:: python
dev = qml.device("default.qubit", wires=2)
def ansatz(x):
qml.Hadamard(wires=0)
qml.CRX(x, wires=[0, 1])
@qml.qnode(dev)
def circuit(param, transform_weights):
qml.RX(0.1, wires=0)
# apply the transform to the ansatz
my_transform(*transform_weights)(ansatz)(param)
return qml.expval(qml.PauliZ(1))
We can print this QNode to show that the qfunc transform is taking place:
>>> x = np.array(0.5, requires_grad=True)
>>> y = np.array([0.1, 0.2], requires_grad=True)
>>> print(qml.draw(circuit)(x, y))
0: ──H────────────────────╭Z──┤
1: ──RX(0.05)──RY(0.141)──╰C──┤ ⟨Z⟩
Evaluating the QNode, as well as the derivative, with respect to the gate
parameter *and* the transform weights:
>>> circuit(x, y)
0.9887793925354269
>>> qml.grad(circuit)(x, y)
(array(-0.02485651), array([-0.02474011, -0.09954244]))
"""
if not isinstance(tape_transform, single_tape_transform):
raise ValueError("Can only convert single tape transforms into qfunc transforms!")
@functools.wraps(tape_transform)
def make_qfunc_transform(*targs, **tkwargs):
def wrapper(fn):
@functools.wraps(fn)
def internal_wrapper(*args, **kwargs):
tape = make_tape(fn)(*args, **kwargs)
tape = tape_transform(tape, *targs, **tkwargs)
return tape.measurements
return internal_wrapper
return wrapper
return make_qfunc_transform
|
def qfunc_transform(tape_transform):
"""Converts a single tape transform to a quantum function (qfunc) transform.
Args:
tape_transform (single_tape_transform): the single tape transform
to convert into the qfunc transform.
Returns:
function: A qfunc transform, that acts on any qfunc returning a *new*
qfunc as per the tape transform.
**Example**
Not only is the transformed QNode fully differentiable, but the QNode transform
Given a single tape transform ``my_transform(tape, x, y)``, you can use
this function to convert it into a qfunc transform:
>>> my_qfunc_transform = qfunc_transform(my_transform)
It can then be used to transform an existing qfunc:
>>> new_qfunc = my_qfunc_transform(old_qfunc, 0.6, 0.7)
It can also be used as a decorator:
.. code-block:: python
@qml.qfunc_transform
@qml.single_tape_transform
def my_transform(tape, x, y):
for op in tape.operations + tape.measurements:
if op.name == "CRX":
wires = op.wires
param = op.parameters[0]
qml.RX(x * param, wires=wires[1])
qml.RY(y * qml.math.sqrt(param), wires=wires[1])
qml.CZ(wires=[wires[1], wires[0]])
else:
op.queue()
@my_transform(0.6, 0.1)
def qfunc(x):
qml.Hadamard(wires=0)
qml.CRX(x, wires=[0, 1])
Let's use this qfunc to create a QNode, so that we can execute it on a quantum
device:
>>> dev = qml.device("default.qubit", wires=2)
>>> qnode = qml.QNode(qfunc, dev)
>>> print(qml.draw(qnode)(2.5))
0: ──H───────────────────╭Z──┤
1: ──RX(1.5)──RY(0.158)──╰C──┤
The not only is the transformed qfunc fully differentiable, but the qfunc transform
parameters *themselves* are differentiable:
.. code-block:: python
dev = qml.device("default.qubit", wires=2)
def ansatz(x):
qml.Hadamard(wires=0)
qml.CRX(x, wires=[0, 1])
@qml.qnode(dev)
def circuit(param, transform_weights):
qml.RX(0.1, wires=0)
# apply the transform to the ansatz
my_transform(*transform_weights)(ansatz)(param)
return qml.expval(qml.PauliZ(1))
We can print this QNode to show that the qfunc transform is taking place:
>>> x = np.array(0.5, requires_grad=True)
>>> y = np.array([0.1, 0.2], requires_grad=True)
>>> print(qml.draw(circuit)(x, y))
0: ──H────────────────────╭Z──┤
1: ──RX(0.05)──RY(0.141)──╰C──┤ ⟨Z⟩
Evaluating the QNode, as well as the derivative, with respect to the gate
parameter *and* the transform weights:
>>> circuit(x, y)
0.9887793925354269
>>> qml.grad(circuit)(x, y)
(array(-0.02485651), array([-0.02474011, -0.09954244]))
"""
if not isinstance(tape_transform, single_tape_transform):
raise ValueError("Can only convert single tape transforms into qfunc transforms!")
@functools.wraps(tape_transform)
def make_qfunc_transform(*targs, **tkwargs):
def wrapper(fn):
@functools.wraps(fn)
def internal_wrapper(*args, **kwargs):
tape = make_tape(fn)(*args, **kwargs)
tape = tape_transform(tape, *targs, **tkwargs)
return tape.measurements
return internal_wrapper
return wrapper
return make_qfunc_transform
|
29,217 |
def _get_all_test_targets_from_path(test_path=None, include_load_tests=True):
"""Returns a list of test targets for all classes under test_path
containing tests.
"""
def _get_test_target_classes(path):
"""Returns a list of all test classes in a given test file path.
Args:
path: str. The path of the test file from which all test classes
are to be extracted.
Returns:
list. A list of all test classes in a given test file path.
"""
class_names = []
test_target_path = os.path.relpath(
path, os.getcwd())[:-3].replace('/', '.')
python_module = importlib.import_module(test_target_path)
for name, clazz in inspect.getmembers(
python_module, predicate=inspect.isclass):
if unittest.TestCase in inspect.getmro(clazz):
class_names.append(name)
return [
'%s.%s' % (test_target_path, class_name)
for class_name in class_names]
base_path = os.path.join(os.getcwd(), test_path or '')
result = []
excluded_dirs = [
'.git', 'third_party', 'node_modules', 'venv', 'core/tests/data']
for root in os.listdir(base_path):
if any([s in root for s in excluded_dirs]):
continue
if root.endswith('_test.py'):
result = result + (
_get_test_target_classes(os.path.join(base_path, root)))
for subroot, _, files in os.walk(os.path.join(base_path, root)):
if any([s in subroot for s in excluded_dirs]):
continue
if _LOAD_TESTS_DIR in subroot and include_load_tests:
for f in files:
if f.endswith('_test.py'):
result = result + (
_get_test_target_classes(os.path.join(subroot, f)))
for f in files:
if f.endswith('_test.py'):
result = result + (
_get_test_target_classes(os.path.join(subroot, f)))
return result
|
def _get_all_test_targets_from_path(test_path=None, include_load_tests=True):
"""Returns a list of test targets for all classes under test_path
containing tests.
"""
def _get_test_target_classes(path):
"""Returns a list of all test classes in a given test file path.
Args:
path: str. The path of the test file from which all test classes
are to be extracted.
Returns:
list. A list of all test classes in a given test file path.
"""
class_names = []
test_target_path = os.path.relpath(
path, os.getcwd())[:-3].replace('/', '.')
python_module = importlib.import_module(test_target_path)
for name, clazz in inspect.getmembers(
python_module, predicate=inspect.isclass):
if unittest.TestCase in inspect.getmro(clazz):
class_names.append(name)
return [
'%s.%s' % (test_target_path, class_name)
for class_name in class_names]
base_path = os.path.join(os.getcwd(), test_path or '')
result = []
excluded_dirs = [
'.git', 'third_party', 'node_modules', 'venv', 'core/tests/data']
for root in os.listdir(base_path):
if any([s in root for s in excluded_dirs]):
continue
if root.endswith('_test.py'):
result = result + (
_get_test_target_classes(os.path.join(base_path, root)))
for subroot, _, files in os.walk(os.path.join(base_path, root)):
if any(s in subroot for s in excluded_dirs):
continue
if _LOAD_TESTS_DIR in subroot and include_load_tests:
for f in files:
if f.endswith('_test.py'):
result = result + (
_get_test_target_classes(os.path.join(subroot, f)))
for f in files:
if f.endswith('_test.py'):
result = result + (
_get_test_target_classes(os.path.join(subroot, f)))
return result
|
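A stripped-down sketch of the directory walk performed in the entry above, keeping the same `*_test.py` naming convention but only collecting file paths rather than importing modules and extracting TestCase classes. The excluded-directory list here is an illustrative subset.

import os

EXCLUDED_DIRS = ('.git', 'third_party', 'node_modules', 'venv')

def find_test_files(base_path='.'):
    """Return paths of all *_test.py files under base_path, skipping excluded dirs."""
    matches = []
    for root, dirs, files in os.walk(base_path):
        # Prune excluded directories in place so os.walk does not descend into them.
        dirs[:] = [d for d in dirs if d not in EXCLUDED_DIRS]
        matches.extend(os.path.join(root, f) for f in files if f.endswith('_test.py'))
    return matches

print(find_test_files('.'))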
34,534 |
def create_interpreter(
obj: Union[
"rasa.shared.nlu.interpreter.NaturalLanguageInterpreter",
EndpointConfig,
Text,
None,
]
) -> "rasa.shared.nlu.interpreter.NaturalLanguageInterpreter":
"""Factory to create an natural language interpreter."""
if isinstance(obj, rasa.shared.nlu.interpreter.NaturalLanguageInterpreter):
return obj
elif isinstance(obj, str) and os.path.exists(obj):
return RasaNLUInterpreter(model_directory=obj)
elif isinstance(obj, str) and not os.path.exists(obj):
# user passed in a string, but file does not exist
logger.warning(
f"No local NLU model '{obj}' found. Using RegexInterpreter instead."
)
return rasa.shared.nlu.interpreter.RegexInterpreter()
else:
return _create_from_endpoint_config(obj)
|
def create_interpreter(
obj: Union[
"rasa.shared.nlu.interpreter.NaturalLanguageInterpreter",
EndpointConfig,
Text,
None,
]
) -> "rasa.shared.nlu.interpreter.NaturalLanguageInterpreter":
"""Factory to create a natural language interpreter."""
if isinstance(obj, rasa.shared.nlu.interpreter.NaturalLanguageInterpreter):
return obj
elif isinstance(obj, str) and os.path.exists(obj):
return RasaNLUInterpreter(model_directory=obj)
elif isinstance(obj, str) and not os.path.exists(obj):
# user passed in a string, but file does not exist
logger.warning(
f"No local NLU model '{obj}' found. Using RegexInterpreter instead."
)
return rasa.shared.nlu.interpreter.RegexInterpreter()
else:
return _create_from_endpoint_config(obj)
|
23,525 |
def check_version_range(module_version, version):
"""
Check version string of a module against a required version.
"""
if ';' in version:
versions = version.split(';')
else:
versions = [version]
output = True
for _ver in versions:
match = re.search(r'[0-9]', _ver)
assert match is not None, "Invalid version number"
symb = _ver[:match.start()]
if not symb:
symb = '='
assert symb in ('>=', '>', '=', '<', '<='),\
"Invalid version condition '%s'" % symb
ver = _ver[match.start():]
output = output and check_version(module_version, ver, symb)
return output
|
def check_version_range(module_version, version_range):
"""
Check version string of a module against a required version.
"""
if ';' in version:
versions = version.split(';')
else:
versions = [version]
output = True
for _ver in versions:
match = re.search(r'[0-9]', _ver)
assert match is not None, "Invalid version number"
symb = _ver[:match.start()]
if not symb:
symb = '='
assert symb in ('>=', '>', '=', '<', '<='),\
"Invalid version condition '%s'" % symb
ver = _ver[match.start():]
output = output and check_version(module_version, ver, symb)
return output
|
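A self-contained sketch of the comparison logic that check_version_range above delegates to check_version: split the requirement string on ';', peel the comparison symbol off each condition, and AND the results. The tiny _check_version helper below is an assumption standing in for the real check_version and only handles dotted numeric versions.

import re

def _check_version(module_version, required, symb):
    """Stand-in for check_version: compare dotted numeric versions via tuple comparison."""
    def parse(v):
        return tuple(int(p) for p in v.split('.'))
    a, b = parse(module_version), parse(required)
    return {'>': a > b, '>=': a >= b, '=': a == b, '<': a < b, '<=': a <= b}[symb]

def check_range(module_version, version_range):
    """Evaluate a ';'-separated list of conditions such as '>=4.0;<5.0'."""
    ok = True
    for cond in version_range.split(';'):
        match = re.search(r'[0-9]', cond)
        symb = cond[:match.start()] or '='
        ok = ok and _check_version(module_version, cond[match.start():], symb)
    return ok

print(check_range('4.2.1', '>=4.0;<5.0'))  # True
print(check_range('5.1', '>=4.0;<5.0'))    # False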
32,217 |
def test_nginx_web_server(port: int, params: Dict):
protocol = 'https' if params.get('key') else 'http'
res = requests.get(f'{protocol}://localhost:{port}/nginx-test',
verify=False, proxies={"http": "", "https": ""}) # nosec guardrails-disable-line
res.raise_for_status()
welcome = 'Welcome to nginx'
if welcome not in res.text:
raise ValueError(f'Unexpected response from nginx-text (does not contain "{welcome}"): {res.text}')
|
def test_nginx_web_server(port: int, params: Dict):
protocol = 'https' if params.get('key') else 'http'
res = requests.get(f'{protocol}://localhost:{port}/nginx-test',
verify=False, proxies={"http": "", "https": ""}) # nosec guardrails-disable-line
res.raise_for_status()
welcome = 'Welcome to nginx'
if welcome not in res.text:
raise ValueError(f'Unexpected response from nginx-test (does not contain "{welcome}"): {res.text}')
|
21,576 |
def main():
parser = argparse.ArgumentParser(
description="""Adds a signature to a JSON object.
Example usage:
$ scripts-dev/sign_json.py -N test -k localhost.signing.key "{}"
{"signatures":{"test":{"ed25519:a_ZnZh":"LmPnml6iM0iR..."}}}
""",
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument(
"-N",
"--server-name",
help="Name to give as the local homeserver. If unspecified, will be "
"read from the config file.",
)
parser.add_argument(
"-k",
"--signing-key-path",
help="Path to the file containing the private ed25519 key to sign the "
"request with.",
)
parser.add_argument(
"-c",
"--config",
default="homeserver.yaml",
help=(
"Path to synapse config file, from which the server name and/or signing "
"key path will be read. Ignored if --server-name and --signing-key-path "
"are both given."
),
)
input_args = parser.add_mutually_exclusive_group()
input_args.add_argument("input_data", nargs="?", help="Raw JSON to be signed.")
input_args.add_argument(
"-i",
"--input",
type=argparse.FileType("r"),
default=sys.stdin,
help=(
"A file from which to read the JSON to be signed. If neither --input nor "
"input_data are given, JSON will be read from stdin."
),
)
parser.add_argument(
"-o",
"--output",
type=argparse.FileType("w"),
default=sys.stdout,
help="Where to write the signed JSON. Defaults to stdout.",
)
args = parser.parse_args()
if not args.server_name or not args.signing_key_path:
read_args_from_config(args)
with open(args.signing_key_path) as f:
key = read_signing_keys(f)[0]
json_to_sign = args.input_data
if json_to_sign is None:
json_to_sign = args.input.read()
try:
obj = json.loads(json_to_sign)
except JSONDecodeError as e:
print("Unable to parse input as JSON: %s" % e, file=sys.stderr)
sys.exit(1)
if not isinstance(obj, dict):
print("Input json was not a dict", file=sys.stderr)
sys.exit(1)
sign_json(obj, args.server_name, key)
for c in json_encoder.iterencode(obj):
args.output.write(c)
args.output.write("\n")
|
def main():
parser = argparse.ArgumentParser(
description="""Adds a signature to a JSON object.
Example usage:
$ scripts-dev/sign_json.py -N test -k localhost.signing.key "{}"
{"signatures":{"test":{"ed25519:a_ZnZh":"LmPnml6iM0iR..."}}}
""",
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument(
"-N",
"--server-name",
help="Name to give as the local homeserver. If unspecified, will be "
"read from the config file.",
)
parser.add_argument(
"-k",
"--signing-key-path",
help="Path to the file containing the private ed25519 key to sign the "
"request with.",
)
parser.add_argument(
"-c",
"--config",
default="homeserver.yaml",
help=(
"Path to synapse config file, from which the server name and/or signing "
"key path will be read. Ignored if --server-name and --signing-key-path "
"are both given."
),
)
input_args = parser.add_mutually_exclusive_group()
input_args.add_argument("input_data", nargs="?", help="Raw JSON to be signed.")
input_args.add_argument(
"-i",
"--input",
type=argparse.FileType("r"),
default=sys.stdin,
help=(
"A file from which to read the JSON to be signed. If neither --input nor "
"input_data are given, JSON will be read from stdin."
),
)
parser.add_argument(
"-o",
"--output",
type=argparse.FileType("w"),
default=sys.stdout,
help="Where to write the signed JSON. Defaults to stdout.",
)
args = parser.parse_args()
if not args.server_name or not args.signing_key_path:
read_args_from_config(args)
with open(args.signing_key_path) as f:
key = read_signing_keys(f)[0]
json_to_sign = args.input_data
if json_to_sign is None:
json_to_sign = args.input.read()
try:
obj = json.loads(json_to_sign)
except JSONDecodeError as e:
print("Unable to parse input as JSON: %s" % e, file=sys.stderr)
sys.exit(1)
if not isinstance(obj, dict):
print("Input json was not an object", file=sys.stderr)
sys.exit(1)
sign_json(obj, args.server_name, key)
for c in json_encoder.iterencode(obj):
args.output.write(c)
args.output.write("\n")
|
23,593 |
def spa_c(time, latitude, longitude, pressure=101325, altitude=0,
temperature=12, delta_t=67.0,
raw_spa_output=False):
"""
Calculate the solar position using the C implementation of the NREL
SPA code.
The source files for this code are located in './spa_c_files/', along with
a README file which describes how the C code is wrapped in Python.
Due to license restrictions, the C code must be downloaded seperately
and used in accordance with it's license.
This function is slower and no more accurate than :py:func:`spa_python`.
Parameters
----------
time : pandas.DatetimeIndex
Must be localized or UTC will be assumed.
latitude : float
Latitude in decimal degrees. Positive north of equator, negative
to south.
longitude : float
Longitude in decimal degrees. Positive east of prime meridian,
negative to west.
pressure : float, default 101325
Pressure in Pascals
altitude : float, default 0
Distance above sea level.
temperature : float, default 12
Temperature in C
delta_t : float, default 67.0
Difference between terrestrial time and UT1.
USNO has previous values and predictions.
raw_spa_output : bool, default False
If true, returns the raw SPA output.
Returns
-------
DataFrame
The DataFrame will have the following columns:
elevation,
azimuth,
zenith,
apparent_elevation,
apparent_zenith.
References
----------
.. [1] NREL SPA reference:
http://rredc.nrel.gov/solar/codesandalgorithms/spa/
NREL SPA C files: https://midcdmz.nrel.gov/spa/
Note: The ``timezone`` field in the SPA C files is replaced with
``time_zone`` to avoid a nameclash with the function ``__timezone`` that is
redefined by Python>=3.5. This issue is
`Python bug 24643 <https://bugs.python.org/issue24643>`_.
.. [2] USNO delta T:
http://www.usno.navy.mil/USNO/earth-orientation/eo-products/long-term
See also
--------
pyephem, spa_python, ephemeris
"""
# Added by Rob Andrews (@Calama-Consulting), Calama Consulting, 2014
# Edited by Will Holmgren (@wholmgren), University of Arizona, 2014
# Edited by Tony Lorenzo (@alorenzo175), University of Arizona, 2015
try:
from pvlib.spa_c_files.spa_py import spa_calc
except ImportError:
raise ImportError('Could not import built-in SPA calculator. ' +
'You may need to recompile the SPA code.')
# if localized, convert to UTC. otherwise, assume UTC.
try:
time_utc = time.tz_convert('UTC')
except TypeError:
time_utc = time
spa_out = []
for date in time_utc:
spa_out.append(spa_calc(year=date.year,
month=date.month,
day=date.day,
hour=date.hour,
minute=date.minute,
second=date.second,
time_zone=0, # date uses utc time
latitude=latitude,
longitude=longitude,
elevation=altitude,
pressure=pressure / 100,
temperature=temperature,
delta_t=delta_t
))
spa_df = pd.DataFrame(spa_out, index=time)
if raw_spa_output:
# rename "time_zone" from raw output from spa_c_files.spa_py.spa_calc()
# to "timezone" to match the API of pvlib.solarposition.spa_c()
return spa_df.rename(columns={'time_zone': 'timezone'})
else:
dfout = pd.DataFrame({'azimuth': spa_df['azimuth'],
'apparent_zenith': spa_df['zenith'],
'apparent_elevation': spa_df['e'],
'elevation': spa_df['e0'],
'zenith': 90 - spa_df['e0']})
return dfout
|
def spa_c(time, latitude, longitude, pressure=101325, altitude=0,
temperature=12, delta_t=67.0,
raw_spa_output=False):
"""
Calculate the solar position using the C implementation of the NREL
SPA code.
The source files for this code are located in './spa_c_files/', along with
a README file which describes how the C code is wrapped in Python.
Due to license restrictions, the C code must be downloaded seperately
and used in accordance with it's license.
This function is slower and no more accurate than :py:func:`spa_python`.
Parameters
----------
time : pandas.DatetimeIndex
Must be localized or UTC will be assumed.
latitude : float
Latitude in decimal degrees. Positive north of equator, negative
to south.
longitude : float
Longitude in decimal degrees. Positive east of prime meridian,
negative to west.
pressure : float, default 101325
Pressure in Pascals
altitude : float, default 0
Height above sea level. [m]
temperature : float, default 12
Temperature in C
delta_t : float, default 67.0
Difference between terrestrial time and UT1.
USNO has previous values and predictions.
raw_spa_output : bool, default False
If true, returns the raw SPA output.
Returns
-------
DataFrame
The DataFrame will have the following columns:
elevation,
azimuth,
zenith,
apparent_elevation,
apparent_zenith.
References
----------
.. [1] NREL SPA reference:
http://rredc.nrel.gov/solar/codesandalgorithms/spa/
NREL SPA C files: https://midcdmz.nrel.gov/spa/
Note: The ``timezone`` field in the SPA C files is replaced with
``time_zone`` to avoid a nameclash with the function ``__timezone`` that is
redefined by Python>=3.5. This issue is
`Python bug 24643 <https://bugs.python.org/issue24643>`_.
.. [2] USNO delta T:
http://www.usno.navy.mil/USNO/earth-orientation/eo-products/long-term
See also
--------
pyephem, spa_python, ephemeris
"""
# Added by Rob Andrews (@Calama-Consulting), Calama Consulting, 2014
# Edited by Will Holmgren (@wholmgren), University of Arizona, 2014
# Edited by Tony Lorenzo (@alorenzo175), University of Arizona, 2015
try:
from pvlib.spa_c_files.spa_py import spa_calc
except ImportError:
raise ImportError('Could not import built-in SPA calculator. ' +
'You may need to recompile the SPA code.')
# if localized, convert to UTC. otherwise, assume UTC.
try:
time_utc = time.tz_convert('UTC')
except TypeError:
time_utc = time
spa_out = []
for date in time_utc:
spa_out.append(spa_calc(year=date.year,
month=date.month,
day=date.day,
hour=date.hour,
minute=date.minute,
second=date.second,
time_zone=0, # date uses utc time
latitude=latitude,
longitude=longitude,
elevation=altitude,
pressure=pressure / 100,
temperature=temperature,
delta_t=delta_t
))
spa_df = pd.DataFrame(spa_out, index=time)
if raw_spa_output:
# rename "time_zone" from raw output from spa_c_files.spa_py.spa_calc()
# to "timezone" to match the API of pvlib.solarposition.spa_c()
return spa_df.rename(columns={'time_zone': 'timezone'})
else:
dfout = pd.DataFrame({'azimuth': spa_df['azimuth'],
'apparent_zenith': spa_df['zenith'],
'apparent_elevation': spa_df['e'],
'elevation': spa_df['e0'],
'zenith': 90 - spa_df['e0']})
return dfout
|
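The spa_c docstring above points to :py:func:`spa_python` as the faster, equally accurate alternative that needs no compiled C files. A minimal sketch of that call, assuming pvlib and pandas are installed; the coordinates and timezone are illustrative.

import pandas as pd
from pvlib import solarposition

# Illustrative location (Tucson, AZ) over one afternoon at hourly resolution.
times = pd.date_range('2024-06-21 06:00', '2024-06-21 18:00',
                      freq='1h', tz='Etc/GMT+7')
solpos = solarposition.spa_python(times, latitude=32.2, longitude=-110.9,
                                  altitude=700, pressure=101325,
                                  temperature=25, delta_t=67.0)
print(solpos[['apparent_zenith', 'apparent_elevation', 'azimuth']].head())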
19,859 |
def _copy_m2m_relations(source, target, exclude_fields=None, update_attrs=None):
"""
Copies non-ParentalManyToMany m2m relations
"""
update_attrs = update_attrs or {}
exclude_fields = exclude_fields or []
for field in source._meta.get_fields():
# Copy m2m relations. Ignore explicitly excluded fields, reverse relations, and Parental m2m fields.
if field.many_to_many and field.name not in exclude_fields and not field.auto_created and not isinstance(field, ParentalManyToManyField):
try:
# Do not copy m2m links with a through model that has a ParentalKey to the model being copied - these will be copied as child objects
through_model_parental_links = [field for field in field.through._meta.get_fields() if isinstance(field, ParentalKey) and (field.related_model == source.__class__ or field.related_model in source._meta.get_parent_list())]
if through_model_parental_links:
continue
except AttributeError:
pass
if field.name in update_attrs:
value = update_attrs[field.name]
else:
value = getattr(source, field.name).all()
getattr(target, field.name).set(value)
|
def _copy_m2m_relations(source, target, exclude_fields=None, update_attrs=None):
"""
Copies non-ParentalManyToMany m2m relations
"""
update_attrs = update_attrs or {}
exclude_fields = exclude_fields or []
for field in source._meta.get_fields():
# Copy m2m relations. Ignore explicitly excluded fields, reverse relations, and Parental m2m fields.
if field.many_to_many and field.name not in exclude_fields and not field.auto_created and not isinstance(field, ParentalManyToManyField):
try:
# Do not copy m2m links with a through model that has a ParentalKey to the model being copied - these will be copied as child objects
through_model_parental_links = [field for field in field.through._meta.get_fields() if isinstance(field, ParentalKey) and issubclass(source.__class__, field.related_model)]
if through_model_parental_links:
continue
except AttributeError:
pass
if field.name in update_attrs:
value = update_attrs[field.name]
else:
value = getattr(source, field.name).all()
getattr(target, field.name).set(value)
|
43,061 |
def reassemble_multi(A, idtodelete):
"""
Puts the matrices A inside larger matrices of dimensions
dim(A)+len(idtodelete)
The empty space are filled with zeros (offdiagonal) and ones (diagonals)
"""
nweights = len(A[:, 0, 0])
ntot = len(A[0]) + len(idtodelete)
ind = np.sort(list(set(np.arange(ntot)) - set(idtodelete)))
newmat = np.tile(np.eye(ntot, dtype=complex), (nweights, 1, 1))
newmat[np.ix_(np.arange(newmat.shape[0], dtype=int), ind, ind)] = A
return newmat
|
def reassemble_multi(A, idtodelete):
"""
Puts the matrices A inside larger matrices of dimensions
``dim(A)+len(idtodelete)``
The empty space are filled with zeros (offdiagonal) and ones (diagonals)
"""
nweights = len(A[:, 0, 0])
ntot = len(A[0]) + len(idtodelete)
ind = np.sort(list(set(np.arange(ntot)) - set(idtodelete)))
newmat = np.tile(np.eye(ntot, dtype=complex), (nweights, 1, 1))
newmat[np.ix_(np.arange(newmat.shape[0], dtype=int), ind, ind)] = A
return newmat
|
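A runnable sketch of what reassemble_multi does, restating the function from the entry above so the example is self-contained: each 2x2 block in a stack is embedded into a 3x3 identity, leaving the deleted index untouched.

import numpy as np

def reassemble_multi(A, idtodelete):
    """Embed each matrix in the stack A into a larger identity, skipping idtodelete rows/cols."""
    nweights = len(A[:, 0, 0])
    ntot = len(A[0]) + len(idtodelete)
    ind = np.sort(list(set(np.arange(ntot)) - set(idtodelete)))
    newmat = np.tile(np.eye(ntot, dtype=complex), (nweights, 1, 1))
    newmat[np.ix_(np.arange(newmat.shape[0], dtype=int), ind, ind)] = A
    return newmat

# Two 2x2 blocks embedded into 3x3 matrices; index 1 keeps its identity row/column.
A = np.arange(8, dtype=complex).reshape(2, 2, 2)
out = reassemble_multi(A, idtodelete=[1])
print(out.shape)    # (2, 3, 3)
print(out[0].real)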
4,403 |
def read_dig_dat(fname):
r"""Read electrode positions from a ``*.dat`` file.
.. Warning::
This function was implemented based on ``*.dat`` files available from
`Compumedics <https://compumedicsneuroscan.com/scan-acquire-
configuration-files/>`__ and might not work as expected with novel
files. If it does not read your files correctly please contact the
mne-python developers.
Parameters
----------
fname : path-like
File from which to read electrode locations.
Returns
-------
montage : DigMontage
The montage.
See Also
--------
read_dig_captrak
read_dig_dat
read_dig_egi
read_dig_fif
read_dig_hpts
read_dig_polhemus_isotrak
make_dig_montage
Notes
-----
``*.dat`` files are plain text files and can be inspected and amended with
a plain text editor.
"""
from ._standard_montage_utils import _check_dupes_odict
fname = _check_fname(fname, overwrite='read', must_exist=True)
with open(fname, 'r') as fid:
lines = fid.readlines()
ch_names, poss = list(), list()
nasion = lpa = rpa = None
for i, line in enumerate(lines):
items = line.split()
if not items:
continue
elif not (len(items) == 4 or len(items) == 5):
raise ValueError(
"Error reading %s, line %s has unexpected number of entries:\n"
"%s" % (fname, i, line.rstrip()))
if len(items) == 5:
num = items[1]
if num == '67':
continue # centroid
pos = np.array([float(item) for item in items[2:]])
if num == '78':
nasion = pos
elif num == '76':
lpa = pos
elif num == '82':
rpa = pos
else:
ch_names.append(items[0])
poss.append(pos)
elif len(items) == 4:
label = items[0]
if label == 'Centroid':
continue # centroid
pos = np.array([float(item) for item in items[1:]])
if label == 'Nasion':
nasion = pos
elif label == 'Left':
lpa = pos
elif label == 'Right':
rpa = pos
else:
ch_names.append(items[0])
poss.append(pos)
electrodes = _check_dupes_odict(ch_names, poss)
return make_dig_montage(electrodes, nasion, lpa, rpa)
|
def read_dig_dat(fname):
r"""Read electrode positions from a ``*.dat`` file.
.. Warning::
This function was implemented based on ``*.dat`` files available from
`Compumedics <https://compumedicsneuroscan.com/scan-acquire-
configuration-files/>`__ and might not work as expected with novel
files. If it does not read your files correctly please contact the
mne-python developers.
Parameters
----------
fname : path-like
File from which to read electrode locations.
Returns
-------
montage : DigMontage
The montage.
See Also
--------
read_dig_captrak
read_dig_dat
read_dig_egi
read_dig_fif
read_dig_hpts
read_dig_polhemus_isotrak
make_dig_montage
Notes
-----
``*.dat`` files are plain text files and can be inspected and amended with
a plain text editor.
"""
from ._standard_montage_utils import _check_dupes_odict
fname = _check_fname(fname, overwrite='read', must_exist=True)
with open(fname, 'r') as fid:
lines = fid.readlines()
ch_names, poss = list(), list()
nasion = lpa = rpa = None
for i, line in enumerate(lines):
items = line.split()
if not items:
continue
elif not (len(items) == 4 or len(items) == 5):
raise ValueError(
"Error reading %s, line %s has unexpected number of entries:\n"
"%s" % (fname, i, line.rstrip()))
if len(items) == 5:
num = items[1]
if num == '67':
continue # centroid
pos = np.array([float(item) for item in items[2:]])
if num == '78':
nasion = pos
elif num == '76':
lpa = pos
elif num == '82':
rpa = pos
else:
ch_names.append(items[0])
poss.append(pos)
elif len(items) == 4:
label = items[0]
if label == 'Centroid':
continue
pos = np.array([float(item) for item in items[1:]])
if label == 'Nasion':
nasion = pos
elif label == 'Left':
lpa = pos
elif label == 'Right':
rpa = pos
else:
ch_names.append(items[0])
poss.append(pos)
electrodes = _check_dupes_odict(ch_names, poss)
return make_dig_montage(electrodes, nasion, lpa, rpa)
|
27,713 |
def call_fixture_func(fixturefunc, request, kwargs):
yieldctx = is_generator(fixturefunc)
if yieldctx:
generator = fixturefunc(**kwargs)
try:
fixture_return_value = next(generator)
except StopIteration:
raise ValueError(
"Fixture {} did not yield a value".format(fixturefunc.__name__)
)
else:
finalizer = functools.partial(
_teardown_yield_fixture, fixturefunc, generator
)
request.addfinalizer(finalizer)
else:
fixture_return_value = fixturefunc(**kwargs)
return fixture_return_value
|
def call_fixture_func(fixturefunc, request, kwargs):
yieldctx = is_generator(fixturefunc)
if yieldctx:
generator = fixturefunc(**kwargs)
try:
fixture_return_value = next(generator)
except StopIteration:
raise ValueError(
"Fixture {} did not yield a value".format(fixturefunc.__name__)
) from None
else:
finalizer = functools.partial(
_teardown_yield_fixture, fixturefunc, generator
)
request.addfinalizer(finalizer)
else:
fixture_return_value = fixturefunc(**kwargs)
return fixture_return_value
|
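A standalone sketch of the yield-fixture pattern handled by call_fixture_func above: setup code runs up to the yield, the yielded value becomes the fixture result, and a finalizer later resumes the generator to run the teardown. The names below are illustrative, not pytest internals.

import functools
import inspect

def db_connection():
    print("setup: open connection")
    yield "connection-object"
    print("teardown: close connection")

def call_fixture(fixturefunc, finalizers):
    if inspect.isgeneratorfunction(fixturefunc):
        gen = fixturefunc()
        value = next(gen)                 # run setup, capture the yielded value
        def teardown(generator):
            try:
                next(generator)           # resume past the yield; StopIteration ends it
            except StopIteration:
                pass
        finalizers.append(functools.partial(teardown, gen))
        return value
    return fixturefunc()

finalizers = []
value = call_fixture(db_connection, finalizers)
print("using", value)
for fin in finalizers:
    fin()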
46,344 |
def compute(train_data, train_labels, predict_data, nClasses):
# Create an algorithm object and call compute
train_algo = d4p.bf_knn_classification_training(nClasses=nClasses)
train_result = train_algo.compute(train_data, train_labels)
# Create an algorithm object and call compute
predict_algo = d4p.bf_knn_classification_prediction()
predict_result = predict_algo.compute(predict_data, train_result.model)
return predict_result
|
def compute(train_data, train_labels, predict_data, nClasses):
# Create an algorithm object and call compute
train_algo = d4p.bf_knn_classification_training(nClasses=nClasses)
train_result = train_algo.compute(train_data, train_labels)
# Create an algorithm object and call compute
predict_algo = d4p.bf_knn_classification_prediction(nClasses=nClasses)
predict_result = predict_algo.compute(predict_data, train_result.model)
return predict_result
|
10,499 |
def flatten(mylist, levels=None, skip_nulls=True):
ret = []
for element in mylist:
if skip_nulls and element in (None, 'None', 'null'):
# ignore null items
continue
elif is_sequence(element):
if levels is None:
ret.extend(flatten(element))
elif levels >= 1:
# decrement as we go down the stack
ret.extend(flatten(element, levels=(int(levels) - 1)))
else:
ret.append(element)
else:
ret.append(element)
return ret
|
def flatten(mylist, levels=None, skip_nulls=True):
ret = []
for element in mylist:
if skip_nulls and element in (None, 'None', 'null'):
# ignore null items
continue
elif is_sequence(element):
if levels is None:
ret.extend(flatten(element, skip_nulls=skip_nulls))
elif levels >= 1:
# decrement as we go down the stack
ret.extend(flatten(element, levels=(int(levels) - 1)))
else:
ret.append(element)
else:
ret.append(element)
return ret
|
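A self-contained sketch of the flattening behaviour in the entry above, with a minimal stand-in for is_sequence (an assumption; the real helper lives elsewhere in the surrounding codebase). It shows how levels limits recursion depth and how null-like items are skipped.

def is_sequence(obj):
    # Minimal stand-in: treat lists and tuples as flattenable sequences.
    return isinstance(obj, (list, tuple))

def flatten(mylist, levels=None, skip_nulls=True):
    ret = []
    for element in mylist:
        if skip_nulls and element in (None, 'None', 'null'):
            continue
        elif is_sequence(element):
            if levels is None:
                ret.extend(flatten(element, skip_nulls=skip_nulls))
            elif levels >= 1:
                ret.extend(flatten(element, levels=int(levels) - 1, skip_nulls=skip_nulls))
            else:
                ret.append(element)
        else:
            ret.append(element)
    return ret

print(flatten([1, [2, [3, None]], 'null']))    # [1, 2, 3]
print(flatten([1, [2, [3, None]]], levels=1))  # [1, 2, [3, None]]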
32,273 |
def warninglist_command(demisto_args: dict) -> CommandResults:
"""
Check values against MISP warninglists.
"""
res = []
values = demisto_args["value"].split(",")
response = PYMISP.values_in_warninglist(values)
if 'errors' in response:
raise DemistoException(f'Unable to validate against MISPwarninglist!\nError message: {response}')
if not response:
return CommandResults(
readable_output="No value is on a MISP warning list!",
raw_response=response,
)
for value, lists in response.items():
if len(lists) > 0:
res.append(
{
"Value": value,
"Count": len(lists),
"Lists": ",".join([x["name"] for x in lists]),
}
)
human_readable = tableToMarkdown(
"MISP Warninglist matchings:",
sorted(res, key=lambda x: x["Count"], reverse=True),
headers=["Value", "Lists", "Count"],
)
return CommandResults(
outputs=res,
outputs_prefix="MISP.Warninglist",
outputs_key_field=["Value"],
readable_output=human_readable,
raw_response=response,
)
|
def warninglist_command(demisto_args: dict) -> CommandResults:
"""
Check values against MISP warninglists.
"""
res = []
values = argToList(demisto_args["value"])
response = PYMISP.values_in_warninglist(values)
if 'errors' in response:
raise DemistoException(f'Unable to validate against MISPwarninglist!\nError message: {response}')
if not response:
return CommandResults(
readable_output="No value is on a MISP warning list!",
raw_response=response,
)
for value, lists in response.items():
if len(lists) > 0:
res.append(
{
"Value": value,
"Count": len(lists),
"Lists": ",".join([x["name"] for x in lists]),
}
)
human_readable = tableToMarkdown(
"MISP Warninglist matchings:",
sorted(res, key=lambda x: x["Count"], reverse=True),
headers=["Value", "Lists", "Count"],
)
return CommandResults(
outputs=res,
outputs_prefix="MISP.Warninglist",
outputs_key_field=["Value"],
readable_output=human_readable,
raw_response=response,
)
|
22,791 |
def unregister(config: configuration.NamespaceConfig,
unused_plugins: plugins_disco.PluginsRegistry) -> Optional[str]:
"""Deactivate account on server
:param config: Configuration object
:type config: configuration.NamespaceConfig
:param unused_plugins: List of plugins (deprecated)
:type unused_plugins: plugins_disco.PluginsRegistry
:returns: `None` or a string indicating and error
:rtype: None or str
"""
account_storage = account.AccountFileStorage(config)
accounts = account_storage.find_all()
if not accounts:
return "Could not find existing account to deactivate."
prompt = ("Are you sure you would like to irrevocably deactivate "
"your account?")
wants_deactivate = display_util.yesno(prompt, yes_label='Deactivate', no_label='Abort',
default=True)
if not wants_deactivate:
return "Deactivation aborted."
acc, acme = _determine_account(config)
cb_client = client.Client(config, acc, None, None, acme=acme)
if not cb_client.acme:
raise errors.Error("ACME client is not set.")
# delete on boulder
cb_client.acme.deactivate_registration(acc.regr)
account_files = account.AccountFileStorage(config)
# delete local account files
account_files.delete(config.account)
display_util.notify("Account deactivated.")
return None
|
def unregister(config: configuration.NamespaceConfig,
unused_plugins: plugins_disco.PluginsRegistry) -> Optional[str]:
"""Deactivate account on server
:param config: Configuration object
:type config: configuration.NamespaceConfig
:param unused_plugins: List of plugins (deprecated)
:type unused_plugins: plugins_disco.PluginsRegistry
:returns: `None` or a string indicating an error
:rtype: None or str
"""
account_storage = account.AccountFileStorage(config)
accounts = account_storage.find_all()
if not accounts:
return "Could not find existing account to deactivate."
prompt = ("Are you sure you would like to irrevocably deactivate "
"your account?")
wants_deactivate = display_util.yesno(prompt, yes_label='Deactivate', no_label='Abort',
default=True)
if not wants_deactivate:
return "Deactivation aborted."
acc, acme = _determine_account(config)
cb_client = client.Client(config, acc, None, None, acme=acme)
if not cb_client.acme:
raise errors.Error("ACME client is not set.")
# delete on boulder
cb_client.acme.deactivate_registration(acc.regr)
account_files = account.AccountFileStorage(config)
# delete local account files
account_files.delete(config.account)
display_util.notify("Account deactivated.")
return None
|