id (int64, values 11 to 59.9k) | original (string, lengths 33 to 150k) | modified (string, lengths 37 to 150k)
---|---|---|
2,072 |
def detection_error_tradeoff_curve(y_true, y_score, pos_label=None,
sample_weight=None):
"""Compute error rates for different probability thresholds.
    Note: This metric is used for ranking evaluation of a binary
classification task.
Read more in the :ref:`User Guide <det_curve>`.
Parameters
----------
y_true : ndarray of shape (n_samples,)
True binary labels. If labels are not either {-1, 1} or {0, 1}, then
pos_label should be explicitly given.
    y_score : ndarray of shape (n_samples,)
Target scores, can either be probability estimates of the positive
class, confidence values, or non-thresholded measure of decisions
(as returned by "decision_function" on some classifiers).
pos_label : int or str, default=None
The label of the positive class.
When ``pos_label=None``, if `y_true` is in {-1, 1} or {0, 1},
``pos_label`` is set to 1, otherwise an error will be raised.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
fpr : ndarray of shape (n_thresholds,)
False positive rate (FPR) such that element i is the false positive
rate of predictions with score >= thresholds[i]. This is occasionally
        referred to as false acceptance probability or fall-out.
fnr : ndarray of shape (n_thresholds,)
False negative rate (FNR) such that element i is the false negative
rate of predictions with score >= thresholds[i]. This is occasionally
referred to as false rejection or miss rate.
thresholds : ndarray of shape (n_thresholds,)
Decreasing score values.
See Also
--------
roc_curve : Compute Receiver operating characteristic (ROC) curve
precision_recall_curve : Compute precision-recall curve
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import detection_error_tradeoff_curve
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, fnr, thresholds = detection_error_tradeoff_curve(y_true, y_scores)
>>> fpr
array([0.5, 0.5, 0. ])
>>> fnr
array([0. , 0.5, 0.5])
>>> thresholds
array([0.35, 0.4 , 0.8 ])
"""
if len(np.unique(y_true)) != 2:
raise ValueError("Only one class present in y_true. Detection error "
"tradeoff curve is not defined in that case.")
fps, tps, thresholds = _binary_clf_curve(
y_true, y_score, pos_label=pos_label, sample_weight=sample_weight
)
fns = tps[-1] - tps
p_count = tps[-1]
n_count = fps[-1]
# start with false positives zero
first_ind = (
fps.searchsorted(fps[0], side='right') - 1
if fps.searchsorted(fps[0], side='right') > 0
else None
)
# stop with false negatives zero
last_ind = tps.searchsorted(tps[-1]) + 1
sl = slice(first_ind, last_ind)
# reverse the output such that list of false positives is decreasing
return (
fps[sl][::-1] / n_count,
fns[sl][::-1] / p_count,
thresholds[sl][::-1]
)
|
def detection_error_tradeoff_curve(y_true, y_score, pos_label=None,
sample_weight=None):
"""Compute error rates for different probability thresholds.
    Note: This metric is used for ranking evaluation of a binary
classification task.
Read more in the :ref:`User Guide <det_curve>`.
Parameters
----------
y_true : ndarray of shape (n_samples,)
True binary labels. If labels are not either {-1, 1} or {0, 1}, then
pos_label should be explicitly given.
    y_score : ndarray of shape (n_samples,)
Target scores, can either be probability estimates of the positive
class, confidence values, or non-thresholded measure of decisions
(as returned by :term:`decision_function` on some classifiers).
pos_label : int or str, default=None
The label of the positive class.
When ``pos_label=None``, if `y_true` is in {-1, 1} or {0, 1},
``pos_label`` is set to 1, otherwise an error will be raised.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
fpr : ndarray of shape (n_thresholds,)
False positive rate (FPR) such that element i is the false positive
rate of predictions with score >= thresholds[i]. This is occasionally
        referred to as false acceptance probability or fall-out.
fnr : ndarray of shape (n_thresholds,)
False negative rate (FNR) such that element i is the false negative
rate of predictions with score >= thresholds[i]. This is occasionally
referred to as false rejection or miss rate.
thresholds : ndarray of shape (n_thresholds,)
Decreasing score values.
See Also
--------
roc_curve : Compute Receiver operating characteristic (ROC) curve
precision_recall_curve : Compute precision-recall curve
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import detection_error_tradeoff_curve
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, fnr, thresholds = detection_error_tradeoff_curve(y_true, y_scores)
>>> fpr
array([0.5, 0.5, 0. ])
>>> fnr
array([0. , 0.5, 0.5])
>>> thresholds
array([0.35, 0.4 , 0.8 ])
"""
if len(np.unique(y_true)) != 2:
raise ValueError("Only one class present in y_true. Detection error "
"tradeoff curve is not defined in that case.")
fps, tps, thresholds = _binary_clf_curve(
y_true, y_score, pos_label=pos_label, sample_weight=sample_weight
)
fns = tps[-1] - tps
p_count = tps[-1]
n_count = fps[-1]
# start with false positives zero
first_ind = (
fps.searchsorted(fps[0], side='right') - 1
if fps.searchsorted(fps[0], side='right') > 0
else None
)
# stop with false negatives zero
last_ind = tps.searchsorted(tps[-1]) + 1
sl = slice(first_ind, last_ind)
# reverse the output such that list of false positives is decreasing
return (
fps[sl][::-1] / n_count,
fns[sl][::-1] / p_count,
thresholds[sl][::-1]
)
|
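A minimal usage sketch for the function above: plotting the returned error rates as a DET-style curve. It assumes matplotlib is installed; released scikit-learn exposes this functionality as `sklearn.metrics.det_curve`, so the import below uses that public name rather than the pre-rename name in this snippet.

import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import det_curve  # public name in released scikit-learn

y_true = np.array([0, 0, 1, 1])
y_scores = np.array([0.1, 0.4, 0.35, 0.8])
fpr, fnr, thresholds = det_curve(y_true, y_scores)

plt.plot(fpr, fnr, marker="o")
plt.xlabel("False positive rate (FPR)")
plt.ylabel("False negative rate (FNR)")
plt.title("Detection error tradeoff")
plt.show()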
28,958 |
def oauth_url(
client_id: Union[int, str],
*,
permissions: Permissions = MISSING,
guild: Snowflake = MISSING,
redirect_uri: str = MISSING,
scopes: Iterable[str] = MISSING,
disable_guild_select: bool = False,
state: str = MISSING
) -> str:
"""A helper function that returns the OAuth2 URL for inviting the bot
into guilds.
.. versionchanged:: 2.0
``permissions``, ``guild``, ``redirect_uri``, ``scopes`` and ``state`` parameters
are now keyword-only.
Parameters
-----------
client_id: Union[:class:`int`, :class:`str`]
The client ID for your bot.
permissions: :class:`~discord.Permissions`
The permissions you're requesting. If not given then you won't be requesting any
permissions.
guild: :class:`~discord.abc.Snowflake`
The guild to pre-select in the authorization screen, if available.
redirect_uri: :class:`str`
An optional valid redirect URI.
scopes: Iterable[:class:`str`]
An optional valid list of scopes. Defaults to ``('bot', 'applications.commands')``.
.. versionadded:: 1.7
disable_guild_select: :class:`bool`
Whether to disallow the user from changing the guild dropdown.
.. versionadded:: 2.0
state: :class:`str`
The state to return after the authorization.
.. versionadded:: 2.0
Returns
--------
:class:`str`
The OAuth2 URL for inviting the bot into guilds.
"""
url = f'https://discord.com/oauth2/authorize?client_id={client_id}'
url += '&scope=' + '+'.join(scopes or ('bot', 'applications.commands'))
if permissions is not MISSING:
url += f'&permissions={permissions.value}'
if guild is not MISSING:
url += f'&guild_id={guild.id}'
if redirect_uri is not MISSING:
from urllib.parse import urlencode
url += '&response_type=code&' + urlencode({'redirect_uri': redirect_uri})
if disable_guild_select:
url += '&disable_guild_select=true'
if state is not MISSING:
url += f'&state={state}'
return url
|
def oauth_url(
client_id: Union[int, str],
*,
permissions: Permissions = MISSING,
guild: Snowflake = MISSING,
redirect_uri: str = MISSING,
scopes: Iterable[str] = MISSING,
disable_guild_select: bool = False,
state: str = MISSING,
) -> str:
"""A helper function that returns the OAuth2 URL for inviting the bot
into guilds.
.. versionchanged:: 2.0
``permissions``, ``guild``, ``redirect_uri``, ``scopes`` and ``state`` parameters
are now keyword-only.
Parameters
-----------
client_id: Union[:class:`int`, :class:`str`]
The client ID for your bot.
permissions: :class:`~discord.Permissions`
The permissions you're requesting. If not given then you won't be requesting any
permissions.
guild: :class:`~discord.abc.Snowflake`
The guild to pre-select in the authorization screen, if available.
redirect_uri: :class:`str`
An optional valid redirect URI.
scopes: Iterable[:class:`str`]
An optional valid list of scopes. Defaults to ``('bot', 'applications.commands')``.
.. versionadded:: 1.7
disable_guild_select: :class:`bool`
Whether to disallow the user from changing the guild dropdown.
.. versionadded:: 2.0
state: :class:`str`
The state to return after the authorization.
.. versionadded:: 2.0
Returns
--------
:class:`str`
The OAuth2 URL for inviting the bot into guilds.
"""
url = f'https://discord.com/oauth2/authorize?client_id={client_id}'
url += '&scope=' + '+'.join(scopes or ('bot', 'applications.commands'))
if permissions is not MISSING:
url += f'&permissions={permissions.value}'
if guild is not MISSING:
url += f'&guild_id={guild.id}'
if redirect_uri is not MISSING:
from urllib.parse import urlencode
url += '&response_type=code&' + urlencode({'redirect_uri': redirect_uri})
if disable_guild_select:
url += '&disable_guild_select=true'
if state is not MISSING:
url += f'&state={state}'
return url
|
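A minimal calling sketch for `oauth_url`, assuming discord.py 2.x is installed; the client ID is a placeholder.

import discord
from discord.utils import oauth_url

perms = discord.Permissions(send_messages=True, read_message_history=True)
invite = oauth_url(
    123456789012345678,  # placeholder application/client ID
    permissions=perms,
    scopes=("bot", "applications.commands"),
)
print(invite)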
27,969 |
def get_analyzer_checkers_cmd(clang_version_info, env, plugins,
alpha=True, debug=True):
"""Return the checkers list which depends on the used clang version.
    plugins should be a list of paths to clang plugins (.so files with checkers).
    Before clang 9, alpha and debug checkers were printed by default.
    Since clang 9, extra arguments are needed to print the additional checkers.
"""
major_version = clang_version_info.major_version
command = []
for plugin in plugins:
command.extend(["-load", plugin])
command.append("-analyzer-checker-help")
# The clang compiler os OSX is a few
    # releases older than the open source clang release.
    # The new checker help printing flags are not available there yet.
    # If the OSX clang is updated to be based on clang v8,
# this early return can be removed.
if clang_version_info.vendor != "clang":
return command
if alpha and major_version > 8:
command.append("-analyzer-checker-help-alpha")
if debug and major_version > 8:
command.append("-analyzer-checker-help-developer")
return command
|
def get_analyzer_checkers_cmd(clang_version_info, env, plugins,
alpha=True, debug=True):
"""Return the checkers list which depends on the used clang version.
    plugins should be a list of paths to clang plugins (.so files with checkers).
    Before clang 9, alpha and debug checkers were printed by default.
    Since clang 9, extra arguments are needed to print the additional checkers.
"""
major_version = clang_version_info.major_version
command = []
for plugin in plugins:
command.extend(["-load", plugin])
command.append("-analyzer-checker-help")
# The clang compiler on OSX is a few
    # releases older than the open source clang release.
    # The new checker help printing flags are not available there yet.
    # If the OSX clang is updated to be based on clang v8,
# this early return can be removed.
if clang_version_info.vendor != "clang":
return command
if alpha and major_version > 8:
command.append("-analyzer-checker-help-alpha")
if debug and major_version > 8:
command.append("-analyzer-checker-help-developer")
return command
|
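An illustrative call of `get_analyzer_checkers_cmd`. `FakeVersionInfo` is a hypothetical stand-in that carries only the two attributes the function reads (`major_version`, `vendor`), and the plugin path is a placeholder.

from collections import namedtuple

FakeVersionInfo = namedtuple("FakeVersionInfo", ["major_version", "vendor"])

cmd = get_analyzer_checkers_cmd(FakeVersionInfo(major_version=10, vendor="clang"),
                                env=None,
                                plugins=["/path/to/checker_plugin.so"],
                                alpha=True, debug=False)
# For clang >= 9 with an alpha listing but no developer checkers this yields:
# ['-load', '/path/to/checker_plugin.so', '-analyzer-checker-help',
#  '-analyzer-checker-help-alpha']
print(cmd)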
54,476 |
def _get_pareto_front_2d(
study: MultiObjectiveStudy,
names: Optional[List[str]],
include_dominated_trials: bool = False,
axis_order: Optional[List[int]] = None,
) -> "go.Figure":
if names is None:
names = ["Objective 0", "Objective 1"]
elif len(names) != 2:
raise ValueError("The length of `names` is supposed to be 2.")
trials = study.get_pareto_front_trials()
if len(trials) == 0:
_logger.warning("Your study does not have any completed trials.")
point_colors = ["blue"] * len(trials)
if include_dominated_trials:
non_pareto_trials = _get_non_pareto_front_trials(study, trials)
point_colors += ["red"] * len(non_pareto_trials)
trials += non_pareto_trials
if axis_order is None:
axis_order = list(range(2))
elif max(axis_order) > 1:
_logger.warning("axis_order contains invalid index higher than 1")
elif min(axis_order) < 0:
_logger.warning("axis_order contains invalid index lower than 0")
data = go.Scatter(
x=[t.values[axis_order[0]] for t in trials],
y=[t.values[axis_order[1]] for t in trials],
text=[_make_hovertext(t) for t in trials],
mode="markers",
hovertemplate="%{text}<extra></extra>",
marker={"color": point_colors},
)
layout = go.Layout(
title="Pareto-front Plot",
xaxis_title=names[axis_order[0]],
yaxis_title=names[axis_order[1]],
)
return go.Figure(data=data, layout=layout)
|
def _get_pareto_front_2d(
study: MultiObjectiveStudy,
names: Optional[List[str]],
include_dominated_trials: bool = False,
axis_order: Optional[List[int]] = None,
) -> "go.Figure":
if names is None:
names = ["Objective 0", "Objective 1"]
elif len(names) != 2:
raise ValueError("The length of `names` is supposed to be 2.")
trials = study.get_pareto_front_trials()
if len(trials) == 0:
_logger.warning("Your study does not have any completed trials.")
point_colors = ["blue"] * len(trials)
if include_dominated_trials:
non_pareto_trials = _get_non_pareto_front_trials(study, trials)
point_colors += ["red"] * len(non_pareto_trials)
trials += non_pareto_trials
if axis_order is None:
axis_order = list(range(2))
else:
if len(axis_order) != 2:
raise ValueError(f"Size of `axis_order`. Expect: 2, Actual: {len(axis_order)}")
if (np.unique(axis_order).size != 2):
raise ValueError(f"Elements of given `axis_order` {axis_order} are not unique!")
if max(axis_order) > 1:
_logger.warning("axis_order contains invalid index higher than 1")
elif min(axis_order) < 0:
_logger.warning("axis_order contains invalid index lower than 0")
data = go.Scatter(
x=[t.values[axis_order[0]] for t in trials],
y=[t.values[axis_order[1]] for t in trials],
text=[_make_hovertext(t) for t in trials],
mode="markers",
hovertemplate="%{text}<extra></extra>",
marker={"color": point_colors},
)
layout = go.Layout(
title="Pareto-front Plot",
xaxis_title=names[axis_order[0]],
yaxis_title=names[axis_order[1]],
)
return go.Figure(data=data, layout=layout)
|
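The revision adds up-front validation of `axis_order`. A standalone sketch of the two new ValueError checks, outside the plotting code and with illustrative inputs:

import numpy as np

def validate_axis_order(axis_order):
    # Mirrors the two ValueError checks added in the revised function.
    if len(axis_order) != 2:
        raise ValueError(f"Size of `axis_order`. Expect: 2, Actual: {len(axis_order)}")
    if np.unique(axis_order).size != 2:
        raise ValueError(f"Elements of given `axis_order` {axis_order} are not unique!")

validate_axis_order([1, 0])    # passes
# validate_axis_order([0, 0])  # would raise: elements are not unique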
32,238 |
def install_software(topology: Topology, version: str,
device_filter_string: str = None, sync: bool = False) -> InstallSoftwareCommandResult:
"""
Install the given software version onto the device. Download the software first with
pan-os-platform-download-software
:param topology: `Topology` instance !no-auto-argument
:param device_filter_string: String to filter to only install to specific devices or serial numbers
:param version: software version to upgrade to, ex. 9.1.2
    :param sync: If provided, runs the installation synchronously - make sure 'execution-timeout' is increased.
"""
if sync == "false":
sync = False
result: InstallSoftwareCommandResult = UniversalCommand.install_software(topology, version,
device_filter_str=device_filter_string,
sync=sync)
return result
|
def install_software(topology: Topology, version: str,
device_filter_string: Optional[str] = None, sync: Optional[bool] = False) -> InstallSoftwareCommandResult:
"""
Install the given software version onto the device. Download the software first with
pan-os-platform-download-software
:param topology: `Topology` instance !no-auto-argument
:param device_filter_string: String to filter to only install to specific devices or serial numbers
:param version: software version to upgrade to, ex. 9.1.2
    :param sync: If provided, runs the installation synchronously - make sure 'execution-timeout' is increased.
"""
if sync == "false":
sync = False
result: InstallSoftwareCommandResult = UniversalCommand.install_software(topology, version,
device_filter_str=device_filter_string,
sync=sync)
return result
|
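The revised signature annotates both keyword parameters with `Optional`, which presumes the following import; shown here as a sketch in case it is not already present elsewhere in the module.

from typing import Optional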
57,770 |
def main():
channel_type = demisto.args().get('type')
channel_name = demisto.args().get('name')
channel_desc = demisto.args().get('description')
channel_team = demisto.args().get('team')
errors = []
integrations_to_create = []
channels_created = []
for brand in ['SlackV2', 'Microsoft Teams']:
res = demisto.executeCommand('IsIntegrationAvailable', {'brandname': brand})
if res[0].get('Contents') == 'yes':
integrations_to_create.append(brand)
if not integrations_to_create:
return_error('Microsoft Teams and Slack are not available, please configure at least one of them.')
for integration in integrations_to_create:
res = None
if integration == 'SlackV2':
res = demisto.executeCommand('slack-create-channel', {'type': channel_type, 'name': channel_name})
elif integration == 'Microsoft Teams':
if channel_team:
res = demisto.executeCommand('microsoft-teams-create-channel',
{'channel_name': channel_name, 'description': channel_desc, 'team': channel_team})
else:
errors.append('Failed to create channel in Microsoft Teams: team argument is missing')
if is_error(res):
errors.append(f'Failed to create channel in {integration}: {res[0].get("Contents")}')
elif res:
channels_created.append(integration)
errors_str = '\n'.join(errors)
# in case of no channel created
if len(channels_created) == 0:
return_error(errors_str)
    # in case of channel created in all the available brands (Microsoft Teams and Slack)
elif len(channels_created) == len(integrations_to_create):
return_results(f'Channel {channel_name} created successfully.')
# in case of only one channel created
else:
return_results(f'Channel {channel_name} created successfully.\n{errors_str}')
|
def main():
args = demisto.args()
channel_type = args.get('type')
channel_name = args.get('name')
channel_desc = args.get('description')
channel_team = args.get('team')
errors = []
integrations_to_create = []
channels_created = []
for brand in ['SlackV2', 'Microsoft Teams']:
res = demisto.executeCommand('IsIntegrationAvailable', {'brandname': brand})
if res[0].get('Contents') == 'yes':
integrations_to_create.append(brand)
if not integrations_to_create:
return_error('Microsoft Teams and Slack are not available, please configure at least one of them.')
for integration in integrations_to_create:
res = None
if integration == 'SlackV2':
res = demisto.executeCommand('slack-create-channel', {'type': channel_type, 'name': channel_name})
elif integration == 'Microsoft Teams':
if channel_team:
res = demisto.executeCommand('microsoft-teams-create-channel',
{'channel_name': channel_name, 'description': channel_desc, 'team': channel_team})
else:
errors.append('Failed to create channel in Microsoft Teams: team argument is missing')
if is_error(res):
errors.append(f'Failed to create channel in {integration}: {res[0].get("Contents")}')
elif res:
channels_created.append(integration)
errors_str = '\n'.join(errors)
# in case of no channel created
if len(channels_created) == 0:
return_error(errors_str)
    # in case of channel created in all the available brands (Microsoft Teams and Slack)
elif len(channels_created) == len(integrations_to_create):
return_results(f'Channel {channel_name} created successfully.')
# in case of only one channel created
else:
return_results(f'Channel {channel_name} created successfully.\n{errors_str}')
|
8,887 |
def run(settings, pid_file, daemon=False):
"""Run the bot with these ``settings``
:param settings: bot's settings to run with
:type settings: :class:`sopel.config.Config`
:param str pid_file: path to the bot's PID file
:param bool daemon: tell if the bot should be ran as a daemon
"""
delay = 20
# Acts as a welcome message, showing the program and platform version at start
print_version()
# Also show the location of the config file used to load settings
print("\nLoaded config file: {}".format(settings.filename))
if not settings.core.ca_certs:
tools.stderr(
'Could not open CA certificates file. SSL will not work properly!')
# Define empty variable `p` for bot
p = None
while True:
if p and p.hasquit: # Check if `hasquit` was set for bot during disconnected phase
break
try:
p = bot.Sopel(settings, daemon=daemon)
p.setup()
p.set_signal_handlers()
except KeyboardInterrupt:
tools.stderr('Bot setup interrupted')
break
except Exception:
# In that case, there is nothing we can do.
            # If the bot can't set itself up, then it won't run.
# This is a critical case scenario, where the user should have
# direct access to the exception traceback right in the console.
# Besides, we can't know if logging has been set up or not, so
# we can't rely on that here.
tools.stderr('Unexpected error in bot setup')
raise
try:
p.run(settings.core.host, int(settings.core.port))
except KeyboardInterrupt:
break
except Exception:
err_log = logging.getLogger('sopel.exceptions')
err_log.exception('Critical exception in core')
err_log.error('----------------------------------------')
# TODO: This should be handled by command_start
# All we should need here is a return value, but replacing the
# os._exit() call below (at the end) broke ^C.
# This one is much harder to test, so until that one's sorted it
# isn't worth the risk of trying to remove this one.
os.unlink(pid_file)
os._exit(1)
if not isinstance(delay, int):
break
if p.wantsrestart:
return -1
if p.hasquit:
break
LOGGER.warning('Disconnected. Reconnecting in %s seconds...', delay)
time.sleep(delay)
# TODO: This should be handled by command_start
# All we should need here is a return value, but making this
# a return makes Sopel hang on ^C after it says "Closed!"
os.unlink(pid_file)
os._exit(0)
|
def run(settings, pid_file, daemon=False):
"""Run the bot with these ``settings``
:param settings: bot's settings to run with
:type settings: :class:`sopel.config.Config`
:param str pid_file: path to the bot's PID file
:param bool daemon: tell if the bot should be run as a daemon
"""
delay = 20
# Acts as a welcome message, showing the program and platform version at start
print_version()
# Also show the location of the config file used to load settings
print("\nLoaded config file: {}".format(settings.filename))
if not settings.core.ca_certs:
tools.stderr(
'Could not open CA certificates file. SSL will not work properly!')
# Define empty variable `p` for bot
p = None
while True:
if p and p.hasquit: # Check if `hasquit` was set for bot during disconnected phase
break
try:
p = bot.Sopel(settings, daemon=daemon)
p.setup()
p.set_signal_handlers()
except KeyboardInterrupt:
tools.stderr('Bot setup interrupted')
break
except Exception:
# In that case, there is nothing we can do.
            # If the bot can't set itself up, then it won't run.
# This is a critical case scenario, where the user should have
# direct access to the exception traceback right in the console.
# Besides, we can't know if logging has been set up or not, so
# we can't rely on that here.
tools.stderr('Unexpected error in bot setup')
raise
try:
p.run(settings.core.host, int(settings.core.port))
except KeyboardInterrupt:
break
except Exception:
err_log = logging.getLogger('sopel.exceptions')
err_log.exception('Critical exception in core')
err_log.error('----------------------------------------')
# TODO: This should be handled by command_start
# All we should need here is a return value, but replacing the
# os._exit() call below (at the end) broke ^C.
# This one is much harder to test, so until that one's sorted it
# isn't worth the risk of trying to remove this one.
os.unlink(pid_file)
os._exit(1)
if not isinstance(delay, int):
break
if p.wantsrestart:
return -1
if p.hasquit:
break
LOGGER.warning('Disconnected. Reconnecting in %s seconds...', delay)
time.sleep(delay)
# TODO: This should be handled by command_start
# All we should need here is a return value, but making this
# a return makes Sopel hang on ^C after it says "Closed!"
os.unlink(pid_file)
os._exit(0)
|
13,902 |
def parse_coverage(
lines: List[str],
*,
filename: str,
logger: Logger,
exclude_lines_by_pattern: Optional[str],
exclude_pattern_prefix: Optional[str],
flags: ParserFlags,
) -> FileCoverage:
"""
Extract coverage data from a gcov report.
Logging:
Parse problems are reported as warnings.
Coverage exclusion decisions are reported as verbose messages.
Arguments:
lines: the lines of the file to be parsed (excluding newlines)
filename: for error reports
logger: for error reports
exclude_lines_by_pattern: string with regex syntax to exclude
individual lines
exclude_pattern_prefix: string with prefix for _LINE/_START/_STOP markers (default present when None is passed)
flags: various choices for the parser behavior
Returns:
the coverage data
Raises:
Any exceptions during parsing, unless `ParserFlags.IGNORE_PARSE_ERRORS`
is enabled.
"""
context = _Context(flags, logger, filename)
lines_with_errors: List[_LineWithError] = []
tokenized_lines: List[Tuple[_Line, str]] = []
for raw_line in lines:
# empty lines shouldn't occur in reality, but are common in testing
if not raw_line:
continue
try:
tokenized_lines.append((_parse_line(raw_line), raw_line))
except Exception as ex: # pylint: disable=broad-except
lines_with_errors.append((raw_line, ex))
if flags & ParserFlags.RESPECT_EXCLUSION_MARKERS or flags & ParserFlags.PARSE_DECISIONS:
src_lines = [
(line.lineno, line.source_code)
for line, _ in tokenized_lines
if isinstance(line, _SourceLine)
]
if flags & ParserFlags.RESPECT_EXCLUSION_MARKERS:
line_is_excluded = _find_excluded_ranges(
lines=src_lines,
warnings=_ExclusionRangeWarnings(logger, filename),
exclude_lines_by_pattern=exclude_lines_by_pattern,
exclude_pattern_prefix=exclude_pattern_prefix if exclude_pattern_prefix is not None else r"[GL]COVR?"
)
else:
line_is_excluded = _make_is_in_any_range([])
coverage = FileCoverage(filename)
state = _ParserState()
for line, raw_line in tokenized_lines:
try:
state = _gather_coverage_from_line(
state,
line,
coverage=coverage,
line_is_excluded=line_is_excluded,
context=context,
)
except Exception as ex: # pylint: disable=broad-except
lines_with_errors.append((raw_line, ex))
state = _ParserState(is_recovering=True)
# Clean up the final state. This shouldn't happen,
# but the last line could theoretically contain pending function lines
for function in state.deferred_functions:
_add_coverage_for_function(coverage, state.lineno + 1, function, context)
if flags & ParserFlags.PARSE_DECISIONS:
decision_parser = DecisionParser(filename, coverage, src_lines, logger)
decision_parser.parse_all_lines()
_report_lines_with_errors(lines_with_errors, context)
return coverage
|
def parse_coverage(
lines: List[str],
*,
filename: str,
logger: Logger,
exclude_lines_by_pattern: Optional[str],
exclude_pattern_prefix: Optional[str],
flags: ParserFlags,
) -> FileCoverage:
"""
Extract coverage data from a gcov report.
Logging:
Parse problems are reported as warnings.
Coverage exclusion decisions are reported as verbose messages.
Arguments:
lines: the lines of the file to be parsed (excluding newlines)
filename: for error reports
logger: for error reports
exclude_lines_by_pattern: string with regex syntax to exclude
individual lines
exclude_pattern_prefix: string with prefix for _LINE/_START/_STOP markers (default present when None is passed)
flags: various choices for the parser behavior
Returns:
the coverage data
Raises:
Any exceptions during parsing, unless `ParserFlags.IGNORE_PARSE_ERRORS`
is enabled.
"""
context = _Context(flags, logger, filename)
lines_with_errors: List[_LineWithError] = []
tokenized_lines: List[Tuple[_Line, str]] = []
for raw_line in lines:
# empty lines shouldn't occur in reality, but are common in testing
if not raw_line:
continue
try:
tokenized_lines.append((_parse_line(raw_line), raw_line))
except Exception as ex: # pylint: disable=broad-except
lines_with_errors.append((raw_line, ex))
if flags & ParserFlags.RESPECT_EXCLUSION_MARKERS or flags & ParserFlags.PARSE_DECISIONS:
src_lines = [
(line.lineno, line.source_code)
for line, _ in tokenized_lines
if isinstance(line, _SourceLine)
]
if flags & ParserFlags.RESPECT_EXCLUSION_MARKERS:
line_is_excluded = _find_excluded_ranges(
lines=src_lines,
warnings=_ExclusionRangeWarnings(logger, filename),
exclude_lines_by_pattern=exclude_lines_by_pattern,
exclude_pattern_prefix=exclude_pattern_prefix,
)
else:
line_is_excluded = _make_is_in_any_range([])
coverage = FileCoverage(filename)
state = _ParserState()
for line, raw_line in tokenized_lines:
try:
state = _gather_coverage_from_line(
state,
line,
coverage=coverage,
line_is_excluded=line_is_excluded,
context=context,
)
except Exception as ex: # pylint: disable=broad-except
lines_with_errors.append((raw_line, ex))
state = _ParserState(is_recovering=True)
# Clean up the final state. This shouldn't happen,
# but the last line could theoretically contain pending function lines
for function in state.deferred_functions:
_add_coverage_for_function(coverage, state.lineno + 1, function, context)
if flags & ParserFlags.PARSE_DECISIONS:
decision_parser = DecisionParser(filename, coverage, src_lines, logger)
decision_parser.parse_all_lines()
_report_lines_with_errors(lines_with_errors, context)
return coverage
|
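`parse_coverage` combines parser options with bitwise flag checks. A self-contained sketch of that pattern, using a hypothetical `IntFlag` stand-in for `ParserFlags` (the real enum lives elsewhere in gcovr and may differ):

from enum import IntFlag, auto

class ParserFlags(IntFlag):  # hypothetical stand-in for illustration only
    NONE = 0
    RESPECT_EXCLUSION_MARKERS = auto()
    PARSE_DECISIONS = auto()
    IGNORE_PARSE_ERRORS = auto()

flags = ParserFlags.RESPECT_EXCLUSION_MARKERS | ParserFlags.PARSE_DECISIONS
assert flags & ParserFlags.PARSE_DECISIONS              # truthy: decision parsing requested
assert not (flags & ParserFlags.IGNORE_PARSE_ERRORS)    # falsy: parse errors would be re-raised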
59,180 |
def _create_tree(fullmodule, path, fname, source, tree, inpackage):
"""Return the tree for a particular module.
fullmodule (full module name), inpackage+module, becomes o.module.
path is passed to recursive calls of _readmodule.
fname becomes o.file.
source is tokenized. Imports cause recursive calls to _readmodule.
tree is {} or {'__path__': <submodule search locations>}.
inpackage, None or string, is passed to recursive calls of _readmodule.
The effect of recursive calls is mutation of global _modules.
"""
f = io.StringIO(source)
# stack = [] # Initialize stack of (class, indent) pairs.
stack = Stack() # changing the source code so as to get the ending line
g = tokenize.generate_tokens(f.readline)
try:
for tokentype, token, start, _end, _line in g:
if tokentype == DEDENT:
lineno, thisindent = start
# Close previous nested classes and defs.
while stack and stack[-1][1] >= thisindent:
del stack[-1]
elif token == 'def':
lineno, thisindent = start
# Close previous nested classes and defs.
while stack and stack[-1][1] >= thisindent:
del stack[-1]
tokentype, func_name, start = next(g)[0:3]
if tokentype != NAME:
continue # Skip def with syntax error.
cur_func = None
if stack:
cur_obj = stack[-1][0]
cur_func = _nest_function(cur_obj, func_name, lineno)
else:
# It is just a function.
cur_func = Function(fullmodule, func_name, fname, lineno)
tree[func_name] = cur_func
stack.append((cur_func, thisindent))
elif token == 'class':
lineno, thisindent = start
# Close previous nested classes and defs.
while stack and stack[-1][1] >= thisindent:
del stack[-1]
tokentype, class_name, start = next(g)[0:3]
if tokentype != NAME:
continue # Skip class with syntax error.
# Parse what follows the class name.
tokentype, token, start = next(g)[0:3]
inherit = None
if token == '(':
names = [] # Initialize list of superclasses.
level = 1
super = [] # Tokens making up current superclass.
while True:
tokentype, token, start = next(g)[0:3]
if token in (')', ',') and level == 1:
n = "".join(super)
if n in tree:
# We know this super class.
n = tree[n]
else:
c = n.split('.')
if len(c) > 1:
# Super class form is module.class:
# look in module for class.
m = c[-2]
c = c[-1]
if m in _modules:
d = _modules[m]
if c in d:
n = d[c]
names.append(n)
super = []
if token == '(':
level += 1
elif token == ')':
level -= 1
if level == 0:
break
elif token == ',' and level == 1:
pass
# Only use NAME and OP (== dot) tokens for type name.
elif tokentype in (NAME, OP) and level == 1:
super.append(token)
# Expressions in the base list are not supported.
inherit = names
if stack:
cur_obj = stack[-1][0]
cur_class = _nest_class(
cur_obj, class_name, lineno, inherit)
else:
cur_class = Class(fullmodule, class_name, inherit,
fname, lineno)
tree[class_name] = cur_class
stack.append((cur_class, thisindent))
elif token == 'import' and start[1] == 0:
modules = _getnamelist(g)
for mod, _mod2 in modules:
try:
# Recursively read the imported module.
if inpackage is None:
_readmodule(mod, path)
else:
try:
_readmodule(mod, path, inpackage)
except ImportError:
_readmodule(mod, [])
except:
# If we can't find or parse the imported module,
# too bad -- don't die here.
pass
elif token == 'from' and start[1] == 0:
mod, token = _getname(g)
if not mod or token != "import":
continue
names = _getnamelist(g)
try:
# Recursively read the imported module.
d = _readmodule(mod, path, inpackage)
except:
# If we can't find or parse the imported module,
# too bad -- don't die here.
continue
# Add any classes that were defined in the imported module
# to our name space if they were mentioned in the list.
for n, n2 in names:
if n in d:
tree[n2 or n] = d[n]
elif n == '*':
# Don't add names that start with _.
for n in d:
if n[0] != '_':
tree[n] = d[n]
except StopIteration:
pass
f.close()
return tree
|
def _create_tree(fullmodule, path, fname, source, tree, inpackage):
"""Return the tree for a particular module.
fullmodule (full module name), inpackage+module, becomes o.module.
path is passed to recursive calls of _readmodule.
fname becomes o.file.
source is tokenized. Imports cause recursive calls to _readmodule.
tree is {} or {'__path__': <submodule search locations>}.
inpackage, None or string, is passed to recursive calls of _readmodule.
The effect of recursive calls is mutation of global _modules.
"""
f = io.StringIO(source)
stack = Stack() # changing the source code so as to get the ending line
g = tokenize.generate_tokens(f.readline)
try:
for tokentype, token, start, _end, _line in g:
if tokentype == DEDENT:
lineno, thisindent = start
# Close previous nested classes and defs.
while stack and stack[-1][1] >= thisindent:
del stack[-1]
elif token == 'def':
lineno, thisindent = start
# Close previous nested classes and defs.
while stack and stack[-1][1] >= thisindent:
del stack[-1]
tokentype, func_name, start = next(g)[0:3]
if tokentype != NAME:
continue # Skip def with syntax error.
cur_func = None
if stack:
cur_obj = stack[-1][0]
cur_func = _nest_function(cur_obj, func_name, lineno)
else:
# It is just a function.
cur_func = Function(fullmodule, func_name, fname, lineno)
tree[func_name] = cur_func
stack.append((cur_func, thisindent))
elif token == 'class':
lineno, thisindent = start
# Close previous nested classes and defs.
while stack and stack[-1][1] >= thisindent:
del stack[-1]
tokentype, class_name, start = next(g)[0:3]
if tokentype != NAME:
continue # Skip class with syntax error.
# Parse what follows the class name.
tokentype, token, start = next(g)[0:3]
inherit = None
if token == '(':
names = [] # Initialize list of superclasses.
level = 1
super = [] # Tokens making up current superclass.
while True:
tokentype, token, start = next(g)[0:3]
if token in (')', ',') and level == 1:
n = "".join(super)
if n in tree:
# We know this super class.
n = tree[n]
else:
c = n.split('.')
if len(c) > 1:
# Super class form is module.class:
# look in module for class.
m = c[-2]
c = c[-1]
if m in _modules:
d = _modules[m]
if c in d:
n = d[c]
names.append(n)
super = []
if token == '(':
level += 1
elif token == ')':
level -= 1
if level == 0:
break
elif token == ',' and level == 1:
pass
# Only use NAME and OP (== dot) tokens for type name.
elif tokentype in (NAME, OP) and level == 1:
super.append(token)
# Expressions in the base list are not supported.
inherit = names
if stack:
cur_obj = stack[-1][0]
cur_class = _nest_class(
cur_obj, class_name, lineno, inherit)
else:
cur_class = Class(fullmodule, class_name, inherit,
fname, lineno)
tree[class_name] = cur_class
stack.append((cur_class, thisindent))
elif token == 'import' and start[1] == 0:
modules = _getnamelist(g)
for mod, _mod2 in modules:
try:
# Recursively read the imported module.
if inpackage is None:
_readmodule(mod, path)
else:
try:
_readmodule(mod, path, inpackage)
except ImportError:
_readmodule(mod, [])
except:
# If we can't find or parse the imported module,
# too bad -- don't die here.
pass
elif token == 'from' and start[1] == 0:
mod, token = _getname(g)
if not mod or token != "import":
continue
names = _getnamelist(g)
try:
# Recursively read the imported module.
d = _readmodule(mod, path, inpackage)
except:
# If we can't find or parse the imported module,
# too bad -- don't die here.
continue
# Add any classes that were defined in the imported module
# to our name space if they were mentioned in the list.
for n, n2 in names:
if n in d:
tree[n2 or n] = d[n]
elif n == '*':
# Don't add names that start with _.
for n in d:
if n[0] != '_':
tree[n] = d[n]
except StopIteration:
pass
f.close()
return tree
|
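`_create_tree` walks the raw token stream rather than an AST. A minimal sketch of the stream it consumes, using only the standard library:

import io
import tokenize

src = "class A:\n    def f(self):\n        pass\n"
for tok in tokenize.generate_tokens(io.StringIO(src).readline):
    # Each token mirrors the (tokentype, token, start, _end, _line) tuple unpacked above.
    print(tokenize.tok_name[tok.type], repr(tok.string), tok.start)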
11,492 |
def sample_create_project():
from azure.ai.language.text.authoring import TextAuthoringClient
from azure.core.credentials import AzureKeyCredential
import os
endpoint = os.environ["AZURE_TEXT_AUTHORING_ENDPOINT"]
key = os.environ["AZURE_TEXT_AUTHORING_KEY"]
storageContainer = os.environ["AZURE_TEXT_AUTHORING_STORAGE"]
client = TextAuthoringClient(endpoint, AzureKeyCredential(key)).text_analysis_authoring
project_name = "Project_Name"
project_body = {
"projectName": project_name,
"language": "en",
"projectKind": "customSingleLabelClassification",
"description": "Test Project",
"multilingual": "True",
"storageInputContainerName": storageContainer
}
client.create_project(project_name, project_body)
created_projects = client.list_projects()
created_projects_names = map(lambda project: project["projectName"], created_projects)
    if project_name in created_projects_names:
print("The project is created successfully")
else:
print("An error has occured")
|
def sample_create_project():
from azure.ai.language.text.authoring import TextAuthoringClient
from azure.core.credentials import AzureKeyCredential
import os
endpoint = os.environ["AZURE_TEXT_AUTHORING_ENDPOINT"]
key = os.environ["AZURE_TEXT_AUTHORING_KEY"]
storageContainer = os.environ["AZURE_TEXT_AUTHORING_STORAGE"]
client = TextAuthoringClient(endpoint, AzureKeyCredential(key)).text_analysis_authoring
project_name = "Project_Name"
project_body = {
"projectName": project_name,
"language": "en",
"projectKind": "customSingleLabelClassification",
"description": "Test Project",
"multilingual": True,
"storageInputContainerName": storageContainer
}
client.create_project(project_name, project_body)
created_projects = client.list_projects()
created_projects_names = map(lambda project: project["projectName"], created_projects)
    if project_name in created_projects_names:
print("The project is created successfully")
else:
print("An error has occured")
|
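The only change in this pair is sending "multilingual" as a boolean instead of the string "True". A small sketch of why that matters, assuming the client serializes the project body to JSON:

import json

print(json.dumps({"multilingual": "True"}))  # {"multilingual": "True"}  -- a JSON string
print(json.dumps({"multilingual": True}))    # {"multilingual": true}    -- a JSON boolean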
36,323 |
def validate_plugin_manifest(manifest, _callback, silent):
errors = []
for k in (
'name', 'release', 'pkgs', 'packagesite', 'fingerprints', 'artifact',
):
if k not in manifest:
errors.append(f'Missing "{k}" key in manifest')
if 'devfs_ruleset' in manifest:
if not isinstance(manifest['devfs_ruleset'], dict):
errors.append('"devfs_ruleset" must be a dictionary')
else:
devfs_ruleset = manifest['devfs_ruleset']
if 'paths' not in devfs_ruleset:
errors.append('Key "paths" not specified in devfs_ruleset')
elif not isinstance(devfs_ruleset['paths'], dict):
errors.append('"devfs_ruleset.paths" should be a valid dictionary')
if 'includes' in devfs_ruleset and not isinstance(devfs_ruleset['includes'], list):
errors.append('"devfs_ruleset.includes" should be a valid list')
if errors:
errors = '\n'.join(errors)
logit(
{
'level': 'EXCEPTION',
'msg': f'Following errors were encountered with plugin manifest:\n{errors}'
},
_callback=_callback,
silent=silent,
)
|
def validate_plugin_manifest(manifest, _callback, silent):
errors = []
for k in (
'name', 'release', 'pkgs', 'packagesite', 'fingerprints', 'artifact',
):
if k not in manifest:
errors.append(f'Missing "{k}" key in manifest')
if 'devfs_ruleset' in manifest:
if not isinstance(manifest['devfs_ruleset'], dict):
errors.append('"devfs_ruleset" must be a dictionary')
else:
devfs_ruleset = manifest['devfs_ruleset']
if 'paths' not in devfs_ruleset:
errors.append('Key "paths" not specified in devfs_ruleset')
elif not isinstance(devfs_ruleset['paths'], dict):
errors.append('"devfs_ruleset.paths" should be a valid dictionary')
if 'includes' in devfs_ruleset and not isinstance(devfs_ruleset['includes'], list):
errors.append('"devfs_ruleset.includes" should be a valid list')
if errors:
errors = '\n'.join(errors)
logit(
{
'level': 'EXCEPTION',
'msg': f'The following errors were encountered with the plugin manifest:\n{errors}'
},
_callback=_callback,
silent=silent,
)
|
9,061 |
def action_command(*command_list: str) -> Callable:
"""Decorate a function to trigger on CTCP ACTION lines.
:param str command_list: one or more command name(s) to match
This decorator can be used to add multiple commands to one callable in a
single line. The resulting match object will have the command as the first
group; the rest of the line, excluding leading whitespace, as the second
group; and parameters 1 through 4, separated by whitespace, as groups 3-6.
Example::
@action_command("hello!")
# Would trigger on "/me hello!"
.. versionadded:: 7.0
.. note::
The command name will be escaped to be used in a regex command. As such
it is not possible to use something like ``/me command\\d+`` to catch
something like ``/me command1`` or ``/me command2``.
You have several options at your disposal to replace a regex in the
command name:
* use a command alias,
* parse the arguments with your own regex within your plugin callable,
* use a :func:`rule`,
the rule must be used with the :func:`ctcp` decorator::
@rule(r'hello!?')
@ctcp('ACTION')
# Would trigger on "/me hello!" and "/me hello"
"""
def add_attribute(function):
function._sopel_callable = True
if not hasattr(function, 'action_commands'):
function.action_commands = []
for cmd in command_list:
if cmd not in function.action_commands:
function.action_commands.append(cmd)
return function
return add_attribute
|
def action_command(*command_list: str) -> Callable:
"""Decorate a function to trigger on CTCP ACTION lines.
:param str command_list: one or more command name(s) to match
This decorator can be used to add multiple commands to one callable in a
single line. The resulting match object will have the command as the first
group; the rest of the line, excluding leading whitespace, as the second
group; and parameters 1 through 4, separated by whitespace, as groups 3-6.
Example::
@action_command("hello!")
# Would trigger on "/me hello!"
.. versionadded:: 7.0
.. note::
The command name will be escaped to be used in a regex command. As such
it is not possible to use something like ``/me command\\d+`` to catch
something like ``/me command1`` or ``/me command2``.
You have several options at your disposal to replace a regex in the
command name:
* use a command alias,
* parse the arguments with your own regex within your plugin callable,
* use a :func:`rule`,
The :func:`rule` must be used with the :func:`ctcp` decorator::
@rule(r'hello!?')
@ctcp('ACTION')
# Would trigger on "/me hello!" and "/me hello"
"""
def add_attribute(function):
function._sopel_callable = True
if not hasattr(function, 'action_commands'):
function.action_commands = []
for cmd in command_list:
if cmd not in function.action_commands:
function.action_commands.append(cmd)
return function
return add_attribute
|
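A brief usage sketch of the decorator defined above; the `(bot, trigger)` signature follows the usual shape of a Sopel plugin callable, and the command names are illustrative.

@action_command("hello!", "hi!")
def greet(bot, trigger):
    bot.say("Hello there!")

# The decorator only records metadata on the function:
assert greet.action_commands == ["hello!", "hi!"]
assert greet._sopel_callable is True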
57,531 |
def create_ssl_context(
certfile, keyfile, password, ssl_version, cert_reqs, ca_certs, ciphers
):
ctx = ssl.SSLContext(ssl_version)
if password:
def getpassword():
return password
ctx.load_cert_chain(certfile, keyfile, getpassword)
else:
ctx.load_cert_chain(certfile, keyfile)
ctx.verify_mode = cert_reqs
if ca_certs:
ctx.load_verify_locations(ca_certs)
if ciphers:
ctx.set_ciphers(ciphers)
return ctx
|
def create_ssl_context(
certfile, keyfile, password, ssl_version, cert_reqs, ca_certs, ciphers
):
ctx = ssl.SSLContext(ssl_version)
password = (lambda: password) if password else None
ctx.load_cert_chain(certfile, keyfile, password)
ctx.verify_mode = cert_reqs
if ca_certs:
ctx.load_verify_locations(ca_certs)
if ciphers:
ctx.set_ciphers(ciphers)
return ctx
|
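A minimal calling sketch for the simplified helper; the file paths and password are placeholders, and `PROTOCOL_TLS_SERVER` is one reasonable choice of protocol constant.

import ssl

ctx = create_ssl_context(
    certfile="server.crt",            # placeholder paths
    keyfile="server.key",
    password="changeit",              # pass None for an unencrypted key
    ssl_version=ssl.PROTOCOL_TLS_SERVER,
    cert_reqs=ssl.CERT_NONE,
    ca_certs=None,
    ciphers=None,
)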
6,871 |
def send_email(success, service_name, doctype, email_field, error_status=None):
recipients = get_recipients(doctype, email_field)
if not recipients:
frappe.log_error(
"No Email Recipient found for {0}".format(service_name),
"{0}: Failed to send backup status email".format(service_name),
)
return
if success:
if not int(frappe.db.get_value(doctype, None, "send_email_for_successful_backup")):
return
subject = "Backup Upload Successful"
message = """
<h3>Backup Uploaded Successfully!</h3>
<p>Hi there, this is just to inform you that your backup was successfully uploaded to your {0} bucket. So relax!</p>""".format(
service_name
)
else:
subject = "[Warning] Backup Upload Failed"
message = """
<h3>Backup Upload Failed!</h3>
<p>Oops, your automated backup to {0} failed.</p>
<p>Error message: {1}</p>
<p>Please contact your system manager for more information.</p>""".format(
service_name, error_status
)
frappe.sendmail(recipients=recipients, subject=subject, message=message)
|
def send_email(success, service_name, doctype, email_field, error_status=None):
recipients = get_recipients(doctype, email_field)
if not recipients:
frappe.log_error(
"No Email Recipient found for {0}".format(service_name),
"{0}: Failed to send backup status email".format(service_name),
)
return
if success:
if not frappe.db.get_single_value(doctype, "send_email_for_successful_backup"):
return
subject = "Backup Upload Successful"
message = """
<h3>Backup Uploaded Successfully!</h3>
<p>Hi there, this is just to inform you that your backup was successfully uploaded to your {0} bucket. So relax!</p>""".format(
service_name
)
else:
subject = "[Warning] Backup Upload Failed"
message = """
<h3>Backup Upload Failed!</h3>
<p>Oops, your automated backup to {0} failed.</p>
<p>Error message: {1}</p>
<p>Please contact your system manager for more information.</p>""".format(
service_name, error_status
)
frappe.sendmail(recipients=recipients, subject=subject, message=message)
|
9,617 |
def main():
module = AnsibleModule(
argument_spec=dict(
type=dict(required=True, choices=['user', 'group', 'project']),
name=dict(required=False, default=None),
mountpoint=dict(required=True),
bhard=dict(required=False, default=None),
bsoft=dict(required=False, default=None),
ihard=dict(required=False, default=None),
isoft=dict(required=False, default=None),
rtbhard=dict(required=False, default=None),
rtbsoft=dict(required=False, default=None),
state=dict(required=False, default='present', choices=['present', 'absent'])
),
supports_check_mode=True
)
quota_type = module.params['type']
name = module.params['name']
mountpoint = module.params['mountpoint']
bhard = module.params['bhard']
bsoft = module.params['bsoft']
ihard = module.params['ihard']
isoft = module.params['isoft']
rtbhard = module.params['rtbhard']
rtbsoft = module.params['rtbsoft']
state = module.params['state']
if bhard is not None:
bhard = human_to_bytes(bhard)
if bsoft is not None:
bsoft = human_to_bytes(bsoft)
if rtbhard is not None:
rtbhard = human_to_bytes(rtbhard)
if rtbsoft is not None:
rtbsoft = human_to_bytes(rtbsoft)
changed = False
if os.getuid() != 0:
module.fail_json(msg='You need to be root to run this module')
if not os.path.ismount(mountpoint):
module.fail_json(msg='%s is not a mountpoint' % mountpoint)
mp = get_fs_by_mountpoint(mountpoint)
if mp is None:
module.fail_json(msg='%s is not a mountpoint or not located on an xfs filesystem.' % mountpoint)
if quota_type == 'user':
type_arg = '-u'
quota_default = 'root'
if name is None:
name = quota_default
if 'uquota' not in mp['mntopts'] \
and 'usrquota' not in mp['mntopts'] \
and 'quota' not in mp['mntopts'] \
and 'uqnoenforce' not in mp['mntopts'] \
and 'qnoenforce' not in mp['mntopts']:
module.fail_json(
msg='%s is not mounted with the uquota/usrquota/quota/uqnoenforce/qnoenforce option.'
% mountpoint
)
try:
pwd.getpwnam(name)
except KeyError as e:
module.fail_json(msg='User %s doesn\'t exist.' % name)
if quota_type == 'group':
type_arg = '-g'
quota_default = 'root'
if name is None:
name = quota_default
if 'gquota' not in mp['mntopts'] and 'grpquota' not in mp['mntopts'] and 'gqnoenforce' not in mp['mntopts']:
module.fail_json(
msg='%s is not mounted with the gquota/grpquota/gqnoenforce option. (current options: %s)'
% (mountpoint, mp['mntopts'])
)
try:
grp.getgrnam(name)
except KeyError as e:
            module.fail_json(msg='Group %s doesn\'t exist.' % name)
elif quota_type == 'project':
type_arg = '-p'
quota_default = '#0'
if name is None:
name = quota_default
if 'pquota' not in mp['mntopts'] and 'prjquota' not in mp['mntopts'] and 'pqnoenforce' not in mp['mntopts']:
module.fail_json(msg='%s is not mounted with the pquota/prjquota/pqnoenforce option.' % mountpoint)
if name != quota_default and not os.path.isfile('/etc/projects'):
module.fail_json(msg='/etc/projects doesn\'t exist.')
if name != quota_default and not os.path.isfile('/etc/projid'):
module.fail_json(msg='/etc/projid doesn\'t exist.')
if name != quota_default and name is not None and get_project_id(name) is None:
module.fail_json(msg='%s hasn\'t been defined in /etc/projid.' % name)
prj_set = True
if name != quota_default:
cmd = 'project %s' % name
r = exec_quota(module, cmd, mountpoint)
if r['rc'] != 0:
module.fail_json(msg='Could not get project state.', cmd=cmd, retval=r)
else:
for line in r['stdout']:
                if '%s - project identifier is not set' % name in line:
prj_set = False
break
if not prj_set and not module.check_mode:
cmd = 'project -s'
r = exec_quota(module, cmd, mountpoint)
if r['rc'] != 0:
module.fail_json(msg='Could not get quota realtime block report.', cmd=cmd, retval=r)
else:
changed = True
elif not prj_set and module.check_mode:
changed = True
changed = False
# Set limits
if state == 'absent':
bhard = 0
bsoft = 0
ihard = 0
isoft = 0
rtbhard = 0
rtbsoft = 0
if bsoft is not None or bhard is not None:
current_bsoft, current_bhard = quota_report(module, mountpoint, name, quota_type, 'b')
if isoft is not None or ihard is not None:
current_isoft, current_ihard = quota_report(module, mountpoint, name, quota_type, 'i')
if rtbsoft is not None or rtbhard is not None:
current_rtbsoft, current_rtbhard = quota_report(module, mountpoint, name, quota_type, 'rtb')
limit = []
if bsoft is not None and int(bsoft / 1024) != current_bsoft:
limit.append('bsoft=%s' % bsoft)
if bhard is not None and int(bhard / 1024) != current_bhard:
limit.append('bhard=%s' % bhard)
if isoft is not None and isoft != current_isoft:
limit.append('isoft=%s' % isoft)
if ihard is not None and ihard != current_ihard:
limit.append('ihard=%s' % ihard)
if rtbsoft is not None and int(rtbsoft / 1024) != current_rtbsoft:
limit.append('rtbsoft=%s' % rtbsoft)
if rtbhard is not None and int(rtbhard / 1024) != current_rtbhard:
limit.append('rtbhard=%s' % rtbhard)
if len(limit) > 0 and not module.check_mode:
if name == quota_default:
cmd = 'limit %s -d %s' % (type_arg, ' '.join(limit))
else:
cmd = 'limit %s %s %s' % (type_arg, ' '.join(limit), name)
r = exec_quota(module, cmd, mountpoint)
if r['rc'] != 0:
module.fail_json(msg='Could not set limits.', cmd=cmd, retval=r)
else:
changed = True
elif len(limit) > 0 and module.check_mode:
changed = True
module.exit_json(changed=changed)
return True
|
def main():
module = AnsibleModule(
argument_spec=dict(
type=dict(required=True, choices=['user', 'group', 'project']),
name=dict(required=False, default=None),
mountpoint=dict(required=True),
bhard=dict(required=False, default=None),
bsoft=dict(required=False, default=None),
ihard=dict(required=False, default=None),
isoft=dict(required=False, default=None),
rtbhard=dict(required=False, default=None),
rtbsoft=dict(required=False, default=None),
state=dict(type='str', default='present', choices=['absent', 'present'])
),
supports_check_mode=True
)
quota_type = module.params['type']
name = module.params['name']
mountpoint = module.params['mountpoint']
bhard = module.params['bhard']
bsoft = module.params['bsoft']
ihard = module.params['ihard']
isoft = module.params['isoft']
rtbhard = module.params['rtbhard']
rtbsoft = module.params['rtbsoft']
state = module.params['state']
if bhard is not None:
bhard = human_to_bytes(bhard)
if bsoft is not None:
bsoft = human_to_bytes(bsoft)
if rtbhard is not None:
rtbhard = human_to_bytes(rtbhard)
if rtbsoft is not None:
rtbsoft = human_to_bytes(rtbsoft)
changed = False
if os.getuid() != 0:
module.fail_json(msg='You need to be root to run this module')
if not os.path.ismount(mountpoint):
module.fail_json(msg='%s is not a mountpoint' % mountpoint)
mp = get_fs_by_mountpoint(mountpoint)
if mp is None:
module.fail_json(msg='%s is not a mountpoint or not located on an xfs filesystem.' % mountpoint)
if quota_type == 'user':
type_arg = '-u'
quota_default = 'root'
if name is None:
name = quota_default
if 'uquota' not in mp['mntopts'] \
and 'usrquota' not in mp['mntopts'] \
and 'quota' not in mp['mntopts'] \
and 'uqnoenforce' not in mp['mntopts'] \
and 'qnoenforce' not in mp['mntopts']:
module.fail_json(
msg='%s is not mounted with the uquota/usrquota/quota/uqnoenforce/qnoenforce option.'
% mountpoint
)
try:
pwd.getpwnam(name)
except KeyError as e:
module.fail_json(msg='User %s doesn\'t exist.' % name)
if quota_type == 'group':
type_arg = '-g'
quota_default = 'root'
if name is None:
name = quota_default
if 'gquota' not in mp['mntopts'] and 'grpquota' not in mp['mntopts'] and 'gqnoenforce' not in mp['mntopts']:
module.fail_json(
msg='%s is not mounted with the gquota/grpquota/gqnoenforce option. (current options: %s)'
% (mountpoint, mp['mntopts'])
)
try:
grp.getgrnam(name)
except KeyError as e:
            module.fail_json(msg='Group %s doesn\'t exist.' % name)
elif quota_type == 'project':
type_arg = '-p'
quota_default = '#0'
if name is None:
name = quota_default
if 'pquota' not in mp['mntopts'] and 'prjquota' not in mp['mntopts'] and 'pqnoenforce' not in mp['mntopts']:
module.fail_json(msg='%s is not mounted with the pquota/prjquota/pqnoenforce option.' % mountpoint)
if name != quota_default and not os.path.isfile('/etc/projects'):
module.fail_json(msg='/etc/projects doesn\'t exist.')
if name != quota_default and not os.path.isfile('/etc/projid'):
module.fail_json(msg='/etc/projid doesn\'t exist.')
if name != quota_default and name is not None and get_project_id(name) is None:
module.fail_json(msg='%s hasn\'t been defined in /etc/projid.' % name)
prj_set = True
if name != quota_default:
cmd = 'project %s' % name
r = exec_quota(module, cmd, mountpoint)
if r['rc'] != 0:
module.fail_json(msg='Could not get project state.', cmd=cmd, retval=r)
else:
for line in r['stdout']:
                if '%s - project identifier is not set' % name in line:
prj_set = False
break
if not prj_set and not module.check_mode:
cmd = 'project -s'
r = exec_quota(module, cmd, mountpoint)
if r['rc'] != 0:
module.fail_json(msg='Could not get quota realtime block report.', cmd=cmd, retval=r)
else:
changed = True
elif not prj_set and module.check_mode:
changed = True
changed = False
# Set limits
if state == 'absent':
bhard = 0
bsoft = 0
ihard = 0
isoft = 0
rtbhard = 0
rtbsoft = 0
if bsoft is not None or bhard is not None:
current_bsoft, current_bhard = quota_report(module, mountpoint, name, quota_type, 'b')
if isoft is not None or ihard is not None:
current_isoft, current_ihard = quota_report(module, mountpoint, name, quota_type, 'i')
if rtbsoft is not None or rtbhard is not None:
current_rtbsoft, current_rtbhard = quota_report(module, mountpoint, name, quota_type, 'rtb')
limit = []
if bsoft is not None and int(bsoft / 1024) != current_bsoft:
limit.append('bsoft=%s' % bsoft)
if bhard is not None and int(bhard / 1024) != current_bhard:
limit.append('bhard=%s' % bhard)
if isoft is not None and isoft != current_isoft:
limit.append('isoft=%s' % isoft)
if ihard is not None and ihard != current_ihard:
limit.append('ihard=%s' % ihard)
if rtbsoft is not None and int(rtbsoft / 1024) != current_rtbsoft:
limit.append('rtbsoft=%s' % rtbsoft)
if rtbhard is not None and int(rtbhard / 1024) != current_rtbhard:
limit.append('rtbhard=%s' % rtbhard)
if len(limit) > 0 and not module.check_mode:
if name == quota_default:
cmd = 'limit %s -d %s' % (type_arg, ' '.join(limit))
else:
cmd = 'limit %s %s %s' % (type_arg, ' '.join(limit), name)
r = exec_quota(module, cmd, mountpoint)
if r['rc'] != 0:
module.fail_json(msg='Could not set limits.', cmd=cmd, retval=r)
else:
changed = True
elif len(limit) > 0 and module.check_mode:
changed = True
module.exit_json(changed=changed)
return True
|
44,765 |
def _get_databricks_run_cmd(dbfs_fuse_tar_uri, run_id, entry_point, parameters):
"""
Generate MLflow CLI command to run on Databricks cluster in order to launch a run on Databricks.
"""
# Strip ".gz" and ".tar" file extensions from base filename of the tarfile
tar_hash = posixpath.splitext(posixpath.splitext(posixpath.basename(dbfs_fuse_tar_uri))[0])[0]
container_tar_path = posixpath.abspath(posixpath.join(DB_TARFILE_BASE,
posixpath.basename(dbfs_fuse_tar_uri)))
project_dir = posixpath.join(DB_PROJECTS_BASE, tar_hash)
mlflow_run_arr = list(map(shlex_quote, ["mlflow", "run", project_dir,
"--entry-point", entry_point]))
if run_id:
mlflow_run_arr.extend(["--run-id", run_id])
if parameters:
for key, value in parameters.items():
mlflow_run_arr.extend(["-P", "%s=%s" % (key, value)])
mlflow_run_cmd = " ".join(mlflow_run_arr)
shell_command = textwrap.dedent("""
mlflow --version &&
# Make local directories in the container into which to copy/extract the tarred project
mkdir -p {tarfile_base} {projects_base} &&
# Rsync from DBFS FUSE to avoid copying archive into local filesystem if it already exists
rsync -a -v --ignore-existing {dbfs_fuse_tar_path} {tarfile_base} &&
# Extract project into a temporary directory. We don't extract directly into the desired
# directory as tar extraction isn't guaranteed to be atomic
cd $(mktemp -d) &&
tar --no-same-owner -xzvf {container_tar_path} &&
# Atomically move the extracted project into the desired directory
mv -T {tarfile_archive_name} {work_dir} &&
{mlflow_run}
""".format(tarfile_base=DB_TARFILE_BASE, projects_base=DB_PROJECTS_BASE,
dbfs_fuse_tar_path=dbfs_fuse_tar_uri, container_tar_path=container_tar_path,
tarfile_archive_name=DB_TARFILE_ARCHIVE_NAME, work_dir=project_dir,
mlflow_run=mlflow_run_cmd))
return ["bash", "-c", shell_command]
|
def _get_databricks_run_cmd(dbfs_fuse_tar_uri, run_id, entry_point, parameters):
"""
Generate MLflow CLI command to run on Databricks cluster in order to launch a run on Databricks.
"""
# Strip ".gz" and ".tar" file extensions from base filename of the tarfile
tar_hash = posixpath.splitext(posixpath.splitext(posixpath.basename(dbfs_fuse_tar_uri))[0])[0]
container_tar_path = posixpath.abspath(posixpath.join(DB_TARFILE_BASE,
posixpath.basename(dbfs_fuse_tar_uri)))
project_dir = posixpath.join(DB_PROJECTS_BASE, tar_hash)
mlflow_run_arr = list(map(shlex_quote, ["mlflow", "run", project_dir,
"--entry-point", entry_point]))
if run_id:
mlflow_run_arr.extend(["--run-id", run_id])
if parameters:
for key, value in parameters.items():
mlflow_run_arr.extend(["-P", "%s=%s" % (key, value)])
mlflow_run_cmd = " ".join(mlflow_run_arr)
shell_command = textwrap.dedent("""
mlflow --version &&
# Make local directories in the container into which to copy/extract the tarred project
mkdir -p {tarfile_base} {projects_base} &&
# Rsync from DBFS FUSE to avoid copying archive into local filesystem if it already exists
rsync -a -v --ignore-existing {dbfs_fuse_tar_path} {tarfile_base} &&
# Extract project into a temporary directory. We don't extract directly into the desired
# directory as tar extraction isn't guaranteed to be atomic
cd $(mktemp -d) &&
export PATH=$PATH:$DB_HOME/python/bin &&
# Atomically move the extracted project into the desired directory
mv -T {tarfile_archive_name} {work_dir} &&
{mlflow_run}
""".format(tarfile_base=DB_TARFILE_BASE, projects_base=DB_PROJECTS_BASE,
dbfs_fuse_tar_path=dbfs_fuse_tar_uri, container_tar_path=container_tar_path,
tarfile_archive_name=DB_TARFILE_ARCHIVE_NAME, work_dir=project_dir,
mlflow_run=mlflow_run_cmd))
return ["bash", "-c", shell_command]
|
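Both versions above build the final command by mapping shlex_quote over the argument list before joining it into a single string for bash -c. A minimal standalone sketch of that quoting pattern, using only the standard library; the project path and parameter values are invented, and unlike the snippet above this sketch also quotes the -P values.

# Sketch: building a safely quoted CLI string; inputs are invented examples.
from shlex import quote as shlex_quote

def build_run_cmd(project_dir, entry_point, parameters):
    # Quote each token individually so spaces and shell metacharacters survive.
    cmd = list(map(shlex_quote, ["mlflow", "run", project_dir,
                                 "--entry-point", entry_point]))
    for key, value in parameters.items():
        cmd.extend(["-P", shlex_quote("%s=%s" % (key, value))])
    return " ".join(cmd)

print(build_run_cmd("/tmp/my project", "main", {"alpha": "0.5"}))
# mlflow run '/tmp/my project' --entry-point main -P alpha=0.5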
20,038 |
def test_plantcv_visualize_size():
pcv.params.debug = None
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_visualize_overlay_two_imgs_size_mismatch")
os.mkdir(cache_dir)
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
visualization = pcv.visualize.sizes(img=img, mask=img, num_objects=4)
assert len(np.unique(visualization)) == 4
|
def test_plantcv_visualize_size():
pcv.params.debug = None
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_visualize_sizes")
os.mkdir(cache_dir)
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
visualization = pcv.visualize.sizes(img=img, mask=img, num_objects=4)
assert len(np.unique(visualization)) == 4
|
14,430 |
def resize(clip, new_size=None, height=None, width=None, apply_to_mask=True):
"""Returns a video clip that is a resized version of the clip.
Parameters
----------
new_size : tuple, optional
Can be either
- ``(width, height)`` in pixels or a float representing
- A scaling factor, like ``0.5``.
- A function of time returning one of these.
width : int, optional
Width of the new clip in pixels. The height is then computed so
that the width/height ratio is conserved.
height : int, optional
Height of the new clip in pixels. The width is then computed so
that the width/height ratio is conserved.
Examples
--------
>>> myClip.resize( (460,720) ) # New resolution: (460,720)
    >>> myClip.resize(0.6) # width and height multiplied by 0.6
>>> myClip.resize(width=800) # height computed automatically.
>>> myClip.resize(lambda t : 1+0.02*t) # slow swelling of the clip
"""
w, h = clip.size
if new_size is not None:
def translate_new_size(new_size_):
"""Returns a [w, h] pair from `new_size_`. If `new_size_` is a
scalar, then work out the correct pair using the clip's size.
Otherwise just return `new_size_`
"""
if isinstance(new_size_, (int, float)):
return [new_size_ * w, new_size_ * h]
else:
return new_size_
if hasattr(new_size, "__call__"):
# The resizing is a function of time
def get_new_size(t):
return translate_new_size(new_size(t))
if clip.is_mask:
def filter(get_frame, t):
return (
resizer((255 * get_frame(t)).astype("uint8"), get_new_size(t))
/ 255.0
)
else:
def filter(get_frame, t):
return resizer(get_frame(t).astype("uint8"), get_new_size(t))
newclip = clip.transform(
filter, keep_duration=True, apply_to=(["mask"] if apply_to_mask else [])
)
if apply_to_mask and clip.mask is not None:
newclip.mask = resize(clip.mask, new_size, apply_to_mask=False)
return newclip
else:
new_size = translate_new_size(new_size)
elif height is not None:
if hasattr(height, "__call__"):
def func(t):
return 1.0 * int(height(t)) / h
return resize(clip, func)
else:
new_size = [w * height / h, height]
elif width is not None:
if hasattr(width, "__call__"):
def func(t):
return 1.0 * width(t) / w
return resize(clip, func)
else:
new_size = [width, h * width / w]
# From here, the resizing is constant (not a function of time), size=newsize
if clip.is_mask:
def image_filter(pic):
return 1.0 * resizer((255 * pic).astype("uint8"), new_size) / 255.0
else:
def image_filter(pic):
return resizer(pic.astype("uint8"), new_size)
new_clip = clip.image_transform(image_filter)
if apply_to_mask and clip.mask is not None:
new_clip.mask = resize(clip.mask, new_size, apply_to_mask=False)
return new_clip
|
def resize(clip, new_size=None, height=None, width=None, apply_to_mask=True):
"""Returns a video clip that is a resized version of the clip.
Parameters
----------
new_size : tuple or float or function, optional
Can be either
- ``(width, height)`` in pixels or a float representing
- A scaling factor, like ``0.5``.
- A function of time returning one of these.
width : int, optional
Width of the new clip in pixels. The height is then computed so
that the width/height ratio is conserved.
height : int, optional
Height of the new clip in pixels. The width is then computed so
that the width/height ratio is conserved.
Examples
--------
>>> myClip.resize( (460,720) ) # New resolution: (460,720)
    >>> myClip.resize(0.6) # width and height multiplied by 0.6
>>> myClip.resize(width=800) # height computed automatically.
>>> myClip.resize(lambda t : 1+0.02*t) # slow swelling of the clip
"""
w, h = clip.size
if new_size is not None:
def translate_new_size(new_size_):
"""Returns a [w, h] pair from `new_size_`. If `new_size_` is a
scalar, then work out the correct pair using the clip's size.
Otherwise just return `new_size_`
"""
if isinstance(new_size_, (int, float)):
return [new_size_ * w, new_size_ * h]
else:
return new_size_
if hasattr(new_size, "__call__"):
# The resizing is a function of time
def get_new_size(t):
return translate_new_size(new_size(t))
if clip.is_mask:
def filter(get_frame, t):
return (
resizer((255 * get_frame(t)).astype("uint8"), get_new_size(t))
/ 255.0
)
else:
def filter(get_frame, t):
return resizer(get_frame(t).astype("uint8"), get_new_size(t))
newclip = clip.transform(
filter, keep_duration=True, apply_to=(["mask"] if apply_to_mask else [])
)
if apply_to_mask and clip.mask is not None:
newclip.mask = resize(clip.mask, new_size, apply_to_mask=False)
return newclip
else:
new_size = translate_new_size(new_size)
elif height is not None:
if hasattr(height, "__call__"):
def func(t):
return 1.0 * int(height(t)) / h
return resize(clip, func)
else:
new_size = [w * height / h, height]
elif width is not None:
if hasattr(width, "__call__"):
def func(t):
return 1.0 * width(t) / w
return resize(clip, func)
else:
new_size = [width, h * width / w]
# From here, the resizing is constant (not a function of time), size=newsize
if clip.is_mask:
def image_filter(pic):
return 1.0 * resizer((255 * pic).astype("uint8"), new_size) / 255.0
else:
def image_filter(pic):
return resizer(pic.astype("uint8"), new_size)
new_clip = clip.image_transform(image_filter)
if apply_to_mask and clip.mask is not None:
new_clip.mask = resize(clip.mask, new_size, apply_to_mask=False)
return new_clip
|
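Both columns of this pair resolve the three mutually exclusive size arguments (new_size, width, height) into a single [w, h] pair before any resizing happens. A small self-contained sketch of just that resolution step, with a hard-coded clip size for illustration:

# Sketch of the size-resolution logic in resize(), using a fixed clip size.
def resolve_new_size(clip_size, new_size=None, height=None, width=None):
    w, h = clip_size
    if new_size is not None:
        if isinstance(new_size, (int, float)):   # treated as a scale factor
            return [new_size * w, new_size * h]
        return list(new_size)                    # explicit (width, height)
    if height is not None:                       # keep the aspect ratio
        return [w * height / h, height]
    if width is not None:
        return [width, h * width / w]
    raise ValueError("one of new_size, height or width is required")

print(resolve_new_size((1920, 1080), new_size=0.5))   # [960.0, 540.0]
print(resolve_new_size((1920, 1080), width=800))      # [800, 450.0]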
54,600 |
def create_training_dataset(
config,
num_shuffles=1,
Shuffles=None,
windows2linux=False,
userfeedback=False,
trainIndices=None,
testIndices=None,
net_type=None,
augmenter_type=None,
posecfg_template=None,
):
"""Creates a training dataset.
Labels from all the extracted frames are merged into a single .h5 file.
Only the videos included in the config file are used to create this dataset.
Parameters
----------
config : string
Full path of the ``config.yaml`` file as a string.
num_shuffles : int, optional, default=1
Number of shuffles of training dataset to create, i.e. ``[1,2,3]`` for
``num_shuffles=3``.
Shuffles: list[int], optional
Alternatively the user can also give a list of shuffles.
userfeedback: bool, optional, default=False
If ``False``, all requested train/test splits are created (no matter if they
already exist). If you want to assure that previous splits etc. are not
overwritten, set this to ``True`` and you will be asked for each split.
trainIndices: list of lists, optional, default=None
List of one or multiple lists containing train indexes.
A list containing two lists of training indexes will produce two splits.
testIndices: list of lists, optional, default=None
List of one or multiple lists containing test indexes.
net_type: list, optional, default=None
Type of networks. Currently supported options are
* ``resnet_50``
* ``resnet_101``
* ``resnet_152``
* ``mobilenet_v2_1.0``
* ``mobilenet_v2_0.75``
* ``mobilenet_v2_0.5``
* ``mobilenet_v2_0.35``
* ``efficientnet-b0``
* ``efficientnet-b1``
* ``efficientnet-b2``
* ``efficientnet-b3``
* ``efficientnet-b4``
* ``efficientnet-b5``
* ``efficientnet-b6``
augmenter_type: string, optional, default=None
Type of augmenter. Currently supported augmenters are
* ``default``
* ``scalecrop``
* ``imgaug``
* ``tensorpack``
* ``deterministic``
posecfg_template: string, optional, default=None
Path to a ``pose_cfg.yaml`` file to use as a template for generating the new
one for the current iteration. Useful if you would like to start with the same
        parameters as a previous training iteration. None uses the default
``pose_cfg.yaml``.
Returns
-------
list(tuple) or None
If training dataset was successfully created, a list of tuples is returned.
The first two elements in each tuple represent the training fraction and the
shuffle value. The last two elements in each tuple are arrays of integers
representing the training and test indices.
Returns None if training dataset could not be created.
Notes
-----
Use the function ``add_new_video`` at any stage of the project to add more videos
to the project.
Examples
--------
Linux/MacOS
>>> deeplabcut.create_training_dataset(
'/analysis/project/reaching-task/config.yaml', num_shuffles=1,
)
Windows
>>> deeplabcut.create_training_dataset(
'C:\\Users\\Ulf\\looming-task\\config.yaml', Shuffles=[3,17,5],
)
"""
import scipy.io as sio
if windows2linux:
# DeprecationWarnings are silenced since Python 3.2 unless triggered in __main__
warnings.warn(
"`windows2linux` has no effect since 2.2.0.4 and will be removed in 2.2.1.",
FutureWarning,
)
# Loading metadata from config file:
cfg = auxiliaryfunctions.read_config(config)
if posecfg_template:
if not posecfg_template.endswith("pose_cfg.yaml"):
raise ValueError(
"posecfg_template argument must contain path to a pose_cfg.yaml file"
)
else:
print("Reloading pose_cfg parameters from " + posecfg_template +'\n')
from deeplabcut.utils.auxiliaryfunctions import read_plainconfig
prior_cfg = read_plainconfig(posecfg_template)
if cfg.get("multianimalproject", False):
from deeplabcut.generate_training_dataset.multiple_individuals_trainingsetmanipulation import (
create_multianimaltraining_dataset,
)
create_multianimaltraining_dataset(
config, num_shuffles, Shuffles, net_type=net_type
)
else:
scorer = cfg["scorer"]
project_path = cfg["project_path"]
# Create path for training sets & store data there
trainingsetfolder = auxiliaryfunctions.GetTrainingSetFolder(
cfg
) # Path concatenation OS platform independent
auxiliaryfunctions.attempttomakefolder(
Path(os.path.join(project_path, str(trainingsetfolder))), recursive=True
)
Data = merge_annotateddatasets(
cfg,
Path(os.path.join(project_path, trainingsetfolder)),
)
if Data is None:
return
Data = Data[scorer] # extract labeled data
# loading & linking pretrained models
if net_type is None: # loading & linking pretrained models
net_type = cfg.get("default_net_type", "resnet_50")
else:
if (
"resnet" in net_type
or "mobilenet" in net_type
or "efficientnet" in net_type
):
pass
else:
raise ValueError("Invalid network type:", net_type)
if augmenter_type is None:
augmenter_type = cfg.get("default_augmenter", "imgaug")
if augmenter_type is None: # this could be in config.yaml for old projects!
                # updating variable if null/None! # backwards compatibility
auxiliaryfunctions.edit_config(config, {"default_augmenter": "imgaug"})
augmenter_type = "imgaug"
elif augmenter_type not in [
"default",
"scalecrop",
"imgaug",
"tensorpack",
"deterministic",
]:
raise ValueError("Invalid augmenter type:", augmenter_type)
if posecfg_template:
if net_type != prior_cfg["net_type"]:
print(
"WARNING: Specified net_type does not match net_type from posecfg_template path entered. Proceed with caution."
)
if augmenter_type != prior_cfg["dataset_type"]:
print(
"WARNING: Specified augmenter_type does not match dataset_type from posecfg_template path entered. Proceed with caution."
)
# Loading the encoder (if necessary downloading from TF)
dlcparent_path = auxiliaryfunctions.get_deeplabcut_path()
if not posecfg_template:
defaultconfigfile = os.path.join(dlcparent_path, "pose_cfg.yaml")
elif posecfg_template:
defaultconfigfile = posecfg_template
model_path, num_shuffles = auxfun_models.Check4weights(
net_type, Path(dlcparent_path), num_shuffles
)
if Shuffles is None:
Shuffles = range(1, num_shuffles + 1)
else:
Shuffles = [i for i in Shuffles if isinstance(i, int)]
# print(trainIndices,testIndices, Shuffles, augmenter_type,net_type)
if trainIndices is None and testIndices is None:
splits = [
(
trainFraction,
shuffle,
SplitTrials(range(len(Data.index)), trainFraction),
)
for trainFraction in cfg["TrainingFraction"]
for shuffle in Shuffles
]
else:
if len(trainIndices) != len(testIndices) != len(Shuffles):
raise ValueError(
"Number of Shuffles and train and test indexes should be equal."
)
splits = []
for shuffle, (train_inds, test_inds) in enumerate(
zip(trainIndices, testIndices)
):
trainFraction = round(
len(train_inds) * 1.0 / (len(train_inds) + len(test_inds)), 2
)
print(
f"You passed a split with the following fraction: {int(100 * trainFraction)}%"
)
# Now that the training fraction is guaranteed to be correct,
# the values added to pad the indices are removed.
train_inds = np.asarray(train_inds)
train_inds = train_inds[train_inds != -1]
test_inds = np.asarray(test_inds)
test_inds = test_inds[test_inds != -1]
splits.append(
(trainFraction, Shuffles[shuffle], (train_inds, test_inds))
)
bodyparts = cfg["bodyparts"]
nbodyparts = len(bodyparts)
for trainFraction, shuffle, (trainIndices, testIndices) in splits:
if len(trainIndices) > 0:
if userfeedback:
trainposeconfigfile, _, _ = training.return_train_network_path(
config,
shuffle=shuffle,
trainingsetindex=cfg["TrainingFraction"].index(trainFraction),
)
if trainposeconfigfile.is_file():
askuser = input(
"The model folder is already present. If you continue, it will overwrite the existing model (split). Do you want to continue?(yes/no): "
)
if (
askuser == "no"
or askuser == "No"
or askuser == "N"
or askuser == "No"
):
raise Exception(
"Use the Shuffles argument as a list to specify a different shuffle index. Check out the help for more details."
)
####################################################
# Generating data structure with labeled information & frame metadata (for deep cut)
####################################################
# Make training file!
(
datafilename,
metadatafilename,
) = auxiliaryfunctions.GetDataandMetaDataFilenames(
trainingsetfolder, trainFraction, shuffle, cfg
)
################################################################################
# Saving data file (convert to training file for deeper cut (*.mat))
################################################################################
data, MatlabData = format_training_data(
Data, trainIndices, nbodyparts, project_path
)
sio.savemat(
os.path.join(project_path, datafilename), {"dataset": MatlabData}
)
################################################################################
# Saving metadata (Pickle file)
################################################################################
auxiliaryfunctions.SaveMetadata(
os.path.join(project_path, metadatafilename),
data,
trainIndices,
testIndices,
trainFraction,
)
################################################################################
# Creating file structure for training &
# Test files as well as pose_yaml files (containing training and testing information)
#################################################################################
modelfoldername = auxiliaryfunctions.get_model_folder(
trainFraction, shuffle, cfg
)
auxiliaryfunctions.attempttomakefolder(
Path(config).parents[0] / modelfoldername, recursive=True
)
auxiliaryfunctions.attempttomakefolder(
str(Path(config).parents[0] / modelfoldername) + "/train"
)
auxiliaryfunctions.attempttomakefolder(
str(Path(config).parents[0] / modelfoldername) + "/test"
)
path_train_config = str(
os.path.join(
cfg["project_path"],
Path(modelfoldername),
"train",
"pose_cfg.yaml",
)
)
path_test_config = str(
os.path.join(
cfg["project_path"],
Path(modelfoldername),
"test",
"pose_cfg.yaml",
)
)
# str(cfg['proj_path']+'/'+Path(modelfoldername) / 'test' / 'pose_cfg.yaml')
items2change = {
"dataset": datafilename,
"metadataset": metadatafilename,
"num_joints": len(bodyparts),
"all_joints": [[i] for i in range(len(bodyparts))],
"all_joints_names": [str(bpt) for bpt in bodyparts],
"init_weights": model_path,
"project_path": str(cfg["project_path"]),
"net_type": net_type,
"dataset_type": augmenter_type,
}
items2drop = {}
if augmenter_type == "scalecrop":
# these values are dropped as scalecrop
# doesn't have rotation implemented
items2drop = {"rotation": 0, "rotratio": 0.0}
# Also drop maDLC smart cropping augmentation parameters
for key in ["pre_resize", "crop_size", "max_shift", "crop_sampling"]:
items2drop[key] = None
trainingdata = MakeTrain_pose_yaml(
items2change, path_train_config, defaultconfigfile, items2drop
)
keys2save = [
"dataset",
"num_joints",
"all_joints",
"all_joints_names",
"net_type",
"init_weights",
"global_scale",
"location_refinement",
"locref_stdev",
]
MakeTest_pose_yaml(trainingdata, keys2save, path_test_config)
print(
"The training dataset is successfully created. Use the function 'train_network' to start training. Happy training!"
)
return splits
|
def create_training_dataset(
config,
num_shuffles=1,
Shuffles=None,
windows2linux=False,
userfeedback=False,
trainIndices=None,
testIndices=None,
net_type=None,
augmenter_type=None,
posecfg_template=None,
):
"""Creates a training dataset.
Labels from all the extracted frames are merged into a single .h5 file.
Only the videos included in the config file are used to create this dataset.
Parameters
----------
config : string
Full path of the ``config.yaml`` file as a string.
num_shuffles : int, optional, default=1
Number of shuffles of training dataset to create, i.e. ``[1,2,3]`` for
``num_shuffles=3``.
Shuffles: list[int], optional
Alternatively the user can also give a list of shuffles.
userfeedback: bool, optional, default=False
If ``False``, all requested train/test splits are created (no matter if they
already exist). If you want to assure that previous splits etc. are not
overwritten, set this to ``True`` and you will be asked for each split.
trainIndices: list of lists, optional, default=None
List of one or multiple lists containing train indexes.
A list containing two lists of training indexes will produce two splits.
testIndices: list of lists, optional, default=None
List of one or multiple lists containing test indexes.
net_type: list, optional, default=None
Type of networks. Currently supported options are
* ``resnet_50``
* ``resnet_101``
* ``resnet_152``
* ``mobilenet_v2_1.0``
* ``mobilenet_v2_0.75``
* ``mobilenet_v2_0.5``
* ``mobilenet_v2_0.35``
* ``efficientnet-b0``
* ``efficientnet-b1``
* ``efficientnet-b2``
* ``efficientnet-b3``
* ``efficientnet-b4``
* ``efficientnet-b5``
* ``efficientnet-b6``
augmenter_type: string, optional, default=None
Type of augmenter. Currently supported augmenters are
* ``default``
* ``scalecrop``
* ``imgaug``
* ``tensorpack``
* ``deterministic``
posecfg_template: string, optional, default=None
Path to a ``pose_cfg.yaml`` file to use as a template for generating the new
one for the current iteration. Useful if you would like to start with the same
        parameters as a previous training iteration. None uses the default
``pose_cfg.yaml``.
Returns
-------
list(tuple) or None
If training dataset was successfully created, a list of tuples is returned.
The first two elements in each tuple represent the training fraction and the
shuffle value. The last two elements in each tuple are arrays of integers
representing the training and test indices.
Returns None if training dataset could not be created.
Notes
-----
Use the function ``add_new_videos`` at any stage of the project to add more videos
to the project.
Examples
--------
Linux/MacOS
>>> deeplabcut.create_training_dataset(
'/analysis/project/reaching-task/config.yaml', num_shuffles=1,
)
Windows
>>> deeplabcut.create_training_dataset(
'C:\\Users\\Ulf\\looming-task\\config.yaml', Shuffles=[3,17,5],
)
"""
import scipy.io as sio
if windows2linux:
# DeprecationWarnings are silenced since Python 3.2 unless triggered in __main__
warnings.warn(
"`windows2linux` has no effect since 2.2.0.4 and will be removed in 2.2.1.",
FutureWarning,
)
# Loading metadata from config file:
cfg = auxiliaryfunctions.read_config(config)
if posecfg_template:
if not posecfg_template.endswith("pose_cfg.yaml"):
raise ValueError(
"posecfg_template argument must contain path to a pose_cfg.yaml file"
)
else:
print("Reloading pose_cfg parameters from " + posecfg_template +'\n')
from deeplabcut.utils.auxiliaryfunctions import read_plainconfig
prior_cfg = read_plainconfig(posecfg_template)
if cfg.get("multianimalproject", False):
from deeplabcut.generate_training_dataset.multiple_individuals_trainingsetmanipulation import (
create_multianimaltraining_dataset,
)
create_multianimaltraining_dataset(
config, num_shuffles, Shuffles, net_type=net_type
)
else:
scorer = cfg["scorer"]
project_path = cfg["project_path"]
# Create path for training sets & store data there
trainingsetfolder = auxiliaryfunctions.GetTrainingSetFolder(
cfg
) # Path concatenation OS platform independent
auxiliaryfunctions.attempttomakefolder(
Path(os.path.join(project_path, str(trainingsetfolder))), recursive=True
)
Data = merge_annotateddatasets(
cfg,
Path(os.path.join(project_path, trainingsetfolder)),
)
if Data is None:
return
Data = Data[scorer] # extract labeled data
# loading & linking pretrained models
if net_type is None: # loading & linking pretrained models
net_type = cfg.get("default_net_type", "resnet_50")
else:
if (
"resnet" in net_type
or "mobilenet" in net_type
or "efficientnet" in net_type
):
pass
else:
raise ValueError("Invalid network type:", net_type)
if augmenter_type is None:
augmenter_type = cfg.get("default_augmenter", "imgaug")
if augmenter_type is None: # this could be in config.yaml for old projects!
                # updating variable if null/None! # backwards compatibility
auxiliaryfunctions.edit_config(config, {"default_augmenter": "imgaug"})
augmenter_type = "imgaug"
elif augmenter_type not in [
"default",
"scalecrop",
"imgaug",
"tensorpack",
"deterministic",
]:
raise ValueError("Invalid augmenter type:", augmenter_type)
if posecfg_template:
if net_type != prior_cfg["net_type"]:
print(
"WARNING: Specified net_type does not match net_type from posecfg_template path entered. Proceed with caution."
)
if augmenter_type != prior_cfg["dataset_type"]:
print(
"WARNING: Specified augmenter_type does not match dataset_type from posecfg_template path entered. Proceed with caution."
)
# Loading the encoder (if necessary downloading from TF)
dlcparent_path = auxiliaryfunctions.get_deeplabcut_path()
if not posecfg_template:
defaultconfigfile = os.path.join(dlcparent_path, "pose_cfg.yaml")
elif posecfg_template:
defaultconfigfile = posecfg_template
model_path, num_shuffles = auxfun_models.Check4weights(
net_type, Path(dlcparent_path), num_shuffles
)
if Shuffles is None:
Shuffles = range(1, num_shuffles + 1)
else:
Shuffles = [i for i in Shuffles if isinstance(i, int)]
# print(trainIndices,testIndices, Shuffles, augmenter_type,net_type)
if trainIndices is None and testIndices is None:
splits = [
(
trainFraction,
shuffle,
SplitTrials(range(len(Data.index)), trainFraction),
)
for trainFraction in cfg["TrainingFraction"]
for shuffle in Shuffles
]
else:
if len(trainIndices) != len(testIndices) != len(Shuffles):
raise ValueError(
"Number of Shuffles and train and test indexes should be equal."
)
splits = []
for shuffle, (train_inds, test_inds) in enumerate(
zip(trainIndices, testIndices)
):
trainFraction = round(
len(train_inds) * 1.0 / (len(train_inds) + len(test_inds)), 2
)
print(
f"You passed a split with the following fraction: {int(100 * trainFraction)}%"
)
# Now that the training fraction is guaranteed to be correct,
# the values added to pad the indices are removed.
train_inds = np.asarray(train_inds)
train_inds = train_inds[train_inds != -1]
test_inds = np.asarray(test_inds)
test_inds = test_inds[test_inds != -1]
splits.append(
(trainFraction, Shuffles[shuffle], (train_inds, test_inds))
)
bodyparts = cfg["bodyparts"]
nbodyparts = len(bodyparts)
for trainFraction, shuffle, (trainIndices, testIndices) in splits:
if len(trainIndices) > 0:
if userfeedback:
trainposeconfigfile, _, _ = training.return_train_network_path(
config,
shuffle=shuffle,
trainingsetindex=cfg["TrainingFraction"].index(trainFraction),
)
if trainposeconfigfile.is_file():
askuser = input(
"The model folder is already present. If you continue, it will overwrite the existing model (split). Do you want to continue?(yes/no): "
)
if (
askuser == "no"
or askuser == "No"
or askuser == "N"
or askuser == "No"
):
raise Exception(
"Use the Shuffles argument as a list to specify a different shuffle index. Check out the help for more details."
)
####################################################
# Generating data structure with labeled information & frame metadata (for deep cut)
####################################################
# Make training file!
(
datafilename,
metadatafilename,
) = auxiliaryfunctions.GetDataandMetaDataFilenames(
trainingsetfolder, trainFraction, shuffle, cfg
)
################################################################################
# Saving data file (convert to training file for deeper cut (*.mat))
################################################################################
data, MatlabData = format_training_data(
Data, trainIndices, nbodyparts, project_path
)
sio.savemat(
os.path.join(project_path, datafilename), {"dataset": MatlabData}
)
################################################################################
# Saving metadata (Pickle file)
################################################################################
auxiliaryfunctions.SaveMetadata(
os.path.join(project_path, metadatafilename),
data,
trainIndices,
testIndices,
trainFraction,
)
################################################################################
# Creating file structure for training &
# Test files as well as pose_yaml files (containing training and testing information)
#################################################################################
modelfoldername = auxiliaryfunctions.get_model_folder(
trainFraction, shuffle, cfg
)
auxiliaryfunctions.attempttomakefolder(
Path(config).parents[0] / modelfoldername, recursive=True
)
auxiliaryfunctions.attempttomakefolder(
str(Path(config).parents[0] / modelfoldername) + "/train"
)
auxiliaryfunctions.attempttomakefolder(
str(Path(config).parents[0] / modelfoldername) + "/test"
)
path_train_config = str(
os.path.join(
cfg["project_path"],
Path(modelfoldername),
"train",
"pose_cfg.yaml",
)
)
path_test_config = str(
os.path.join(
cfg["project_path"],
Path(modelfoldername),
"test",
"pose_cfg.yaml",
)
)
# str(cfg['proj_path']+'/'+Path(modelfoldername) / 'test' / 'pose_cfg.yaml')
items2change = {
"dataset": datafilename,
"metadataset": metadatafilename,
"num_joints": len(bodyparts),
"all_joints": [[i] for i in range(len(bodyparts))],
"all_joints_names": [str(bpt) for bpt in bodyparts],
"init_weights": model_path,
"project_path": str(cfg["project_path"]),
"net_type": net_type,
"dataset_type": augmenter_type,
}
items2drop = {}
if augmenter_type == "scalecrop":
# these values are dropped as scalecrop
# doesn't have rotation implemented
items2drop = {"rotation": 0, "rotratio": 0.0}
# Also drop maDLC smart cropping augmentation parameters
for key in ["pre_resize", "crop_size", "max_shift", "crop_sampling"]:
items2drop[key] = None
trainingdata = MakeTrain_pose_yaml(
items2change, path_train_config, defaultconfigfile, items2drop
)
keys2save = [
"dataset",
"num_joints",
"all_joints",
"all_joints_names",
"net_type",
"init_weights",
"global_scale",
"location_refinement",
"locref_stdev",
]
MakeTest_pose_yaml(trainingdata, keys2save, path_test_config)
print(
"The training dataset is successfully created. Use the function 'train_network' to start training. Happy training!"
)
return splits
|
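In the explicit-split branch above, the training fraction is computed from the padded index lists and only afterwards are the -1 padding values removed. A short self-contained sketch of that step; the index values are made up for illustration:

# Sketch: the explicit-split branch — the fraction is computed on the padded
# lists (so it lands on a round value), then the -1 padding is dropped.
import numpy as np

train_inds = np.asarray([0, 1, 2, 3, 4, 5, 6, -1])   # padded with -1
test_inds = np.asarray([7, 8])

train_fraction = round(len(train_inds) * 1.0 / (len(train_inds) + len(test_inds)), 2)
train_inds = train_inds[train_inds != -1]
test_inds = test_inds[test_inds != -1]
print(train_fraction, train_inds.tolist(), test_inds.tolist())
# 0.8 [0, 1, 2, 3, 4, 5, 6] [7, 8]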
41,910 |
def json_to_distribution(json_str: str) -> BaseDistribution:
"""Deserialize a distribution in JSON format.
Args:
json_str: A JSON-serialized distribution.
Returns:
A deserialized distribution.
Raises:
ValueError:
If distribution class cannot be specified.
"""
json_dict = json.loads(json_str)
if json_dict["name"] == CategoricalDistribution.__name__:
json_dict["attributes"]["choices"] = tuple(json_dict["attributes"]["choices"])
for cls in DISTRIBUTION_CLASSES:
if json_dict["name"] == cls.__name__:
return cls(**json_dict["attributes"])
raise ValueError("Unknown distribution class: {}".format(json_dict["name"]))
|
def json_to_distribution(json_str: str) -> BaseDistribution:
"""Deserialize a distribution in JSON format.
Args:
json_str: A JSON-serialized distribution.
Returns:
A deserialized distribution.
Raises:
ValueError:
If the unknown distribution class is specified.
"""
json_dict = json.loads(json_str)
if json_dict["name"] == CategoricalDistribution.__name__:
json_dict["attributes"]["choices"] = tuple(json_dict["attributes"]["choices"])
for cls in DISTRIBUTION_CLASSES:
if json_dict["name"] == cls.__name__:
return cls(**json_dict["attributes"])
raise ValueError("Unknown distribution class: {}".format(json_dict["name"]))
|
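The deserializer in this pair dispatches on the class name stored in the JSON payload and rebuilds the object from its serialized attributes. A minimal sketch of the same pattern with two stand-in classes; the class names and fields are invented and are not Optuna's real distributions:

# Sketch: name-based dispatch when deserializing, using made-up classes.
import json

class UniformDist:
    def __init__(self, low, high):
        self.low, self.high = low, high

class CategoricalDist:
    def __init__(self, choices):
        self.choices = tuple(choices)   # JSON lists become tuples, as above

CLASSES = (UniformDist, CategoricalDist)

def from_json(json_str):
    payload = json.loads(json_str)
    for cls in CLASSES:
        if payload["name"] == cls.__name__:
            return cls(**payload["attributes"])
    raise ValueError("Unknown distribution class: {}".format(payload["name"]))

d = from_json('{"name": "UniformDist", "attributes": {"low": 0, "high": 1}}')
print(d.low, d.high)  # 0 1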
6,586 |
def get_data(filters, period_list, partner_doctype):
sales_field = frappe.scrub(partner_doctype)
sales_users_data = get_parents_data(filters, partner_doctype)
if not sales_users_data: return
sales_users, item_groups = [], []
for d in sales_users_data:
if d.parent not in sales_users:
sales_users.append(d.parent)
if d.item_group not in item_groups:
item_groups.append(d.item_group)
if item_groups:
for item_group in item_groups:
if frappe.db.get_value("Item Group", {"name":item_group}, "is_group"):
for child_item_group in frappe.get_all("Item Group", {"parent_item_group":item_group}):
if child_item_group['name'] not in item_groups:
item_group.append(child_item_group['name'])
date_field = ("transaction_date"
if filters.get('doctype') == "Sales Order" else "posting_date")
actual_data = get_actual_data(filters, item_groups, sales_users, date_field, sales_field)
return prepare_data(filters, sales_users_data,
actual_data, date_field, period_list, sales_field)
|
def get_data(filters, period_list, partner_doctype):
sales_field = frappe.scrub(partner_doctype)
sales_users_data = get_parents_data(filters, partner_doctype)
if not sales_users_data: return
sales_users, item_groups = [], []
for d in sales_users_data:
if d.parent not in sales_users:
sales_users.append(d.parent)
if d.item_group not in item_groups:
item_groups.append(d.item_group)
if item_groups:
for item_group in item_groups:
if frappe.db.get_value("Item Group", {"name":item_group}, "is_group"):
for child_item_group in frappe.get_all("Item Group", {"parent_item_group":item_group}):
if child_item_group['name'] not in item_groups:
item_groups.append(child_item_group['name'])
date_field = ("transaction_date"
if filters.get('doctype') == "Sales Order" else "posting_date")
actual_data = get_actual_data(filters, item_groups, sales_users, date_field, sales_field)
return prepare_data(filters, sales_users_data,
actual_data, date_field, period_list, sales_field)
|
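The fix in this pair replaces item_group.append with item_groups.append: the original called append on the loop variable (a string), which raises AttributeError, so child item groups were never collected. A self-contained sketch of the corrected expansion, here iterating over a snapshot copy so only direct children are added (the fixed code iterates over the growing list, which also walks grandchildren); the group hierarchy is invented:

# Sketch: expanding group names with their children, iterating over a snapshot.
children = {"Products": ["Raw Material", "Sub Assemblies"], "Services": []}

item_groups = ["Products", "Services"]
for item_group in list(item_groups):          # iterate over a copy
    for child in children.get(item_group, []):
        if child not in item_groups:
            item_groups.append(child)         # append to the list, not the string

print(item_groups)  # ['Products', 'Services', 'Raw Material', 'Sub Assemblies']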
40,773 |
def hash_checkpoint(
checkpoint_path: Union[str, Path],
output_dir: Union[str, Path],
) -> Tuple[Path, str]:
"""
Hash the checkpoint file in the format of ``<filename>-<hash>.<ext>``
to be used with ``check_hash`` of :func:`torch.hub.load_state_dict_from_url`.
Args:
checkpoint_path: Path to the checkpoint file.
output_dir: Output directory to store the hashed checkpoint file
(will be created if not exist).
Returns:
Path to the hashed checkpoint file, the first 8 digits of SHA256 hash.
.. versionadded:: 0.5.0
"""
if isinstance(checkpoint_path, str):
checkpoint_path = Path(checkpoint_path)
if not checkpoint_path.exists():
raise FileNotFoundError(f"{checkpoint_path.name} does not exist in {checkpoint_path.parent}.")
if isinstance(output_dir, str):
output_dir = Path(output_dir)
output_dir.mkdir(parents=True, exist_ok=True)
hash_obj = hashlib.sha256()
# taken from https://github.com/pytorch/vision/blob/main/references/classification/utils.py
with checkpoint_path.open("rb") as f:
# Read and update hash string value in blocks of 4KB
for byte_block in iter(lambda: f.read(4096), b""):
hash_obj.update(byte_block)
sha_hash = hash_obj.hexdigest()
old_filename = checkpoint_path.stem
new_filename = "-".join((old_filename, sha_hash[:8])) + ".pt"
hash_checkpoint_path = output_dir / new_filename
shutil.move(str(checkpoint_path), hash_checkpoint_path)
return hash_checkpoint_path, sha_hash
|
def hash_checkpoint(
checkpoint_path: Union[str, Path],
output_dir: Union[str, Path]
) -> Tuple[Path, str]:
"""
Hash the checkpoint file in the format of ``<filename>-<hash>.<ext>``
to be used with ``check_hash`` of :func:`torch.hub.load_state_dict_from_url`.
Args:
checkpoint_path: Path to the checkpoint file.
output_dir: Output directory to store the hashed checkpoint file
(will be created if not exist).
Returns:
Path to the hashed checkpoint file, the first 8 digits of SHA256 hash.
.. versionadded:: 0.5.0
"""
if isinstance(checkpoint_path, str):
checkpoint_path = Path(checkpoint_path)
if not checkpoint_path.exists():
raise FileNotFoundError(f"{checkpoint_path.name} does not exist in {checkpoint_path.parent}.")
if isinstance(output_dir, str):
output_dir = Path(output_dir)
output_dir.mkdir(parents=True, exist_ok=True)
hash_obj = hashlib.sha256()
# taken from https://github.com/pytorch/vision/blob/main/references/classification/utils.py
with checkpoint_path.open("rb") as f:
# Read and update hash string value in blocks of 4KB
for byte_block in iter(lambda: f.read(4096), b""):
hash_obj.update(byte_block)
sha_hash = hash_obj.hexdigest()
old_filename = checkpoint_path.stem
new_filename = "-".join((old_filename, sha_hash[:8])) + ".pt"
hash_checkpoint_path = output_dir / new_filename
shutil.move(str(checkpoint_path), hash_checkpoint_path)
return hash_checkpoint_path, sha_hash
|
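The only change in this pair is a dropped trailing comma in the signature; the body hashes the checkpoint in 4 KB blocks. A self-contained sketch of that chunked SHA-256 idiom, writing a throwaway file so the example runs on its own:

# Sketch: chunked SHA-256 of a file, as used in hash_checkpoint above.
import hashlib
import tempfile
from pathlib import Path

path = Path(tempfile.mkdtemp()) / "weights.pt"
path.write_bytes(b"fake checkpoint bytes")

hash_obj = hashlib.sha256()
with path.open("rb") as f:
    for block in iter(lambda: f.read(4096), b""):  # read 4 KB at a time until EOF
        hash_obj.update(block)

print(hash_obj.hexdigest()[:8])  # first 8 hex digits, used as the filename suffix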
59,673 |
def test_project_no_data():
"""
Run project without providing `data` or `generate`.
"""
with pytest.raises(GMTInvalidInput):
project(center=[0, -1], azimuth=45, flat_earth=True)
|
def test_project_incorrect_parameters():
"""
Run project by providing incorrect parameters such as 1) no `center`; 2)
no `data` or `generate`; and 3) `generate` with `flags`.
"""
with pytest.raises(GMTInvalidInput):
# No `center`
project(azimuth=45)
with pytest.raises(GMTInvalidInput):
# No `data` or `generate`
project(center=[0, -1], azimuth=45, flat_earth=True)
with pytest.raises(GMTInvalidInput):
# Using `generate` with `flags`
project(center=[0, -1], generate=0.5, flags="xypqrsz")
|
31,544 |
def main():
try:
action = get_custom_field(ACTION_ON_CAMPAIGN_FIELD_NAME).lower()
ids = get_custom_field(SELECT_CAMPAIGN_INCIDENTS_FIELD_NAME)
if ALL_OPTION in ids:
ids = get_campaign_incident_ids()
res = ACTIONS_MAPPER[action](ids, action)
demisto.results(res)
except Exception as err:
return_error(str(err))
|
def main():
try:
action = get_custom_field(ACTION_ON_CAMPAIGN_FIELD_NAME).lower()
ids = get_custom_field(SELECT_CAMPAIGN_INCIDENTS_FIELD_NAME)
if ALL_OPTION in ids:
ids = get_campaign_incident_ids()
res = ACTIONS_MAPPER[action](ids, action)
return_results(res)
except Exception as err:
return_error(str(err))
|
7,235 |
def register_translation(src_image, target_image, upsample_factor=1,
space="real", return_error=True, reg_weight=1e-12):
"""
Efficient subpixel image translation registration by cross-correlation.
This code gives the same precision as the FFT upsampled cross-correlation
in a fraction of the computation time and with reduced memory requirements.
It obtains an initial estimate of the cross-correlation peak by an FFT and
then refines the shift estimation by upsampling the DFT only in a small
neighborhood of that estimate by means of a matrix-multiply DFT.
When there are multiple cross-correlation peaks, ties are broken by a
regularizer which favors smaller shifts. This regularization may be
disabled by setting ``reg_weight`` to zero.
Parameters
----------
src_image : array
Reference image.
target_image : array
Image to register. Must be same dimensionality as ``src_image``.
upsample_factor : int, optional
Upsampling factor. Images will be registered to within
``1 / upsample_factor`` of a pixel. For example
``upsample_factor == 20`` means the images will be registered
within 1/20th of a pixel. Default is 1 (no upsampling).
space : string, one of "real" or "fourier", optional
Defines how the algorithm interprets input data. "real" means data
will be FFT'd to compute the correlation, while "fourier" data will
bypass FFT of input data. Case insensitive.
return_error : bool, optional
Returns error and phase difference if when True, otherwise only shifts
are returned.
reg_weight : float, optional
Determines the strength of shift regularization.
.. versionadded:: 0.17
``reg_weight`` was introduced to break ties between
cross-correlation peaks.
Returns
-------
shifts : ndarray
Shift vector (in pixels) required to register ``target_image`` with
``src_image``. Axis ordering is consistent with numpy (e.g. Z, Y, X)
error : float
Translation invariant normalized RMS error between ``src_image`` and
``target_image``.
phasediff : float
Global phase difference between the two images (should be
zero if images are non-negative).
References
----------
.. [1] Manuel Guizar-Sicairos, Samuel T. Thurman, and James R. Fienup,
"Efficient subpixel image registration algorithms,"
Optics Letters 33, 156-158 (2008). :DOI:`10.1364/OL.33.000156`
.. [2] James R. Fienup, "Invariant error metrics for image reconstruction"
Optics Letters 36, 8352-8357 (1997). :DOI:`10.1364/AO.36.008352`
"""
# images must be the same shape
if src_image.shape != target_image.shape:
raise ValueError("Error: images must be same size for "
"register_translation")
# assume complex data is already in Fourier space
if space.lower() == 'fourier':
src_freq = src_image
target_freq = target_image
# real data needs to be fft'd.
elif space.lower() == 'real':
src_freq = fft.fftn(src_image)
target_freq = fft.fftn(target_image)
else:
raise ValueError("Error: register_translation only knows the \"real\" "
"and \"fourier\" values for the ``space`` argument.")
# Whole-pixel shift - Compute cross-correlation by an IFFT
shape = src_freq.shape
image_product = src_freq * target_freq.conj()
cross_correlation = fft.ifftn(image_product)
# Add a small regularization term so that smaller shifts are preferred when
# the cross_correlation is the same for multiple shifts.
if reg_weight > 0:
w = _area_overlap(cross_correlation)
w = fft.fftshift(w) * reg_weight
else:
w = 0
# Locate maximum
maxima = np.unravel_index(np.argmax(np.abs(cross_correlation) + w),
cross_correlation.shape)
midpoints = np.array([np.fix(axis_size / 2) for axis_size in shape])
shifts = np.array(maxima, dtype=np.float64)
shifts[shifts > midpoints] -= np.array(shape)[shifts > midpoints]
if upsample_factor == 1:
if return_error:
src_amp = np.sum(np.abs(src_freq)**2) / src_freq.size
target_amp = np.sum(np.abs(target_freq)**2) / target_freq.size
CCmax = cross_correlation[maxima]
# If upsampling > 1, then refine estimate with matrix multiply DFT
else:
# Initial shift estimate in upsampled grid
shifts = np.round(shifts * upsample_factor) / upsample_factor
upsampled_region_size = np.ceil(upsample_factor * 1.5)
# Center of output array at dftshift + 1
dftshift = np.fix(upsampled_region_size / 2.0)
upsample_factor = np.array(upsample_factor, dtype=np.float64)
normalization = (src_freq.size * upsample_factor**2)
# Matrix multiply DFT around the current shift estimate
sample_region_offset = dftshift - shifts * upsample_factor
cross_correlation = _upsampled_dft(image_product.conj(),
upsampled_region_size,
upsample_factor,
sample_region_offset).conj()
cross_correlation /= normalization
# Locate maximum and map back to original pixel grid
maxima = np.unravel_index(np.argmax(np.abs(cross_correlation)),
cross_correlation.shape)
CCmax = cross_correlation[maxima]
maxima = np.array(maxima, dtype=np.float64) - dftshift
shifts = shifts + maxima / upsample_factor
if return_error:
src_amp = _upsampled_dft(src_freq * src_freq.conj(),
1, upsample_factor)[0, 0]
src_amp /= normalization
target_amp = _upsampled_dft(target_freq * target_freq.conj(),
1, upsample_factor)[0, 0]
target_amp /= normalization
    # If it's only one row or column, the shift along that dimension has no
    # effect. We set it to zero.
for dim in range(src_freq.ndim):
if shape[dim] == 1:
shifts[dim] = 0
if return_error:
return shifts, _compute_error(CCmax, src_amp, target_amp),\
_compute_phasediff(CCmax)
else:
return shifts
|
def register_translation(src_image, target_image, upsample_factor=1,
space="real", return_error=True, reg_weight=1e-12):
"""
Efficient subpixel image translation registration by cross-correlation.
This code gives the same precision as the FFT upsampled cross-correlation
in a fraction of the computation time and with reduced memory requirements.
It obtains an initial estimate of the cross-correlation peak by an FFT and
then refines the shift estimation by upsampling the DFT only in a small
neighborhood of that estimate by means of a matrix-multiply DFT.
When there are multiple cross-correlation peaks, ties are broken by a
regularizer which favors smaller shifts. This regularization may be
disabled by setting ``reg_weight`` to zero.
Parameters
----------
src_image : array
Reference image.
target_image : array
Image to register. Must be same dimensionality as ``src_image``.
upsample_factor : int, optional
Upsampling factor. Images will be registered to within
``1 / upsample_factor`` of a pixel. For example
``upsample_factor == 20`` means the images will be registered
within 1/20th of a pixel. Default is 1 (no upsampling).
space : string, one of "real" or "fourier", optional
Defines how the algorithm interprets input data. "real" means data
will be FFT'd to compute the correlation, while "fourier" data will
bypass FFT of input data. Case insensitive.
return_error : bool, optional
Returns error and phase difference if True, otherwise only shifts
are returned.
reg_weight : float, optional
Determines the strength of shift regularization.
.. versionadded:: 0.17
``reg_weight`` was introduced to break ties between
cross-correlation peaks.
Returns
-------
shifts : ndarray
Shift vector (in pixels) required to register ``target_image`` with
``src_image``. Axis ordering is consistent with numpy (e.g. Z, Y, X)
error : float
Translation invariant normalized RMS error between ``src_image`` and
``target_image``.
phasediff : float
Global phase difference between the two images (should be
zero if images are non-negative).
References
----------
.. [1] Manuel Guizar-Sicairos, Samuel T. Thurman, and James R. Fienup,
"Efficient subpixel image registration algorithms,"
Optics Letters 33, 156-158 (2008). :DOI:`10.1364/OL.33.000156`
.. [2] James R. Fienup, "Invariant error metrics for image reconstruction"
Optics Letters 36, 8352-8357 (1997). :DOI:`10.1364/AO.36.008352`
"""
# images must be the same shape
if src_image.shape != target_image.shape:
raise ValueError("Error: images must be same size for "
"register_translation")
# assume complex data is already in Fourier space
if space.lower() == 'fourier':
src_freq = src_image
target_freq = target_image
# real data needs to be fft'd.
elif space.lower() == 'real':
src_freq = fft.fftn(src_image)
target_freq = fft.fftn(target_image)
else:
raise ValueError("Error: register_translation only knows the \"real\" "
"and \"fourier\" values for the ``space`` argument.")
# Whole-pixel shift - Compute cross-correlation by an IFFT
shape = src_freq.shape
image_product = src_freq * target_freq.conj()
cross_correlation = fft.ifftn(image_product)
# Add a small regularization term so that smaller shifts are preferred when
# the cross_correlation is the same for multiple shifts.
if reg_weight > 0:
w = _area_overlap(cross_correlation)
w = fft.fftshift(w) * reg_weight
else:
w = 0
# Locate maximum
maxima = np.unravel_index(np.argmax(np.abs(cross_correlation) + w),
cross_correlation.shape)
midpoints = np.array([np.fix(axis_size / 2) for axis_size in shape])
shifts = np.array(maxima, dtype=np.float64)
shifts[shifts > midpoints] -= np.array(shape)[shifts > midpoints]
if upsample_factor == 1:
if return_error:
src_amp = np.sum(np.abs(src_freq)**2) / src_freq.size
target_amp = np.sum(np.abs(target_freq)**2) / target_freq.size
CCmax = cross_correlation[maxima]
# If upsampling > 1, then refine estimate with matrix multiply DFT
else:
# Initial shift estimate in upsampled grid
shifts = np.round(shifts * upsample_factor) / upsample_factor
upsampled_region_size = np.ceil(upsample_factor * 1.5)
# Center of output array at dftshift + 1
dftshift = np.fix(upsampled_region_size / 2.0)
upsample_factor = np.array(upsample_factor, dtype=np.float64)
normalization = (src_freq.size * upsample_factor**2)
# Matrix multiply DFT around the current shift estimate
sample_region_offset = dftshift - shifts * upsample_factor
cross_correlation = _upsampled_dft(image_product.conj(),
upsampled_region_size,
upsample_factor,
sample_region_offset).conj()
cross_correlation /= normalization
# Locate maximum and map back to original pixel grid
maxima = np.unravel_index(np.argmax(np.abs(cross_correlation)),
cross_correlation.shape)
CCmax = cross_correlation[maxima]
maxima = np.array(maxima, dtype=np.float64) - dftshift
shifts = shifts + maxima / upsample_factor
if return_error:
src_amp = _upsampled_dft(src_freq * src_freq.conj(),
1, upsample_factor)[0, 0]
src_amp /= normalization
target_amp = _upsampled_dft(target_freq * target_freq.conj(),
1, upsample_factor)[0, 0]
target_amp /= normalization
    # If it's only one row or column, the shift along that dimension has no
    # effect. We set it to zero.
for dim in range(src_freq.ndim):
if shape[dim] == 1:
shifts[dim] = 0
if return_error:
return shifts, _compute_error(CCmax, src_amp, target_amp),\
_compute_phasediff(CCmax)
else:
return shifts
|
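Beyond the one-word docstring fix, the whole-pixel branch of this algorithm is compact enough to demonstrate on its own. A hedged numpy sketch of the coarse shift estimate via FFT cross-correlation, with no upsampling and no regularization term; the test images are synthetic:

# Sketch: whole-pixel shift estimate by FFT cross-correlation (no upsampling).
import numpy as np

rng = np.random.default_rng(0)
src = rng.random((64, 64))
target = np.roll(src, shift=(3, -5), axis=(0, 1))   # known circular shift

src_freq = np.fft.fftn(src)
target_freq = np.fft.fftn(target)
cross_correlation = np.fft.ifftn(src_freq * target_freq.conj())

maxima = np.unravel_index(np.argmax(np.abs(cross_correlation)),
                          cross_correlation.shape)
shape = np.array(src.shape)
shifts = np.array(maxima, dtype=float)
midpoints = np.fix(shape / 2)
shifts[shifts > midpoints] -= shape[shifts > midpoints]  # wrap to signed shifts
print(shifts)  # [-3.  5.] for this circular shift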
32,241 |
def check_log_forwarding(topology: Topology,
device_filter_string: str = None) -> ConfigurationHygieneCheckResult:
"""
:param topology: `Topology` instance !no-auto-argument
:param device_filter_string: String to filter to only check given device
"""
result: ConfigurationHygieneCheckResult = \
HygieneLookups.check_log_forwarding_profiles(topology, device_filter_str=device_filter_string)
return result
|
def check_log_forwarding(topology: Topology,
device_filter_string: Optional[str] = None) -> ConfigurationHygieneCheckResult:
"""
:param topology: `Topology` instance !no-auto-argument
:param device_filter_string: String to filter to only check given device
"""
result: ConfigurationHygieneCheckResult = \
HygieneLookups.check_log_forwarding_profiles(topology, device_filter_str=device_filter_string)
return result
|
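The change in this pair is purely in the annotation: a parameter defaulting to None should be typed Optional[str] so static checkers accept the default. A tiny sketch of that convention; the function body is invented:

# Sketch: declaring a None default with Optional, as in the fixed signature.
from typing import Optional

def check(device_filter_string: Optional[str] = None) -> bool:
    return device_filter_string is None or bool(device_filter_string.strip())

print(check(), check("fw01"))  # True True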
41,512 |
def test_qmu(caplog):
mu = 1.0
model = pyhf.simplemodels.hepdata_like([6], [9], [3])
data = [9] + model.config.auxdata
init_pars = model.config.suggested_init()
par_bounds = model.config.suggested_bounds()
with caplog.at_level(logging.WARNING, "pyhf.infer.test_statistics"):
pyhf.infer.test_statistics.qmu(mu, data, model, init_pars, par_bounds, [])
assert "WARNING qmu test statistic used for fit" in caplog.text
caplog.clear()
|
def test_qmu(caplog):
mu = 1.0
model = pyhf.simplemodels.hepdata_like([6], [9], [3])
data = [9] + model.config.auxdata
init_pars = model.config.suggested_init()
par_bounds = model.config.suggested_bounds()
with caplog.at_level(logging.WARNING, "pyhf.infer.test_statistics"):
pyhf.infer.test_statistics.qmu(mu, data, model, init_pars, par_bounds, fixed_vals)
assert "WARNING qmu test statistic used for fit" in caplog.text
caplog.clear()
|
36,022 |
def test_get_by_label(setup_codes):
"""Verify that using the LABEL will retrieve the correct entity."""
entity_01, entity_02, entity_03 = setup_codes
param = CodeParamType()
identifier = '{}'.format(entity_01.label)
result = param.convert(identifier, None, None)
assert result.uuid == entity_01.uuid
|
def test_get_by_label(setup_codes):
"""Verify that using the LABEL will retrieve the correct entity."""
entity_01, entity_02, entity_03 = setup_codes
param = CodeParamType()
identifier = '{}'.format(entity_01.label)
result = param.convert(identifier, None, None)
entity_01, _, _ = setup_codes
|
51,679 |
def activate(
env, use_env_repo=False, add_view=True, shell='sh', prompt=None
):
"""Activate an environment.
To activate an environment, we add its configuration scope to the
existing Spack configuration, and we set active to the current
environment.
Arguments:
env (Environment): the environment to activate
use_env_repo (bool): use the packages exactly as they appear in the
environment's repository
add_view (bool): generate commands to add view to path variables
shell (string): One of `sh`, `csh`, `fish`.
prompt (string): string to add to the users prompt, or None
Returns:
cmds: Shell commands to activate environment.
TODO: environment to use the activated spack environment.
"""
global _active_environment
_active_environment = env
prepare_config_scope(_active_environment)
if use_env_repo:
spack.repo.path.put_first(_active_environment.repo)
tty.debug("Using environmennt '%s'" % _active_environment.name)
# Construct the commands to run
cmds = ''
if shell == 'csh':
# TODO: figure out how to make color work for csh
cmds += 'setenv SPACK_ENV %s;\n' % env.path
cmds += 'alias despacktivate "spack env deactivate";\n'
if prompt:
cmds += 'if (! $?SPACK_OLD_PROMPT ) '
cmds += 'setenv SPACK_OLD_PROMPT "${prompt}";\n'
cmds += 'set prompt="%s ${prompt}";\n' % prompt
elif shell == 'fish':
if os.getenv('TERM') and 'color' in os.getenv('TERM') and prompt:
prompt = colorize('@G{%s} ' % prompt, color=True)
cmds += 'set -gx SPACK_ENV %s;\n' % env.path
cmds += 'function despacktivate;\n'
cmds += ' spack env deactivate;\n'
cmds += 'end;\n'
#
# NOTE: We're not changing the fish_prompt function (which is fish's
# solution to the PS1 variable) here. This is a bit fiddly, and easy to
        # screw up => spend time researching a solution. Feedback welcome.
#
else:
if os.getenv('TERM') and 'color' in os.getenv('TERM') and prompt:
prompt = colorize('@G{%s} ' % prompt, color=True)
cmds += 'export SPACK_ENV=%s;\n' % env.path
cmds += "alias despacktivate='spack env deactivate';\n"
if prompt:
cmds += 'if [ -z ${SPACK_OLD_PS1+x} ]; then\n'
cmds += ' if [ -z ${PS1+x} ]; then\n'
cmds += " PS1='$$$$';\n"
cmds += ' fi;\n'
cmds += ' export SPACK_OLD_PS1="${PS1}";\n'
cmds += 'fi;\n'
cmds += 'export PS1="%s ${PS1}";\n' % prompt
#
# NOTE in the fish-shell: Path variables are a special kind of variable
# used to support colon-delimited path lists including PATH, CDPATH,
# MANPATH, PYTHONPATH, etc. All variables that end in PATH (case-sensitive)
# become PATH variables.
#
try:
if add_view and default_view_name in env.views:
with spack.store.db.read_transaction():
cmds += env.add_default_view_to_shell(shell)
except (spack.repo.UnknownPackageError,
spack.repo.UnknownNamespaceError) as e:
tty.error(e)
tty.die(
'Environment DAG is broken due to a missing package or repo.',
'To resolve without it, force concretize with the command: ',
' spack --no-env-view concretize --force')
return cmds
|
def activate(
env, use_env_repo=False, add_view=True, shell='sh', prompt=None
):
"""Activate an environment.
To activate an environment, we add its configuration scope to the
existing Spack configuration, and we set active to the current
environment.
Arguments:
env (Environment): the environment to activate
use_env_repo (bool): use the packages exactly as they appear in the
environment's repository
add_view (bool): generate commands to add view to path variables
shell (string): One of `sh`, `csh`, `fish`.
prompt (string): string to add to the users prompt, or None
Returns:
cmds: Shell commands to activate environment.
TODO: environment to use the activated spack environment.
"""
global _active_environment
_active_environment = env
prepare_config_scope(_active_environment)
if use_env_repo:
spack.repo.path.put_first(_active_environment.repo)
tty.debug("Using environmennt '%s'" % _active_environment.name)
# Construct the commands to run
cmds = ''
if shell == 'csh':
# TODO: figure out how to make color work for csh
cmds += 'setenv SPACK_ENV %s;\n' % env.path
cmds += 'alias despacktivate "spack env deactivate";\n'
if prompt:
cmds += 'if (! $?SPACK_OLD_PROMPT ) '
cmds += 'setenv SPACK_OLD_PROMPT "${prompt}";\n'
cmds += 'set prompt="%s ${prompt}";\n' % prompt
elif shell == 'fish':
if os.getenv('TERM') and 'color' in os.getenv('TERM') and prompt:
prompt = colorize('@G{%s} ' % prompt, color=True)
cmds += 'set -gx SPACK_ENV %s;\n' % env.path
cmds += 'function despacktivate;\n'
cmds += ' spack env deactivate;\n'
cmds += 'end;\n'
#
# NOTE: We're not changing the fish_prompt function (which is fish's
# solution to the PS1 variable) here. This is a bit fiddly, and easy to
        # screw up => spend time researching a solution. Feedback welcome.
#
else:
if os.getenv('TERM') and 'color' in os.getenv('TERM') and prompt:
prompt = colorize('@G{%s} ' % prompt, color=True)
cmds += 'export SPACK_ENV=%s;\n' % env.path
cmds += "alias despacktivate='spack env deactivate';\n"
if prompt:
cmds += 'if [ -z ${SPACK_OLD_PS1+x} ]; then\n'
cmds += ' if [ -z ${PS1+x} ]; then\n'
cmds += " PS1='$$$$';\n"
cmds += ' fi;\n'
cmds += ' export SPACK_OLD_PS1="${PS1}";\n'
cmds += 'fi;\n'
cmds += 'export PS1="%s ${PS1}";\n' % prompt
#
# NOTE in the fish-shell: Path variables are a special kind of variable
# used to support colon-delimited path lists including PATH, CDPATH,
# MANPATH, PYTHONPATH, etc. All variables that end in PATH (case-sensitive)
# become PATH variables.
#
try:
if add_view and default_view_name in env.views:
with spack.store.db.read_transaction():
cmds += env.add_default_view_to_shell(shell)
except (spack.repo.UnknownPackageError,
spack.repo.UnknownNamespaceError) as e:
tty.error(e)
tty.die(
'Environment view is broken due to a missing package or repo.\n',
' To remove it and resolve the issue, ',
'force concretize with the command:\n',
' spack concretize --force')
return cmds
|
8,848 |
def find(*patterns):
"""Decorate a function to be called each time patterns is found in a line.
:param str patterns: one or more regular expression(s)
Each argument is a regular expression which will trigger the function::
@find('hello', 'here')
# will trigger once on "hello you"
# will trigger twice on "hello here"
# will trigger once on "I'm right here!"
This decorator can be used multiple times to add more rules::
@find('here')
@find('hello')
# will trigger once on "hello you"
# will trigger twice on "hello here"
# will trigger once on "I'm right here!"
If the Sopel instance is in a channel, or sent a ``PRIVMSG``, the function
will execute for each time in a string said matches the expression. Each
match will also contains the position of the instance it found.
Inside the regular expression, some special directives can be used.
``$nick`` will be replaced with the nick of the bot and ``,`` or ``:``, and
``$nickname`` will be replaced with the nick of the bot::
@find('$nickname')
# will trigger for each time the bot's nick is in a trigger
.. versionadded:: 7.1
.. note::
The regex rule will match for each non-overlapping matches, from left
to right, and the function will execute for each of these matches.
To match only once from anywhere in the line, use the :func:`search`
decorator instead. To match only once from the start of the line,
use the :func:`rule` decorator instead.
"""
def add_attribute(function):
if not hasattr(function, "find_rules"):
function.find_rules = []
for value in patterns:
if value not in function.find_rules:
function.find_rules.append(value)
return function
return add_attribute
|
def find(*patterns):
"""Decorate a function to be called each time patterns is found in a line.
:param str patterns: one or more regular expression(s)
Each argument is a regular expression which will trigger the function::
@find('hello', 'here')
# will trigger once on "hello you"
# will trigger twice on "hello here"
# will trigger once on "I'm right here!"
This decorator can be used multiple times to add more rules::
@find('here')
@find('hello')
# will trigger once on "hello you"
# will trigger twice on "hello here"
# will trigger once on "I'm right here!"
If the Sopel instance is in a channel, or sent a ``PRIVMSG``, the function
will execute for each time in a string said matches the expression. Each
match will also contains the position of the instance it found.
Inside the regular expression, some special directives can be used.
``$nick`` will be replaced with the nick of the bot and ``,`` or ``:``, and
``$nickname`` will be replaced with the nick of the bot::
@find('$nickname')
# will trigger for each time the bot's nick is in a trigger
.. versionadded:: 7.1
.. note::
The regex rule will match once for each non-overlapping match, from left
to right, and the function will execute for each of these matches.
To match only once from anywhere in the line, use the :func:`search`
decorator instead. To match only once from the start of the line,
use the :func:`rule` decorator instead.
"""
def add_attribute(function):
if not hasattr(function, "find_rules"):
function.find_rules = []
for value in patterns:
if value not in function.find_rules:
function.find_rules.append(value)
return function
return add_attribute
|
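A self-contained sketch (reproducing only the attribute logic shown above, not Sopel itself) of how stacked @find decorators accumulate patterns on the wrapped callable:

def find(*patterns):
    def add_attribute(function):
        if not hasattr(function, "find_rules"):
            function.find_rules = []
        for value in patterns:
            if value not in function.find_rules:
                function.find_rules.append(value)
        return function
    return add_attribute

@find('here')
@find('hello')
def greet(bot, trigger):
    pass

# The inner decorator runs first, so patterns accumulate bottom-up.
print(greet.find_rules)  # ['hello', 'here']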
30,906 |
def check_field(field_value, regex=None):
if regex:
if re.match(regex, field_value):
return True
else:
if len(field_value) > 0:
return True
return False
|
def check_field(field_value, regex=None):
if regex:
if re.match(regex, field_value):
return True
else:
if field_value:
return True
return False
|
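A quick demonstration of the truthiness check used in the modified version (behaviour shown for plain strings; the regex branch is unchanged):

import re

def check_field(field_value, regex=None):
    if regex:
        if re.match(regex, field_value):
            return True
    else:
        if field_value:
            return True
    return False

print(check_field("abc"))                    # True
print(check_field(""))                       # False -- empty string is falsy
print(check_field("2021-01-01", r"\d{4}-"))  # True  -- regex branch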
54,077 |
def configure_load_balancer_profile(managed_outbound_ip_count, managed_outbound_ipv6_count, outbound_ips,
outbound_ip_prefixes, outbound_ports, idle_timeout, profile):
"""configure a load balancer with customer supplied values"""
if not profile:
return profile
outbound_ip_resources = _get_load_balancer_outbound_ips(outbound_ips)
outbound_ip_prefix_resources = _get_load_balancer_outbound_ip_prefixes(outbound_ip_prefixes)
if (
managed_outbound_ip_count or
managed_outbound_ipv6_count or
outbound_ip_resources or
outbound_ip_prefix_resources
):
# ips -> i_ps due to track 2 naming issue
profile.outbound_i_ps = None
profile.outbound_ip_prefixes = None
profile.managed_outbound_i_ps = None
if managed_outbound_ip_count and managed_outbound_ipv6_count:
profile.managed_outbound_i_ps = ManagedClusterLoadBalancerProfileManagedOutboundIPs(
count=managed_outbound_ip_count,
count_ipv6=managed_outbound_ipv6_count
)
elif managed_outbound_ipv6_count:
profile.managed_outbound_i_ps = ManagedClusterLoadBalancerProfileManagedOutboundIPs(
count_ipv6=managed_outbound_ipv6_count
)
elif managed_outbound_ip_count:
profile.managed_outbound_i_ps = ManagedClusterLoadBalancerProfileManagedOutboundIPs(
count=managed_outbound_ip_count
)
if outbound_ip_resources:
# ips -> i_ps due to track 2 naming issue
profile.outbound_i_ps = ManagedClusterLoadBalancerProfileOutboundIPs(
public_i_ps=outbound_ip_resources
)
if outbound_ip_prefix_resources:
profile.outbound_ip_prefixes = ManagedClusterLoadBalancerProfileOutboundIPPrefixes(
public_ip_prefixes=outbound_ip_prefix_resources
)
if outbound_ports:
profile.allocated_outbound_ports = outbound_ports
if idle_timeout:
profile.idle_timeout_in_minutes = idle_timeout
return profile
|
def configure_load_balancer_profile(managed_outbound_ip_count, managed_outbound_ipv6_count, outbound_ips,
outbound_ip_prefixes, outbound_ports, idle_timeout, profile):
"""configure a load balancer with customer supplied values"""
if not profile:
return profile
outbound_ip_resources = _get_load_balancer_outbound_ips(outbound_ips)
outbound_ip_prefix_resources = _get_load_balancer_outbound_ip_prefixes(outbound_ip_prefixes)
if (
managed_outbound_ip_count or
managed_outbound_ipv6_count or
outbound_ip_resources or
outbound_ip_prefix_resources
):
# ips -> i_ps due to track 2 naming issue
profile.outbound_i_ps = None
profile.outbound_ip_prefixes = None
profile.managed_outbound_i_ps = None
if managed_outbound_ip_count or managed_outbound_ipv6_count:
profile.managed_outbound_i_ps = ManagedClusterLoadBalancerProfileManagedOutboundIPs()
if managed_outbound_ip_count:
profile.managed_outbound_i_ps.count = managed_outbound_ip_count
if managed_outbound_ipv6_count:
profile.managed_outbound_i_ps.count_ipv6 = managed_outbound_ipv6_count
if outbound_ip_resources:
# ips -> i_ps due to track 2 naming issue
profile.outbound_i_ps = ManagedClusterLoadBalancerProfileOutboundIPs(
public_i_ps=outbound_ip_resources
)
if outbound_ip_prefix_resources:
profile.outbound_ip_prefixes = ManagedClusterLoadBalancerProfileOutboundIPPrefixes(
public_ip_prefixes=outbound_ip_prefix_resources
)
if outbound_ports:
profile.allocated_outbound_ports = outbound_ports
if idle_timeout:
profile.idle_timeout_in_minutes = idle_timeout
return profile
|
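A minimal sketch of the refactor above, using a hypothetical stand-in class instead of the Azure SDK model ManagedClusterLoadBalancerProfileManagedOutboundIPs; it only illustrates the create-once-then-assign pattern of the modified branch:

class FakeManagedOutboundIPs:          # stand-in for the SDK model, assumption only
    def __init__(self, count=None, count_ipv6=None):
        self.count = count
        self.count_ipv6 = count_ipv6

def build_managed_outbound_ips(ip_count, ipv6_count):
    # Mirrors the modified branch: create a single object, then set only the supplied counts.
    if not (ip_count or ipv6_count):
        return None
    ips = FakeManagedOutboundIPs()
    if ip_count:
        ips.count = ip_count
    if ipv6_count:
        ips.count_ipv6 = ipv6_count
    return ips

ips = build_managed_outbound_ips(3, 2)
print(ips.count, ips.count_ipv6)  # 3 2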
53,910 |
def call_api_get(url, token):
headers = {'Authorization' :'Bearer {}'.format(token)}
response = requests.get(url, headers=headers)
if response.status_code != 200:
raise ConfigurationError('Failed to get {}. status_code={}'.format(key, response.status_code))
resultsJson = json.loads(response.text)
return resultsJson
|
def call_api_get(url, token):
headers = {'Authorization' :'Bearer {}'.format(token)}
response = self.http.get(url, headers=headers)
if response.status_code != 200:
raise ConfigurationError('Failed to get {}. status_code={}'.format(key, response.status_code))
resultsJson = json.loads(response.text)
return resultsJson
|
35,021 |
def make_inputs_dict(inputs, shape_dict, dtype_dict, fill_mode):
"""Make the inputs dictionary for a graph.
Use data from 'inputs' where specified. For input tensors
where no data has been given, generate data according to the
chosen fill-mode.
Parameters
----------
inputs : dict
A dictionary that maps input names to numpy values.
shape_dict : dict
Shape dictionary - {input_name: tuple}.
dtype_dict : dict
dtype dictionary - {input_name: dtype}.
fill_mode : str
The fill-mode to use when generating tensor data.
Can be either "zeros", "ones" or "random".
Returns
-------
inputs_dict : dict
Complete inputs dictionary - {input_name: np.array}.
"""
logger.debug("creating inputs dict")
if inputs is None:
inputs = {}
# First check all the keys in inputs exist in the graph
for input_name in inputs:
if input_name not in shape_dict.keys():
raise TVMCException(
"the input tensor '{}' is not in the graph. Expected inputs: '{}'".format(
input_name, shape_dict.keys()
)
)
# Now construct the input dict, generating tensors where no
# data already exists in 'inputs'
inputs_dict = {}
for input_name in shape_dict:
if input_name in inputs.keys():
logger.debug("setting input '%s' with user input data", input_name)
inputs_dict[input_name] = inputs[input_name]
else:
shape = shape_dict[input_name]
dtype = dtype_dict[input_name]
logger.debug(
"generating data for input '%s' (shape: %s, dtype: %s), using fill-mode '%s'",
input_name,
shape,
dtype,
fill_mode,
)
data = generate_tensor_data(shape, dtype, fill_mode)
inputs_dict[input_name] = data
return inputs_dict
|
def make_inputs_dict(inputs, shape_dict, dtype_dict, fill_mode):
"""Make the inputs dictionary for a graph.
Use data from 'inputs' where specified. For input tensors
where no data has been given, generate data according to the
chosen fill-mode.
Parameters
----------
inputs : dict, or None
A dictionary that maps input names to numpy values.
shape_dict : dict
Shape dictionary - {input_name: tuple}.
dtype_dict : dict
dtype dictionary - {input_name: dtype}.
fill_mode : str
The fill-mode to use when generating tensor data.
Can be either "zeros", "ones" or "random".
Returns
-------
inputs_dict : dict
Complete inputs dictionary - {input_name: np.array}.
"""
logger.debug("creating inputs dict")
if inputs is None:
inputs = {}
# First check all the keys in inputs exist in the graph
for input_name in inputs:
if input_name not in shape_dict.keys():
raise TVMCException(
"the input tensor '{}' is not in the graph. Expected inputs: '{}'".format(
input_name, shape_dict.keys()
)
)
# Now construct the input dict, generating tensors where no
# data already exists in 'inputs'
inputs_dict = {}
for input_name in shape_dict:
if input_name in inputs.keys():
logger.debug("setting input '%s' with user input data", input_name)
inputs_dict[input_name] = inputs[input_name]
else:
shape = shape_dict[input_name]
dtype = dtype_dict[input_name]
logger.debug(
"generating data for input '%s' (shape: %s, dtype: %s), using fill-mode '%s'",
input_name,
shape,
dtype,
fill_mode,
)
data = generate_tensor_data(shape, dtype, fill_mode)
inputs_dict[input_name] = data
return inputs_dict
|
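The helper generate_tensor_data() is not shown above; the following is only an assumed sketch of the three fill modes named in the docstring ('zeros', 'ones', 'random'), not TVMC's actual implementation:

import numpy as np

def generate_tensor_data_sketch(shape, dtype, fill_mode):
    # Assumed behaviour for the three documented fill modes.
    if fill_mode == "zeros":
        return np.zeros(shape, dtype=dtype)
    if fill_mode == "ones":
        return np.ones(shape, dtype=dtype)
    if fill_mode == "random":
        return np.random.uniform(size=shape).astype(dtype)
    raise ValueError("unknown fill-mode: {}".format(fill_mode))

print(generate_tensor_data_sketch((2, 3), "float32", "ones"))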
39,751 |
def load_center_evaluate(idx_row, df_annot, path_annot, path_visu=None, col_prefix=''):
""" complete pipeline fon input image and seg_pipe, such that load them,
generate points, compute features and using given classifier predict labels
:param (int, DF:row) idx_row:
:param df_annot:
:param str path_annot:
:param str path_visu:
:param str col_prefix:
:return dict(str,float):
"""
idx, row = idx_row
dict_row = dict(row)
dict_row['image'] = os.path.splitext(os.path.basename(dict_row['path_image']))[0]
if idx not in df_annot.index:
logging.debug('particular image/slice "%s" does not contain eggs '
'of selected stage %s', idx, col_prefix)
return dict_row
name, img, segm, centres = run_train.load_image_segm_center((None, row))
if centres is None:
logging.debug('center missing "%s"', idx)
return dict_row
if not all(c in df_annot.columns for c in tl_visu.COLUMNS_POSITION_EGG_ANNOT):
raise ValueError(
'some required columns %r are missing for %s' % (tl_visu.COLUMNS_POSITION_EGG_ANNOT, df_annot.columns)
)
mask_eggs = estimate_eggs_from_info(df_annot.loc[idx], img.shape[:2])
try:
if EXPORT_ANNOT_EGGS:
path_img = os.path.join(path_annot, idx + '.png')
tl_data.io_imsave(path_img, mask_eggs.astype(np.uint8))
if VISUAL_ANNOT_EGGS:
fig = tl_visu.figure_image_segm_results(img, mask_eggs)
fig.savefig(os.path.join(path_visu, idx + '_eggs.png'))
plt.close(fig)
if VISUAL_SEGM_CENTRES:
run_clust.export_draw_image_centers_clusters(path_visu, name, img, centres, segm=segm)
labels = np.array([1] * len(centres))
dict_stat = compute_statistic_eggs_centres(
dict_row, centres, labels, mask_eggs, img, segm, path_visu, col_prefix
)
except Exception:
logging.exception('load_center_evaluate')
dict_stat = dict_row
return dict_stat
|
def load_center_evaluate(idx_row, df_annot, path_annot, path_visu=None, col_prefix=''):
""" complete pipeline fon input image and seg_pipe, such that load them,
generate points, compute features and using given classifier predict labels
:param (int, DF:row) idx_row:
:param df_annot:
:param str path_annot:
:param str path_visu:
:param str col_prefix:
:return dict(str,float):
"""
idx, row = idx_row
dict_row = dict(row)
dict_row['image'] = os.path.splitext(os.path.basename(dict_row['path_image']))[0]
if idx not in df_annot.index:
logging.debug('particular image/slice "%s" does not contain eggs of selected stage %s', idx, col_prefix)
return dict_row
name, img, segm, centres = run_train.load_image_segm_center((None, row))
if centres is None:
logging.debug('center missing "%s"', idx)
return dict_row
if not all(c in df_annot.columns for c in tl_visu.COLUMNS_POSITION_EGG_ANNOT):
raise ValueError(
'some required columns %r are missing for %s' % (tl_visu.COLUMNS_POSITION_EGG_ANNOT, df_annot.columns)
)
mask_eggs = estimate_eggs_from_info(df_annot.loc[idx], img.shape[:2])
try:
if EXPORT_ANNOT_EGGS:
path_img = os.path.join(path_annot, idx + '.png')
tl_data.io_imsave(path_img, mask_eggs.astype(np.uint8))
if VISUAL_ANNOT_EGGS:
fig = tl_visu.figure_image_segm_results(img, mask_eggs)
fig.savefig(os.path.join(path_visu, idx + '_eggs.png'))
plt.close(fig)
if VISUAL_SEGM_CENTRES:
run_clust.export_draw_image_centers_clusters(path_visu, name, img, centres, segm=segm)
labels = np.array([1] * len(centres))
dict_stat = compute_statistic_eggs_centres(
dict_row, centres, labels, mask_eggs, img, segm, path_visu, col_prefix
)
except Exception:
logging.exception('load_center_evaluate')
dict_stat = dict_row
return dict_stat
|
43,170 |
def to_cugraph(g):
"""Convert a DGL graph to a cugraph.Graph and return.
Parameters
----------
g : DGLGraph
A homogeneous graph.
Returns
-------
cugraph.Graph
The converted cugraph graph.
Notes
-----
The function only supports GPU graph input.
Examples
--------
The following example uses PyTorch backend.
>>> import dgl
>>> import cugraph
>>> import torch
>>> g = dgl.graph((torch.tensor([1, 2]), torch.tensor([1, 3]))).to('cuda')
>>> cugraph_g = g.to_cugraph()
>>> cugraph_g.edges()
src dst
0 2 3
1 1 1
"""
if g.device.type != 'cuda':
raise DGLError(f"Cannot convert a {g.device.type} graph to cugraph." +
"Call g.to('cuda') first.")
if not g.is_homogeneous:
raise DGLError("dgl.to_cugraph only supports homogeneous graphs.")
try:
import cugraph
import cudf
except ModuleNotFoundError:
raise ModuleNotFoundError("to_cugraph requires cugraph which could not be imported")
edgelist = g.edges()
src_ser = cudf.from_dlpack(F.zerocopy_to_dlpack(edgelist[0]))
dst_ser = cudf.from_dlpack(F.zerocopy_to_dlpack(edgelist[1]))
cudf_data = cudf.DataFrame({'source':src_ser, 'destination':dst_ser})
g_cugraph = cugraph.Graph(directed=True)
g_cugraph.from_cudf_edgelist(cudf_data,
source='source',
destination='destination')
return g_cugraph
|
def to_cugraph(g):
"""Convert a DGL graph to a cugraph.Graph and return.
Parameters
----------
g : DGLGraph
A homogeneous graph.
Returns
-------
cugraph.Graph
The converted cugraph graph.
Notes
-----
The function only supports GPU graph input.
Examples
--------
The following example uses PyTorch backend.
>>> import dgl
>>> import cugraph
>>> import torch
>>> g = dgl.graph((torch.tensor([1, 2]), torch.tensor([1, 3]))).to('cuda')
>>> cugraph_g = g.to_cugraph()
>>> cugraph_g.edges()
src dst
0 2 3
1 1 1
"""
if g.device.type != 'cuda':
raise DGLError(f"Cannot convert a {g.device.type} graph to cugraph." +
"Call g.to('cuda') first.")
if not g.is_homogeneous:
raise DGLError("dgl.to_cugraph only supports homogeneous graphs.")
try:
import cugraph
import cudf
except ModuleNotFoundError:
raise ModuleNotFoundError("to_cugraph requires cugraph which could not be imported")
edgelist = g.edges()
src_ser = cudf.from_dlpack(F.zerocopy_to_dlpack(edgelist[0]))
dst_ser = cudf.from_dlpack(F.zerocopy_to_dlpack(edgelist[1]))
cudf_data = cudf.DataFrame({'source':src_ser, 'destination':dst_ser})
g_cugraph = cugraph.Graph(directed=True)
g_cugraph.from_cudf_edgelist(cudf_data,
source='source',
destination='destination')
return g_cugraph
|
6,365 |
def main(argv=None):
if argv is None:
argv = sys.argv
parser = argparse.ArgumentParser(
description="Parse program execution entries from the Amcache.hve Registry hive")
parser.add_argument("registry_hive", type=str,
help="Path to the Amcache.hve hive to process")
parser.add_argument("-v", action="store_true", dest="verbose",
help="Enable verbose output")
parser.add_argument("-t", action="store_true", dest="do_timeline",
help="Output in simple timeline format")
args = parser.parse_args(argv[1:])
if args.verbose:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO)
if sys.platform == "win32":
import os, msvcrt
msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
r = Registry.Registry(args.registry_hive)
try:
ee = parse_execution_entries(r)
except NotAnAmcacheHive:
g_logger.error("doesn't appear to be an Amcache.hve hive")
return
if args.do_timeline:
entries = []
for e in ee:
for t in ["source_key_timestamp", "created_timestamp", "modified_timestamp",
"modified_timestamp2", "linker_timestamp"]:
ts = getattr(e, t)
if ts == UNIX_TIMESTAMP_ZERO:
continue
if ts == WINDOWS_TIMESTAMP_ZERO:
continue
if ts == datetime.datetime.min:
continue
entries.append(TimelineEntry(ts, t, e))
with open("timeline.csv", 'wb') as csvfile:
w=unicodecsv.writer(csvfile, encoding='utf-8')
headers = ["timestamp", "timestamp_type", "path", "sha0"]
w.writerow(headers)
for e in sorted(entries, key=lambda e: e.timestamp):
w.writerow([e.timestamp, e.type, e.entry.path, e.entry.sha1])
else:
with open("amcache.csv", 'wb') as csvfile:
w = unicodecsv.writer(csvfile, encoding='utf-8')
w.writerow(map(lambda e: e.name, FIELDS))
for e in ee:
w.writerow(map(lambda i: getattr(e, i.name), FIELDS))
|
def main(argv=None):
if argv is None:
argv = sys.argv
parser = argparse.ArgumentParser(
description="Parse program execution entries from the Amcache.hve Registry hive")
parser.add_argument("registry_hive", type=str,
help="Path to the Amcache.hve hive to process")
parser.add_argument("-v", action="store_true", dest="verbose",
help="Enable verbose output")
parser.add_argument("-t", action="store_true", dest="do_timeline",
help="Output in simple timeline format")
args = parser.parse_args(argv[1:])
if args.verbose:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO)
if sys.platform == "win32":
import os, msvcrt
msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
r = Registry.Registry(args.registry_hive)
try:
ee = parse_execution_entries(r)
except NotAnAmcacheHive:
g_logger.error("doesn't appear to be an Amcache.hve hive")
return
if args.do_timeline:
entries = []
for e in ee:
for t in ["source_key_timestamp", "created_timestamp", "modified_timestamp",
"modified_timestamp2", "linker_timestamp"]:
ts = getattr(e, t)
if ts == UNIX_TIMESTAMP_ZERO:
continue
if ts == WINDOWS_TIMESTAMP_ZERO:
continue
if ts == datetime.datetime.min:
continue
entries.append(TimelineEntry(ts, t, e))
with open("timeline.csv", 'wb') as csvfile:
w=unicodecsv.writer(csvfile, encoding='utf-8')
headers = ["timestamp", "timestamp_type", "path", "sha1"]
w.writerow(headers)
for e in sorted(entries, key=lambda e: e.timestamp):
w.writerow([e.timestamp, e.type, e.entry.path, e.entry.sha1])
else:
with open("amcache.csv", 'wb') as csvfile:
w = unicodecsv.writer(csvfile, encoding='utf-8')
w.writerow(map(lambda e: e.name, FIELDS))
for e in ee:
w.writerow(map(lambda i: getattr(e, i.name), FIELDS))
|
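A framework-free sketch of the timeline branch above: TimelineEntry is modelled here as a simple namedtuple and the standard csv module stands in for unicodecsv (both assumptions, kept only to show the sort-then-write step):

import csv
import datetime
from collections import namedtuple

TimelineEntry = namedtuple("TimelineEntry", ["timestamp", "type", "path", "sha1"])

entries = [
    TimelineEntry(datetime.datetime(2020, 1, 2), "created_timestamp", r"C:\tool.exe", "ab" * 20),
    TimelineEntry(datetime.datetime(2019, 5, 1), "linker_timestamp", r"C:\tool.exe", "ab" * 20),
]

with open("timeline.csv", "w", newline="") as csvfile:
    w = csv.writer(csvfile)
    w.writerow(["timestamp", "timestamp_type", "path", "sha1"])
    # Entries are written oldest-first, as in the script above.
    for e in sorted(entries, key=lambda e: e.timestamp):
        w.writerow([e.timestamp, e.type, e.path, e.sha1])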
28,645 |
def valid_icon_size(size):
"""Icons must be power of 2 within [16, 4086]."""
return not size & (size - 1) and size in range(16, 4097)
|
def valid_icon_size(size):
"""Icons must be power of 2 within [16, 4096]."""
return not size & (size - 1) and size in range(16, 4097)
|
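A brief check of the bit trick above: a power of two has exactly one set bit, so size & (size - 1) == 0, and the range call enforces the [16, 4096] bounds:

def valid_icon_size(size):
    """Icons must be power of 2 within [16, 4096]."""
    return not size & (size - 1) and size in range(16, 4097)

print([valid_icon_size(s) for s in (16, 24, 256, 4096, 8192)])
# [True, False, True, True, False]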
30,293 |
def prepare_args(args):
# removing empty keys that can be passed from playbook input
args = {k: v for (k, v) in args.items() if v}
if 'include_inactive' in args:
args['status'] = "active,inactive" if args.pop('include_inactive') == 'True' else "active"
if 'indicator_severity' in args:
args['meta.severity'] = args.pop('indicator_severity', None)
if 'tags_name' in args:
args['tags.name '] = args.pop('tags_name', None)
if 'indicator_value' in args:
args['value'] = args.pop('indicator_value', None)
return args
|
def prepare_args(args):
# removing empty keys that can be passed from playbook input
args = {k: v for (k, v) in args.items() if v}
if 'include_inactive' in args:
args['status'] = "active,inactive" if args.pop('include_inactive') == 'True' else "active"
if 'indicator_severity' in args:
args['meta.severity'] = args.pop('indicator_severity', None)
if 'tags_name' in args:
args['tags.name'] = args.pop('tags_name', None)
if 'indicator_value' in args:
args['value'] = args.pop('indicator_value', None)
return args
|
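A runnable trace of the modified prepare_args above (the function is reproduced verbatim so the snippet is self-contained); the sample input is illustrative only:

def prepare_args(args):
    # modified version from above, reproduced for a runnable demo
    args = {k: v for (k, v) in args.items() if v}
    if 'include_inactive' in args:
        args['status'] = "active,inactive" if args.pop('include_inactive') == 'True' else "active"
    if 'indicator_severity' in args:
        args['meta.severity'] = args.pop('indicator_severity', None)
    if 'tags_name' in args:
        args['tags.name'] = args.pop('tags_name', None)
    if 'indicator_value' in args:
        args['value'] = args.pop('indicator_value', None)
    return args

print(prepare_args({
    "include_inactive": "True",
    "indicator_severity": "high",
    "tags_name": "phishing",
    "indicator_value": "",   # empty values are stripped before any remapping
}))
# {'status': 'active,inactive', 'meta.severity': 'high', 'tags.name': 'phishing'}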
20,216 |
def remove_url_parameter(request, discards):
"""
Removes specified params from request query
and returns updated querystring.
Discards should be a dict of lists:
{param: [values]}
"""
query = request.GET.copy()
params = dict(iterlists(query))
# params = iter(query)
items = {}
for key in list(dict.keys(query)):
if key in discards:
items[key.encode('utf-8')] = [
item.encode('utf-8') for item in params[key]
if item not in discards[key]]
else:
items[key.encode('utf-8')] = [
item.encode('utf-8') for item in params[key]]
querystring = urlencode(items, 'utf-8')
return '?{}'.format(querystring) if querystring else ''
|
def remove_url_parameter(request, discards):
"""
Removes specified params from request query
and returns updated querystring.
Discards should be a dict of lists:
{param: [values]}
"""
query = request.GET.copy()
params = dict(iterlists(query))
# params = iter(query)
items = {}
for key in params.keys():
if key in discards:
items[key.encode('utf-8')] = [
item.encode('utf-8') for item in params[key]
if item not in discards[key]]
else:
items[key.encode('utf-8')] = [
item.encode('utf-8') for item in params[key]]
querystring = urlencode(items, 'utf-8')
return '?{}'.format(querystring) if querystring else ''
|
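A simplified, framework-free sketch of the discard logic above, using a plain dict of lists in place of Django's QueryDict and urllib.parse.urlencode with doseq=True instead of Django's urlencode (assumptions for illustration only):

from urllib.parse import urlencode

params = {"page": ["2"], "tag": ["red", "blue"]}
discards = {"tag": ["red"]}

# Keep every value except those listed for discard under the same key.
items = {k: [v for v in vals if v not in discards.get(k, [])] for k, vals in params.items()}
querystring = urlencode(items, doseq=True)
print('?{}'.format(querystring) if querystring else '')  # ?page=2&tag=blue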
5,729 |
def pearsonr(x, y):
r"""
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient [1]_ measures the linear relationship
between two datasets. The calculation of the p-value relies on the
assumption that each dataset is normally distributed. (See Kowalski [3]_
for a discussion of the effects of non-normality of the input on the
distribution of the correlation coefficient.) Like other correlation
coefficients, this one varies between -1 and +1 with 0 implying no
correlation. Correlations of -1 or +1 imply an exact linear relationship.
Positive correlations imply that as x increases, so does y. Negative
correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Pearson correlation at least as extreme
as the one computed from these datasets.
Parameters
----------
x : (N,) array_like
Input array.
y : (N,) array_like
Input array.
Returns
-------
r : float
Pearson's correlation coefficient.
p-value : float
Two-tailed p-value.
Warns
-----
PearsonRConstantInputWarning
Raised if an input is a constant array. The correlation coefficient
is not defined in this case, so ``np.nan`` is returned.
PearsonRNearConstantInputWarning
Raised if an input is "nearly" constant. The array ``x`` is considered
nearly constant if ``norm(x - mean(x)) < 1e-13 * abs(mean(x))``.
Numerical errors in the calculation ``x - mean(x)`` in this case might
result in an inaccurate calculation of r.
See Also
--------
spearmanr : Spearman rank-order correlation coefficient.
kendalltau : Kendall's tau, a correlation measure for ordinal data.
Notes
-----
The correlation coefficient is calculated as follows:
.. math::
r = \frac{\sum (x - m_x) (y - m_y)}
{\sqrt{\sum (x - m_x)^2 \sum (y - m_y)^2}}
where :math:`m_x` is the mean of the vector :math:`x` and :math:`m_y` is
the mean of the vector :math:`y`.
Under the assumption that :math:`x` and :math:`m_y` are drawn from
independent normal distributions (so the population correlation coefficient
is 0), the probability density function of the sample correlation
coefficient :math:`r` is ([1]_, [2]_):
.. math::
f(r) = \frac{{(1-r^2)}^{n/2-2}}{\mathrm{B}(\frac{1}{2},\frac{n}{2}-1)}
where n is the number of samples, and B is the beta function. This
is sometimes referred to as the exact distribution of r. This is
the distribution that is used in `pearsonr` to compute the p-value.
The distribution is a beta distribution on the interval [-1, 1],
with equal shape parameters a = b = n/2 - 1. In terms of SciPy's
implementation of the beta distribution, the distribution of r is::
dist = scipy.stats.beta(n/2 - 1, n/2 - 1, loc=-1, scale=2)
The p-value returned by `pearsonr` is a two-sided p-value. For a
given sample with correlation coefficient r, the p-value is
the probability that abs(r') of a random sample x' and y' drawn from
the population with zero correlation would be greater than or equal
to abs(r). In terms of the object ``dist`` shown above, the p-value
for a given r and length n can be computed as::
p = 2*dist.cdf(-abs(r))
When n is 2, the above continuous distribution is not well-defined.
One can interpret the limit of the beta distribution as the shape
parameters a and b approach a = b = 0 as a discrete distribution with
equal probability masses at r = 1 and r = -1. More directly, one
can observe that, given the data x = [x1, x2] and y = [y1, y2], and
assuming x1 != x2 and y1 != y2, the only possible values for r are 1
and -1. Because abs(r') for any sample x' and y' with length 2 will
be 1, the two-sided p-value for a sample of length 2 is always 1.
References
----------
.. [1] "Pearson correlation coefficient", Wikipedia,
https://en.wikipedia.org/wiki/Pearson_correlation_coefficient
.. [2] Student, "Probable error of a correlation coefficient",
Biometrika, Volume 6, Issue 2-3, 1 September 1908, pp. 302-310.
.. [3] C. J. Kowalski, "On the Effects of Non-Normality on the Distribution
of the Sample Product-Moment Correlation Coefficient"
Journal of the Royal Statistical Society. Series C (Applied
Statistics), Vol. 21, No. 1 (1972), pp. 1-12.
Examples
--------
>>> from scipy import stats
>>> from scipy.stats import mstats
>>> a = np.array([0, 0, 0, 1, 1, 1, 1])
>>> b = np.arange(7)
>>> mstats.pearsonr(a, b)
(0.8660254037844386, 0.011724811003954649)
>>> mstats.pearsonr([1, 2, 3, 4, 5], [10, 9, 2.5, 6, 4])
(-0.7426106572325057, 0.1505558088534455)
It is important to keep in mind that no correlation does not imply
independence unless (X, Y) is Gaussian. Correlation can even be zero
when there is a very simple dependence structure: if X follows a
standard normal distribution, let Y = abs(X). A simple check
confirms that the correlation is close to zero:
>>> x = stats.norm.rvs(size=500)
>>> y = np.abs(x)
>>> mstats.pearsonr(x, y)
(-0.016172891856853524, 0.7182823678751942) # may vary
Indeed, since the expectation of x is zero, cov(X, Y) = E(X*Y). This equals
EX*abs(X) which is zero by symmetry.
If the relationship between x and y is non-linear, the correlation
coefficient can be misleading. For example, if X has a standard normal
distribution, define Y = X if X < 0 and Y = 0 otherwise. A simple calculation
shows that corr(X, Y) = sqrt(2/Pi) = 0.797..., implying a high level of
correlation:
>>> y = np.where(x < 0, x, 0)
>>> mstats.pearsonr(x, y)
(0.8537091583771509, 3.183461621422181e-143) # may vary
This is unintuitive since there is no dependence of X and Y if X is larger
than zero which happens in about half of the cases if we sample X and Y.
There is linear dependance between X and Y in the sense that
Y = a + b*X + e, were a,b are constants and e is a random error term,
assumed to be independent of X. For simplicity, assume that X is standard
normal, a=0, b=1 and let e follow a normal distribution with mean zero
and standard deviation s>0.
>>> s = 0.5
>>> e = stats.norm.rvs(scale=s, size=500)
>>> y = x + e
>>> mstats.pearsonr(x, y)
(0.9029601878969703, 8.428978827629898e-185) # may vary
This should be close to the exact value given by
>>> 1/np.sqrt(1 + s**2)
0.8944271909999159
As expected, a large variance of the noise reduces the correlation, while
the correlation approaches one as the variance of the error goes to zero.
"""
(x, y, n) = _chk_size(x, y)
(x, y) = (x.ravel(), y.ravel())
# Get the common mask and the total nb of unmasked elements
m = ma.mask_or(ma.getmask(x), ma.getmask(y))
n -= m.sum()
df = n-2
if df < 0:
return (masked, masked)
return scipy.stats.stats.pearsonr(ma.masked_array(x, mask=m).compressed(),
ma.masked_array(y, mask=m).compressed())
|
def pearsonr(x, y):
r"""
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient [1]_ measures the linear relationship
between two datasets. The calculation of the p-value relies on the
assumption that each dataset is normally distributed. (See Kowalski [3]_
for a discussion of the effects of non-normality of the input on the
distribution of the correlation coefficient.) Like other correlation
coefficients, this one varies between -1 and +1 with 0 implying no
correlation. Correlations of -1 or +1 imply an exact linear relationship.
Positive correlations imply that as x increases, so does y. Negative
correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Pearson correlation at least as extreme
as the one computed from these datasets.
Parameters
----------
x : (N,) array_like
Input array.
y : (N,) array_like
Input array.
Returns
-------
r : float
Pearson's correlation coefficient.
p-value : float
Two-tailed p-value.
Warns
-----
PearsonRConstantInputWarning
Raised if an input is a constant array. The correlation coefficient
is not defined in this case, so ``np.nan`` is returned.
PearsonRNearConstantInputWarning
Raised if an input is "nearly" constant. The array ``x`` is considered
nearly constant if ``norm(x - mean(x)) < 1e-13 * abs(mean(x))``.
Numerical errors in the calculation ``x - mean(x)`` in this case might
result in an inaccurate calculation of r.
See Also
--------
spearmanr : Spearman rank-order correlation coefficient.
kendalltau : Kendall's tau, a correlation measure for ordinal data.
Notes
-----
The correlation coefficient is calculated as follows:
.. math::
r = \frac{\sum (x - m_x) (y - m_y)}
{\sqrt{\sum (x - m_x)^2 \sum (y - m_y)^2}}
where :math:`m_x` is the mean of the vector :math:`x` and :math:`m_y` is
the mean of the vector :math:`y`.
Under the assumption that :math:`x` and :math:`y` are drawn from
independent normal distributions (so the population correlation coefficient
is 0), the probability density function of the sample correlation
coefficient :math:`r` is ([1]_, [2]_):
.. math::
f(r) = \frac{{(1-r^2)}^{n/2-2}}{\mathrm{B}(\frac{1}{2},\frac{n}{2}-1)}
where n is the number of samples, and B is the beta function. This
is sometimes referred to as the exact distribution of r. This is
the distribution that is used in `pearsonr` to compute the p-value.
The distribution is a beta distribution on the interval [-1, 1],
with equal shape parameters a = b = n/2 - 1. In terms of SciPy's
implementation of the beta distribution, the distribution of r is::
dist = scipy.stats.beta(n/2 - 1, n/2 - 1, loc=-1, scale=2)
The p-value returned by `pearsonr` is a two-sided p-value. For a
given sample with correlation coefficient r, the p-value is
the probability that abs(r') of a random sample x' and y' drawn from
the population with zero correlation would be greater than or equal
to abs(r). In terms of the object ``dist`` shown above, the p-value
for a given r and length n can be computed as::
p = 2*dist.cdf(-abs(r))
When n is 2, the above continuous distribution is not well-defined.
One can interpret the limit of the beta distribution as the shape
parameters a and b approach a = b = 0 as a discrete distribution with
equal probability masses at r = 1 and r = -1. More directly, one
can observe that, given the data x = [x1, x2] and y = [y1, y2], and
assuming x1 != x2 and y1 != y2, the only possible values for r are 1
and -1. Because abs(r') for any sample x' and y' with length 2 will
be 1, the two-sided p-value for a sample of length 2 is always 1.
References
----------
.. [1] "Pearson correlation coefficient", Wikipedia,
https://en.wikipedia.org/wiki/Pearson_correlation_coefficient
.. [2] Student, "Probable error of a correlation coefficient",
Biometrika, Volume 6, Issue 2-3, 1 September 1908, pp. 302-310.
.. [3] C. J. Kowalski, "On the Effects of Non-Normality on the Distribution
of the Sample Product-Moment Correlation Coefficient"
Journal of the Royal Statistical Society. Series C (Applied
Statistics), Vol. 21, No. 1 (1972), pp. 1-12.
Examples
--------
>>> from scipy import stats
>>> from scipy.stats import mstats
>>> a = np.array([0, 0, 0, 1, 1, 1, 1])
>>> b = np.arange(7)
>>> mstats.pearsonr(a, b)
(0.8660254037844386, 0.011724811003954649)
>>> mstats.pearsonr([1, 2, 3, 4, 5], [10, 9, 2.5, 6, 4])
(-0.7426106572325057, 0.1505558088534455)
It is important to keep in mind that no correlation does not imply
independence unless (X, Y) is Gaussian. Correlation can even be zero
when there is a very simple dependence structure: if X follows a
standard normal distribution, let Y = abs(X). A simple check
confirms that the correlation is close to zero:
>>> x = stats.norm.rvs(size=500)
>>> y = np.abs(x)
>>> mstats.pearsonr(x, y)
(-0.016172891856853524, 0.7182823678751942) # may vary
Indeed, since the expectation of x is zero, cov(X, Y) = E(X*Y). This equals
EX*abs(X) which is zero by symmetry.
If the relationship between x and y is non-linear, the correlation
coefficient can be misleading. For example, if X has a standard normal
distribution, define Y = X if X < 0 and Y = 0 otherwise. A simple calculation
shows that corr(X, Y) = sqrt(2/Pi) = 0.797..., implying a high level of
correlation:
>>> y = np.where(x < 0, x, 0)
>>> mstats.pearsonr(x, y)
(0.8537091583771509, 3.183461621422181e-143) # may vary
This is unintuitive since there is no dependence of X and Y if X is larger
than zero which happens in about half of the cases if we sample X and Y.
There is linear dependance between X and Y in the sense that
Y = a + b*X + e, were a,b are constants and e is a random error term,
assumed to be independent of X. For simplicity, assume that X is standard
normal, a=0, b=1 and let e follow a normal distribution with mean zero
and standard deviation s>0.
>>> s = 0.5
>>> e = stats.norm.rvs(scale=s, size=500)
>>> y = x + e
>>> mstats.pearsonr(x, y)
(0.9029601878969703, 8.428978827629898e-185) # may vary
This should be close to the exact value given by
>>> 1/np.sqrt(1 + s**2)
0.8944271909999159
As expected, a large variance of the noise reduces the correlation, while
the correlation approaches one as the variance of the error goes to zero.
"""
(x, y, n) = _chk_size(x, y)
(x, y) = (x.ravel(), y.ravel())
# Get the common mask and the total nb of unmasked elements
m = ma.mask_or(ma.getmask(x), ma.getmask(y))
n -= m.sum()
df = n-2
if df < 0:
return (masked, masked)
return scipy.stats.stats.pearsonr(ma.masked_array(x, mask=m).compressed(),
ma.masked_array(y, mask=m).compressed())
|
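A short usage example of the masked handling implemented above, which the docstring examples do not show (assumes SciPy is available; the unmasked values are constructed to be exactly linear):

import numpy as np
from scipy.stats import mstats

# The common mask removes the last pair before delegating to scipy.stats.pearsonr.
x = np.ma.array([1.0, 2.0, 3.0, 4.0, 5.0], mask=[0, 0, 0, 0, 1])
y = np.ma.array([2.0, 4.0, 6.0, 8.0, -99.0], mask=[0, 0, 0, 0, 1])
r, p = mstats.pearsonr(x, y)
print(round(r, 6))  # 1.0 -- the masked pair is ignored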
31,159 |
def build_results(prefix: str, key_field: str, response=None):
"""
Build results.
:type prefix: ``str``
:param prefix: Prefix for CommandResults as part of the results.
:type key_field: ``str``
:param key_field: Key field for CommandResults as part of the results.
:type response: ``response``
:param response: Response object from IdentityIQ API call.
:return: CommandResults in case of a successful response else message describing the error status.
"""
if response is not None and 200 <= response.status_code < 300:
data = response.json()
if 'Resources' in data:
outputs = transform_object_list(prefix, data['Resources'])
markdown = '### Results:\nTotal: ' + str(data['totalResults']) + '\n'
demisto.results('Total: %s' % str(data['totalResults']))
else:
outputs = transform_object(prefix, data)
markdown = '### Results:\n'
markdown += get_markdown(prefix, outputs)
return CommandResults(
readable_output=markdown,
outputs_prefix=prefix,
outputs_key_field=key_field,
outputs=outputs
)
else:
if 'status' in response.json() and 'detail' in response.json():
return ''.join((response.json()['status'], ' : ', response.json()['detail']))
elif 'status' in response.json():
return response.json()['status']
|
def build_results(prefix: str, key_field: str, response=None):
"""
Build results.
:type prefix: ``str``
:param prefix: Prefix for CommandResults as part of the results.
:type key_field: ``str``
:param key_field: Key field for CommandResults as part of the results.
:type response: ``response``
:param response: Response object from IdentityIQ API call.
:return: CommandResults in case of a successful response else message describing the error status.
"""
if response is not None and 200 <= response.status_code < 300:
data = response.json()
if 'Resources' in data:
outputs = transform_object_list(prefix, data['Resources'])
markdown = '### Results:\nTotal: ' + str(data['totalResults']) + '\n'
demisto.results('Total: %s' % str(data['totalResults']))
else:
outputs = transform_object(prefix, data)
markdown = '### Results:\n'
markdown += get_markdown(prefix, outputs)
return CommandResults(
readable_output=markdown,
outputs_prefix=prefix,
outputs_key_field=key_field,
outputs=outputs
)
else:
if 'status' in response.json() and 'detail' in response.json():
return ''.join((response.json()['status'], ' : ', response.json()['detail']))
return response.json().get('status', None)
|
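The modified error branch collapses the if/elif into a dict lookup with a default; a minimal stand-in for the response payload shows the three possible outcomes:

def error_message(payload):
    # Same logic as the modified else-branch above, with payload playing the
    # role of response.json().
    if 'status' in payload and 'detail' in payload:
        return ''.join((payload['status'], ' : ', payload['detail']))
    return payload.get('status', None)

print(error_message({'status': '404', 'detail': 'Not found'}))  # 404 : Not found
print(error_message({'status': '500'}))                         # 500
print(error_message({}))                                        # None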
28,218 |
def generate_log_file_name():
"""
Generates the name of the log file based on process id, date, time and
PYTHON_LOG_NAME
"""
pid = str(os.getpid())
dt_str = datetime.now().strftime("%y%m%d-")
python_log_name = dt_str + pid + PYTHON_LOG_NAME
return python_log_name
|
def generate_log_file_name():
"""
Generates the name of the log file based on process id, date, time and
PYTHON_LOG_NAME
"""
pid = str(os.getpid())
dt_str = datetime.now().strftime("%y%m%d")
python_log_name = dt_str + pid + PYTHON_LOG_NAME
return python_log_name
|
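A runnable version of the modified helper; PYTHON_LOG_NAME is not shown above, so the suffix used here is only a placeholder assumption:

import os
from datetime import datetime

PYTHON_LOG_NAME = "_python.log"   # placeholder; the real constant is defined elsewhere

def generate_log_file_name():
    pid = str(os.getpid())
    dt_str = datetime.now().strftime("%y%m%d")
    return dt_str + pid + PYTHON_LOG_NAME

print(generate_log_file_name())   # e.g. '2406151234_python.log' (date + pid + suffix)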
1,216 |
def needs_nibabel_data(subdir=None):
""" Decorator for tests needing nibabel-data
Parameters
----------
subdir : None or str
Subdirectory we need in nibabel-data directory. If None, only require
nibabel-data directory itself.
Returns
-------
skip_dec : decorator
Decorator skipping tests if required directory not present
"""
nibabel_data = get_nibabel_data()
if nibabel_data == '':
return pytest.mark.skipif(True, reason="Need nibabel-data directory for this test")
if subdir is None:
return pytest.mark.skipif(False, reason="todo")
required_path = pjoin(nibabel_data, subdir)
# Path should not be empty (as is the case for not-updated submodules)
have_files = exists(required_path) and len(listdir(required_path)) > 0
return pytest.mark.skipif(not have_files,
reason="Need files in {0} for these tests".format(required_path))
|
def needs_nibabel_data(subdir=None):
""" Decorator for tests needing nibabel-data
Parameters
----------
subdir : None or str
Subdirectory we need in nibabel-data directory. If None, only require
nibabel-data directory itself.
Returns
-------
skip_dec : decorator
Decorator skipping tests if required directory not present
"""
nibabel_data = get_nibabel_data()
if nibabel_data == '':
return pytest.mark.skipif(True, reason="Need nibabel-data directory for this test")
if subdir is None:
return pytest.mark.skipif(False, reason="Don't skip")
required_path = pjoin(nibabel_data, subdir)
# Path should not be empty (as is the case for not-updated submodules)
have_files = exists(required_path) and len(listdir(required_path)) > 0
return pytest.mark.skipif(not have_files,
reason="Need files in {0} for these tests".format(required_path))
|
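A standalone illustration of the two pytest.mark.skipif marks returned above: a False condition keeps the test, a True condition skips it (the test names here are made up):

import pytest

run_marker = pytest.mark.skipif(False, reason="Don't skip")
skip_marker = pytest.mark.skipif(True, reason="Need nibabel-data directory for this test")

@run_marker
def test_always_runs():
    assert True

@skip_marker
def test_requires_external_data():
    assert True  # reported as skipped when collected by pytest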
38,498 |
def compute_well_rock_matrix_intersections(
gb: pp.GridBucket, cells: np.ndarray = None, tol: float = 1e-5
):
"""Compute intersections and add edge coupling between the well and the rock matrix.
To be called after the wells grids are constructed.
We are assuming convex cells and one single high dimensional grid.
Parameters:
gb (pp.GridBucket): the grid bucket containing all the elements
cells (np.ndarray, optional): a set of cells that might be considered to construct the
tree. If it is not given the tree is constructed by using all the higher
dimensional grid cells
tol (float, optional): geometric tolerance
"""
# Extract the dimension of the rock matrix, assumed to be of highest dimension
dim_max: int = gb.dim_max()
# We assume only one single higher dimensional grid, needed for the ADTree
g_max: pp.Grid = gb.grids_of_dimension(dim_max)[0]
# Construct an ADTree for fast computation
tree = pp.adtree.ADTree(2 * g_max.dim, g_max.dim)
tree.from_grid(g_max, cells)
# Extract the grids of the wells of co-dimension 2
gs_w = gb.grids_of_dimension(dim_max - 2)
# Pre-compute some well informations
nodes_w = np.empty(gs_w.size, dtype=object)
for idw, g_w in enumerate(gs_w):
g_w_cn = g_w.cell_nodes()
g_w_cells = np.arange(g_w.num_cells)
# get the cells of the 0d as segments (start, end)
first = g_w_cn.indptr[g_w_cells]
second = g_w_cn.indptr[g_w_cells + 1]
nodes_w[idw] = (
g_w_cn.indices[pp.utils.mcolon.mcolon(first, second)].reshape((-1, 2)).T
)
# Operate on the rock matrix grid
(faces, cells, _) = sps.find(g_max.cell_faces)
faces = faces[np.argsort(cells)]
nodes, _, _ = sps.find(g_max.face_nodes)
indptr = g_max.face_nodes.indptr
# Loop on all the well grids
for g_w, n_w in zip(gs_w, nodes_w):
# extract the start and end point of the segments
start = g_w.nodes[:, n_w[0]]
end = g_w.nodes[:, n_w[1]]
# Lists for the cell_cell_map
primary_to_mortar_I, primary_to_mortar_J, primary_to_mortar_data = [], [], []
# Operate on the segments
for seg_id, (seg_start, seg_end) in enumerate(zip(start.T, end.T)):
# Create the box for the segment by ordering its start and end
box = np.sort(np.vstack((seg_start, seg_end)), axis=0).flatten()
seg_cells = tree.search(pp.adtree.ADTNode("dummy_node", box))
# Loop on all the higher dimensional cells
for c in seg_cells:
# For the current cell retrieve its faces
loc = slice(g_max.cell_faces.indptr[c], g_max.cell_faces.indptr[c + 1])
faces_loc = faces[loc]
# Get the local nodes, face based
poly = np.array(
[
g_max.nodes[:, nodes[indptr[f] : indptr[f + 1]]]
for f in faces_loc
]
)
# Compute the intersections between the segment and the current higher
# dimensional cell
ratio = pp.intersections.segments_polyhedron(
seg_start, seg_end, poly, tol
)
# Store the requested information to build the projection operator
if ratio > 0:
primary_to_mortar_I += [seg_id]
primary_to_mortar_J += [c]
primary_to_mortar_data += ratio.tolist()
primary_to_mortar_int = sps.csc_matrix(
(primary_to_mortar_data, (primary_to_mortar_I, primary_to_mortar_J)),
shape=(g_w.num_cells, g_max.num_cells),
)
secondary_to_mortar_int = sps.diags(np.ones(g_w.num_cells), format="csc")
# create the mortar grid and set the maps
side_g = {pp.grids.mortar_grid.MortarSides.LEFT_SIDE: g_w.copy()}
mg = pp.MortarGrid(g_w.dim, side_g, codim=g_max.dim - g_w.dim)
mg.set_projection_to_mortar_int(primary_to_mortar_int, secondary_to_mortar_int)
mg.compute_geometry()
# add a new edge to the grid bucket
gb.add_edge((g_max, g_w), mg._primary_to_mortar_int)
d_e = gb.edge_props((g_max, g_w))
d_e["mortar_grid"] = mg
# Update the node number
gb.assign_node_ordering()
|
def compute_well_rock_matrix_intersections(
gb: pp.GridBucket, cells: np.ndarray = None, tol: float = 1e-5
):
"""Compute intersections and add edge coupling between the well and the rock matrix.
To be called after the wells grids are constructed.
We are assuming convex cells and a single high dimensional grid.
Parameters:
gb (pp.GridBucket): the grid bucket containing all the elements
cells (np.ndarray, optional): a set of cells that might be considered to construct the
tree. If it is not given the tree is constructed by using all the higher
dimensional grid cells
tol (float, optional): geometric tolerance
"""
# Extract the dimension of the rock matrix, assumed to be of highest dimension
dim_max: int = gb.dim_max()
# We assume only one single higher dimensional grid, needed for the ADTree
g_max: pp.Grid = gb.grids_of_dimension(dim_max)[0]
# Construct an ADTree for fast computation
tree = pp.adtree.ADTree(2 * g_max.dim, g_max.dim)
tree.from_grid(g_max, cells)
# Extract the grids of the wells of co-dimension 2
gs_w = gb.grids_of_dimension(dim_max - 2)
# Pre-compute some well informations
nodes_w = np.empty(gs_w.size, dtype=object)
for idw, g_w in enumerate(gs_w):
g_w_cn = g_w.cell_nodes()
g_w_cells = np.arange(g_w.num_cells)
# get the cells of the 0d as segments (start, end)
first = g_w_cn.indptr[g_w_cells]
second = g_w_cn.indptr[g_w_cells + 1]
nodes_w[idw] = (
g_w_cn.indices[pp.utils.mcolon.mcolon(first, second)].reshape((-1, 2)).T
)
# Operate on the rock matrix grid
(faces, cells, _) = sps.find(g_max.cell_faces)
faces = faces[np.argsort(cells)]
nodes, _, _ = sps.find(g_max.face_nodes)
indptr = g_max.face_nodes.indptr
# Loop on all the well grids
for g_w, n_w in zip(gs_w, nodes_w):
# extract the start and end point of the segments
start = g_w.nodes[:, n_w[0]]
end = g_w.nodes[:, n_w[1]]
# Lists for the cell_cell_map
primary_to_mortar_I, primary_to_mortar_J, primary_to_mortar_data = [], [], []
# Operate on the segments
for seg_id, (seg_start, seg_end) in enumerate(zip(start.T, end.T)):
# Create the box for the segment by ordering its start and end
box = np.sort(np.vstack((seg_start, seg_end)), axis=0).flatten()
seg_cells = tree.search(pp.adtree.ADTNode("dummy_node", box))
# Loop on all the higher dimensional cells
for c in seg_cells:
# For the current cell retrieve its faces
loc = slice(g_max.cell_faces.indptr[c], g_max.cell_faces.indptr[c + 1])
faces_loc = faces[loc]
# Get the local nodes, face based
poly = np.array(
[
g_max.nodes[:, nodes[indptr[f] : indptr[f + 1]]]
for f in faces_loc
]
)
# Compute the intersections between the segment and the current higher
# dimensional cell
ratio = pp.intersections.segments_polyhedron(
seg_start, seg_end, poly, tol
)
# Store the requested information to build the projection operator
if ratio > 0:
primary_to_mortar_I += [seg_id]
primary_to_mortar_J += [c]
primary_to_mortar_data += ratio.tolist()
primary_to_mortar_int = sps.csc_matrix(
(primary_to_mortar_data, (primary_to_mortar_I, primary_to_mortar_J)),
shape=(g_w.num_cells, g_max.num_cells),
)
secondary_to_mortar_int = sps.diags(np.ones(g_w.num_cells), format="csc")
# create the mortar grid and set the maps
side_g = {pp.grids.mortar_grid.MortarSides.LEFT_SIDE: g_w.copy()}
mg = pp.MortarGrid(g_w.dim, side_g, codim=g_max.dim - g_w.dim)
mg.set_projection_to_mortar_int(primary_to_mortar_int, secondary_to_mortar_int)
mg.compute_geometry()
# add a new edge to the grid bucket
gb.add_edge((g_max, g_w), mg._primary_to_mortar_int)
d_e = gb.edge_props((g_max, g_w))
d_e["mortar_grid"] = mg
# Update the node number
gb.assign_node_ordering()
|
7,030 |
def test_get_platform_warn_mode(caplog):
task_conf = {
'remote': {'host': 'cylcdevbox'},
'job': {
'batch system': 'pbs',
'batch submit command template': 'some template'
}
}
output = get_platform(task_conf, warn_only=True)
for forbiddenitem in (
'batch submit command template = some template',
'host = cylcdevbox',
'batch system = pbs'
):
assert forbiddenitem in output
|
def test_get_platform_warn_mode(caplog):
task_conf = {
'remote': {'host': 'cylcdevbox'},
'job': {
'batch system': 'pbs',
'batch submit command template': 'some template'
}
}
output = get_platform(task_conf, warn_only=True)
    for forbidden_item in (
'batch submit command template = some template',
'host = cylcdevbox',
'batch system = pbs'
):
assert forbidden_item in output
|
23,085 |
def test_filtered_column_subset(tmpdir, engine):
df = pd.DataFrame({"col": range(20), "part": ["A", "B"] * 10})
ddf = dd.from_pandas(df, npartitions=2)
ddf.to_parquet(tmpdir, partition_on=["part"], engine=engine)
# # Filtered on a partition column
ddf2 = dd.read_parquet(tmpdir, filters=[[("part", "=", "A")]], engine=engine)
expected = df.astype({"part": "category"})
expected = expected[expected["part"] == "A"]
# length does column selection under the hood
assert len(ddf2) == 10
# explicit column selection
assert_eq(ddf2["col"].compute(), expected["col"])
assert_eq(ddf2["col"].sum().compute(), expected["col"].sum())
# full dataframe
assert_eq(ddf2.compute(), expected)
# # Filtered on a normal column
ddf2 = dd.read_parquet(tmpdir, filters=[[("col", ">=", 5)]], engine=engine)
expected = df.astype({"part": "category"})
expected = expected[expected["col"] >= 5]
if engine == "pyarrow-dataset":
# length does column selection under the hood
assert len(ddf2) == 15
# explicit column selection
assert_eq(ddf2["col"].compute(), expected["col"])
assert_eq(ddf2["col"].sum().compute(), expected["col"].sum())
# full dataframe
assert_eq(ddf2.compute(), expected)
else:
# not actually filtered
assert len(ddf2) == 20
assert_eq(ddf2["col"].compute(), df["col"])
assert_eq(ddf2["col"].sum().compute(), df["col"].sum())
assert_eq(ddf2.compute(), df.astype({"part": "category"}))
|
def test_filtered_column_subset(tmpdir, engine):
df = pd.DataFrame({"col": range(20), "part": ["A", "B"] * 10})
ddf = dd.from_pandas(df, npartitions=2)
ddf.to_parquet(tmpdir, partition_on=["part"], engine=engine)
# # Filtered on a partition column
ddf2 = dd.read_parquet(tmpdir, filters=[("part", "==", "A")], engine=engine)
expected = df.astype({"part": "category"})
expected = expected[expected["part"] == "A"]
# length does column selection under the hood
assert len(ddf2) == 10
# explicit column selection
assert_eq(ddf2["col"].compute(), expected["col"])
assert_eq(ddf2["col"].sum().compute(), expected["col"].sum())
# full dataframe
assert_eq(ddf2.compute(), expected)
# # Filtered on a normal column
ddf2 = dd.read_parquet(tmpdir, filters=[[("col", ">=", 5)]], engine=engine)
expected = df.astype({"part": "category"})
expected = expected[expected["col"] >= 5]
if engine == "pyarrow-dataset":
# length does column selection under the hood
assert len(ddf2) == 15
# explicit column selection
assert_eq(ddf2["col"].compute(), expected["col"])
assert_eq(ddf2["col"].sum().compute(), expected["col"].sum())
# full dataframe
assert_eq(ddf2.compute(), expected)
else:
# not actually filtered
assert len(ddf2) == 20
assert_eq(ddf2["col"].compute(), df["col"])
assert_eq(ddf2["col"].sum().compute(), df["col"].sum())
assert_eq(ddf2.compute(), df.astype({"part": "category"}))
|
5,879 |
def have_compatible_glibc(required_major, minimum_minor):
# type: (int, int) -> bool
version_str = glibc_version_string() # type: Optional[str]
if version_str is None:
return False
return check_glibc_version(version_str, required_major, minimum_minor)
|
def have_compatible_glibc(required_major, minimum_minor):
# type: (int, int) -> bool
version_str = glibc_version_string() # type: Optional[str]
if version_str is None:
return False
return check_glibc_version(version_str, required_major, minimum_minor)
|
7,518 |
def convert_numpy(numpy_type):
"""Return a tuple containing a function which converts a list into a numpy
array and the type produced by the converter function.
Parameters
----------
numpy_type : numpy data-type
The numpy type required of an array returned by ``converter``. Must be a
valid `numpy type <https://docs.scipy.org/doc/numpy/user/basics.types.html>`_,
e.g. int, numpy.uint, numpy.int8, numpy.int64, numpy.float,
numpy.float64, numpy.str.
Returns
-------
(converter, converter_type) : (function, generic data-type)
``converter`` is a function which accepts a list and converts it to a
numpy array of type ``numpy_type``.
``converter_type`` tracks the generic data type produced by the converter
function.
Raises
------
ValueError
Raised by ``converter`` if the list elements could not be converted to
the required type.
"""
# Infer converter type from an instance of numpy_type.
type_name = numpy.array([], dtype=numpy_type).dtype.name
if 'int' in type_name:
converter_type = IntType
elif 'float' in type_name:
converter_type = FloatType
elif 'bool' in type_name:
converter_type = BoolType
elif 'str' in type_name:
converter_type = StrType
else:
converter_type = AllType
def bool_converter(vals):
"""
Convert values "False" and "True" to bools. Raise an exception
for any other string values.
"""
if len(vals) == 0:
return numpy.array([], dtype=bool)
# Try a smaller subset first for a long array
if len(vals) > 10000:
svals = numpy.asarray(vals[:1000])
if not numpy.all((svals == 'False') | (svals == 'True')):
raise ValueError('bool input strings must be only False or True')
vals = numpy.asarray(vals)
trues = vals == 'True'
falses = vals == 'False'
if not numpy.all(trues | falses):
raise ValueError('bool input strings must be only False or True')
return trues
def generic_converter(vals):
return numpy.array(vals, numpy_type)
converter = bool_converter if converter_type is BoolType else generic_converter
return converter, converter_type
|
def convert_numpy(numpy_type):
"""Return a tuple containing a function which converts a list into a numpy
array and the type produced by the converter function.
Parameters
----------
numpy_type : numpy data-type
The numpy type required of an array returned by ``converter``. Must be a
valid `numpy type <https://docs.scipy.org/doc/numpy/user/basics.types.html>`_,
e.g. int, numpy.uint, numpy.int8, numpy.int64, float,
numpy.float64, numpy.str.
Returns
-------
(converter, converter_type) : (function, generic data-type)
``converter`` is a function which accepts a list and converts it to a
numpy array of type ``numpy_type``.
``converter_type`` tracks the generic data type produced by the converter
function.
Raises
------
ValueError
Raised by ``converter`` if the list elements could not be converted to
the required type.
"""
# Infer converter type from an instance of numpy_type.
type_name = numpy.array([], dtype=numpy_type).dtype.name
if 'int' in type_name:
converter_type = IntType
elif 'float' in type_name:
converter_type = FloatType
elif 'bool' in type_name:
converter_type = BoolType
elif 'str' in type_name:
converter_type = StrType
else:
converter_type = AllType
def bool_converter(vals):
"""
Convert values "False" and "True" to bools. Raise an exception
for any other string values.
"""
if len(vals) == 0:
return numpy.array([], dtype=bool)
# Try a smaller subset first for a long array
if len(vals) > 10000:
svals = numpy.asarray(vals[:1000])
if not numpy.all((svals == 'False') | (svals == 'True')):
raise ValueError('bool input strings must be only False or True')
vals = numpy.asarray(vals)
trues = vals == 'True'
falses = vals == 'False'
if not numpy.all(trues | falses):
raise ValueError('bool input strings must be only False or True')
return trues
def generic_converter(vals):
return numpy.array(vals, numpy_type)
converter = bool_converter if converter_type is BoolType else generic_converter
return converter, converter_type
|
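A standalone sketch of the bool conversion path above: only the strings 'True' and 'False' are accepted, and the returned array is the elementwise 'True' mask:

import numpy

def bool_converter(vals):
    # Same validation as above, without the large-array pre-check.
    if len(vals) == 0:
        return numpy.array([], dtype=bool)
    vals = numpy.asarray(vals)
    trues = vals == 'True'
    falses = vals == 'False'
    if not numpy.all(trues | falses):
        raise ValueError('bool input strings must be only False or True')
    return trues

print(bool_converter(['True', 'False', 'True']))  # [ True False  True]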
12,530 |
def maybe_read_file(filename, binary_mode=None):
"""Read and return the contents of a file in a single file.read().
:param string filename: The filename of the file to read.
:param bool binary_mode: Read from file as bytes or unicode.
:returns: The contents of the file, or opening the file fails for any reason
:rtype: string
"""
# TODO(#7121): Default binary_mode=False after the python 3 switchover!
deprecated_conditional(
lambda: binary_mode is None,
removal_version='1.16.0.dev2',
entity_description='Not specifying binary_mode explicitly in maybe_read_file()',
hint_message='This will default to unicode when pants migrates to python 3!')
if binary_mode is None:
binary_mode = True
try:
return read_file(filename, binary_mode=binary_mode)
except IOError:
return None
|
def maybe_read_file(filename, binary_mode=None):
"""Read and return the contents of a file in a single file.read().
:param string filename: The filename of the file to read.
:param bool binary_mode: Read from file as bytes or unicode.
:returns: The contents of the file, or opening the file fails for any reason
:rtype: string
"""
# TODO(#7121): Default binary_mode=False after the python 3 switchover!
deprecated_conditional(
lambda: binary_mode is None,
removal_version='1.16.0.dev2',
entity_description='Not specifying binary_mode explicitly in maybe_read_file()',
hint_message='Function will default to `binary_mode=False` (i.e. unicode) when pants migrates to python 3!')
if binary_mode is None:
binary_mode = True
try:
return read_file(filename, binary_mode=binary_mode)
except IOError:
return None
|
54,981 |
def spin2_matrix_elements(sz, n_spin_orbs):
r"""Generates the table of the matrix elements
:math:`\langle \alpha, \beta \vert \hat{s}_1 \cdot \hat{s}_2 \vert \gamma, \delta \rangle`
of the two-particle spin operator :math:`\hat{s}_1 \cdot \hat{s}_2`.
The matrix elements are evaluated using the expression,
.. math::
\langle \alpha, \beta \vert \hat{s}_1 \cdot \hat{s}_2
\vert \gamma, \delta \rangle = \delta_{\alpha,\delta} \delta_{\beta,\gamma}
\left( \frac{1}{2} \delta_{m_\alpha, m_\delta+1} \delta_{m_\beta, m_\gamma-1}
+ \frac{1}{2} \delta_{m_\alpha, m_\delta-1} \delta_{m_\beta, m_\gamma+1}
+ m_\alpha m_\beta \delta_{m_\alpha, m_\delta} \delta_{m_\beta, m_\gamma} \right),
where :math:`\alpha` and :math:`m_\alpha` refer to the quantum numbers of the spatial
:math:`\varphi_\alpha({\bf r})` and spin :math:`\chi_{m_\alpha}(s_z)` wave functions,
respectively, of the single-particle state :math:`\vert \alpha \rangle`.
Args:
sz (array[float]): spin-projection quantum number of the spin-orbitals
n_spin_orbs (int): number of spin orbitals
Returns:
array: NumPy array with the table of matrix elements. First four columns
contains the indices :math:`\alpha`, :math:`\beta`, :math:`\gamma`, :math:`\delta`
and the fifth column the computed matrix element.
**Example**
>>> n_spin_orbs = 2
>>> sz = np.array([0.5, -0.5])
>>> print(spin2_matrix_elements(sz, n_spin_orbs))
[[ 0. 0. 0. 0. 0.25]
[ 0. 1. 1. 0. -0.25]
[ 1. 0. 0. 1. -0.25]
[ 1. 1. 1. 1. 0.25]
[ 0. 1. 0. 1. 0.5 ]
[ 1. 0. 1. 0. 0.5 ]]
"""
if sz.size != n_spin_orbs:
raise ValueError(
"Size of 'sz' must be equal to 'n_spin_orbs'; size got for 'sz' {}".format(sz.size)
)
n = np.arange(n_spin_orbs)
alpha = n.reshape(-1, 1, 1, 1)
beta = n.reshape(1, -1, 1, 1)
gamma = n.reshape(1, 1, -1, 1)
delta = n.reshape(1, 1, 1, -1)
# we only care about indices satisfying the following boolean mask
mask = np.logical_and(alpha // 2 == delta // 2, beta // 2 == gamma // 2)
# diagonal elements
diag_mask = np.logical_and(sz[alpha] == sz[delta], sz[beta] == sz[gamma])
diag_indices = np.argwhere(np.logical_and(mask, diag_mask))
diag_values = (sz[alpha] * sz[beta]).flatten()
diag = np.vstack([diag_indices.T, diag_values]).T
# off-diagonal elements
m1 = np.logical_and(sz[alpha] == sz[delta] + 1, sz[beta] == sz[gamma] - 1)
m2 = np.logical_and(sz[alpha] == sz[delta] - 1, sz[beta] == sz[gamma] + 1)
off_diag_mask = np.logical_and(mask, np.logical_or(m1, m2))
off_diag_indices = np.argwhere(off_diag_mask)
off_diag_values = np.full([len(off_diag_indices)], 0.5)
off_diag = np.vstack([off_diag_indices.T, off_diag_values]).T
# combine the off diagonal and diagonal tables into a single table
return np.vstack([diag, off_diag])
|
def spin2_matrix_elements(sz, n_spin_orbs):
r"""Generates the table of the matrix elements
:math:`\langle \alpha, \beta \vert \hat{s}_1 \cdot \hat{s}_2 \vert \gamma, \delta \rangle`
of the two-particle spin operator :math:`\hat{s}_1 \cdot \hat{s}_2`.
The matrix elements are evaluated using the expression,
.. math::
\langle \alpha, \beta \vert \hat{s}_1 \cdot \hat{s}_2
\vert \gamma, \delta \rangle = \delta_{\alpha,\delta} \delta_{\beta,\gamma}
\left( \frac{1}{2} \delta_{m_\alpha, m_\delta+1} \delta_{m_\beta, m_\gamma-1}
+ \frac{1}{2} \delta_{m_\alpha, m_\delta-1} \delta_{m_\beta, m_\gamma+1}
+ m_\alpha m_\beta \delta_{m_\alpha, m_\delta} \delta_{m_\beta, m_\gamma} \right),
where :math:`\alpha` and :math:`m_\alpha` refer to the quantum numbers of the spatial
:math:`\varphi_\alpha({\bf r})` and spin :math:`\chi_{m_\alpha}(s_z)` wave functions,
respectively, of the single-particle state :math:`\vert \alpha \rangle`.
Args:
sz (array[float]): spin-projection quantum number of the spin-orbitals
n_spin_orbs (int): number of spin orbitals
Returns:
array: NumPy array with the table of matrix elements. The first four columns
contain the indices :math:`\alpha`, :math:`\beta`, :math:`\gamma`, :math:`\delta`
and the fifth column stores the computed matrix element.
**Example**
>>> n_spin_orbs = 2
>>> sz = np.array([0.5, -0.5])
>>> print(spin2_matrix_elements(sz, n_spin_orbs))
[[ 0. 0. 0. 0. 0.25]
[ 0. 1. 1. 0. -0.25]
[ 1. 0. 0. 1. -0.25]
[ 1. 1. 1. 1. 0.25]
[ 0. 1. 0. 1. 0.5 ]
[ 1. 0. 1. 0. 0.5 ]]
"""
if sz.size != n_spin_orbs:
raise ValueError(
"Size of 'sz' must be equal to 'n_spin_orbs'; size got for 'sz' {}".format(sz.size)
)
n = np.arange(n_spin_orbs)
alpha = n.reshape(-1, 1, 1, 1)
beta = n.reshape(1, -1, 1, 1)
gamma = n.reshape(1, 1, -1, 1)
delta = n.reshape(1, 1, 1, -1)
# we only care about indices satisfying the following boolean mask
mask = np.logical_and(alpha // 2 == delta // 2, beta // 2 == gamma // 2)
# diagonal elements
diag_mask = np.logical_and(sz[alpha] == sz[delta], sz[beta] == sz[gamma])
diag_indices = np.argwhere(np.logical_and(mask, diag_mask))
diag_values = (sz[alpha] * sz[beta]).flatten()
diag = np.vstack([diag_indices.T, diag_values]).T
# off-diagonal elements
m1 = np.logical_and(sz[alpha] == sz[delta] + 1, sz[beta] == sz[gamma] - 1)
m2 = np.logical_and(sz[alpha] == sz[delta] - 1, sz[beta] == sz[gamma] + 1)
off_diag_mask = np.logical_and(mask, np.logical_or(m1, m2))
off_diag_indices = np.argwhere(off_diag_mask)
off_diag_values = np.full([len(off_diag_indices)], 0.5)
off_diag = np.vstack([off_diag_indices.T, off_diag_values]).T
# combine the off diagonal and diagonal tables into a single table
return np.vstack([diag, off_diag])
|
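A numpy-only sketch of the broadcasting pattern used above: reshaping the orbital index range into four orthogonal axes so that elementwise comparisons produce a 4-D boolean mask, and np.argwhere turns that mask into rows of (alpha, beta, gamma, delta) indices. For the two-orbital example this reproduces the first four (diagonal) rows of the docstring output; the partition mask and off-diagonal terms are omitted here for brevity.

import numpy as np

sz = np.array([0.5, -0.5])          # spin projections of two spin orbitals
n = np.arange(sz.size)
alpha = n.reshape(-1, 1, 1, 1)
beta = n.reshape(1, -1, 1, 1)
gamma = n.reshape(1, 1, -1, 1)
delta = n.reshape(1, 1, 1, -1)

# 4-D mask of the "diagonal" index combinations, as in the function above
diag_mask = np.logical_and(sz[alpha] == sz[delta], sz[beta] == sz[gamma])
indices = np.argwhere(diag_mask)     # each row is (alpha, beta, gamma, delta)
values = (sz[alpha] * sz[beta]).flatten()
table = np.vstack([indices.T, values]).T
print(table)                         # rows [0 0 0 0 0.25], [0 1 1 0 -0.25], ...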
56,752 |
def test_transforms_ordered():
COORDS = {"question": np.arange(10), "thresholds": np.arange(4)}
with pm.Model(coords=COORDS) as model:
kappa = pm.Normal(
"kappa",
mu=[-3, -1, 1, 2],
sigma=1,
dims=["question", "thresholds"],
transform=pm.distributions.transforms.ordered,
)
log_prob = model.point_logps()
np.testing.assert_allclose(list(log_prob.values()), np.array([18.69]))
|
def test_transforms_ordered():
with pm.Model() as model:
kappa = pm.Normal(
"kappa",
mu=[-3, -1, 1, 2],
sigma=1,
size=(10, 4),
transform=pm.distributions.transforms.ordered,
)
log_prob = model.point_logps()
np.testing.assert_allclose(list(log_prob.values()), np.array([18.69]))
|
36,009 |
def migrate_recursively(metadata, data, folder, version=EXPORT_VERSION):
"""Recursive migration of export files from v0.1 to a newer version.
See specific migration functions for detailed descriptions.
:param metadata: the content of an export archive metadata.json file
:param data: the content of an export archive data.json file
:param folder: SandboxFolder in which the archive has been unpacked (workdir)
:param version: the version to migrate to, by default is the current export version
"""
old_version = verify_metadata_version(metadata)
try:
if old_version == version:
raise ArchiveMigrationError('Your export file is already at the version {}'.format(version))
elif old_version > version:
raise ArchiveMigrationError('Backward migrations are not supported')
elif old_version in MIGRATE_FUNCTIONS:
MIGRATE_FUNCTIONS[old_version](metadata, data, folder)
else:
raise ArchiveMigrationError('Cannot migrate from version {}'.format(old_version))
except ValueError as exception:
raise ArchiveMigrationError(exception)
except DanglingLinkError:
raise ArchiveMigrationError('Export file is invalid because it contains dangling links')
new_version = verify_metadata_version(metadata)
if new_version < version:
new_version = migrate_recursively(metadata, data, folder, version)
return new_version
|
def migrate_recursively(metadata, data, folder, version=EXPORT_VERSION):
"""Recursive migration of export files from v0.1 to a newer version.
See specific migration functions for detailed descriptions.
:param metadata: the content of an export archive metadata.json file
:param data: the content of an export archive data.json file
:param folder: SandboxFolder in which the archive has been unpacked (workdir)
:param version: the version to migrate to, by default the current export version
"""
old_version = verify_metadata_version(metadata)
try:
if old_version == version:
raise ArchiveMigrationError('Your export file is already at the version {}'.format(version))
elif old_version > version:
raise ArchiveMigrationError('Backward migrations are not supported')
elif old_version in MIGRATE_FUNCTIONS:
MIGRATE_FUNCTIONS[old_version](metadata, data, folder)
else:
raise ArchiveMigrationError('Cannot migrate from version {}'.format(old_version))
except ValueError as exception:
raise ArchiveMigrationError(exception)
except DanglingLinkError:
raise ArchiveMigrationError('Export file is invalid because it contains dangling links')
new_version = verify_metadata_version(metadata)
if new_version < version:
new_version = migrate_recursively(metadata, data, folder, version)
return new_version
|
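A toy sketch of the version-dispatch pattern that migrate_recursively relies on: a mapping from source version to a one-step migration function, applied repeatedly (here via recursion) until the target version is reached. The versions and migration steps below are made up for illustration only.

MIGRATIONS = {
    '0.1': lambda meta: meta.update(export_version='0.2'),
    '0.2': lambda meta: meta.update(export_version='0.3'),
}

def migrate(metadata, target='0.3'):
    # Apply one migration step at a time until the target version is reached.
    old = metadata['export_version']
    if old == target:
        return old
    if old not in MIGRATIONS:
        raise ValueError('cannot migrate from version {}'.format(old))
    MIGRATIONS[old](metadata)
    return migrate(metadata, target)

meta = {'export_version': '0.1'}
print(migrate(meta))  # '0.3'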
28,619 |
def xarray_var_iter(
data, var_names=None, combined=False, skip_dims=None, reverse_selections=False, dim_order=None
):
"""Convert xarray data to an iterator over vectors.
Iterates over each var_name and all of its coordinates, returning the 1d
data.
Parameters
----------
data : xarray.Dataset
Posterior data in an xarray
var_names : iterator of strings (optional)
Should be a subset of data.data_vars. Defaults to all of them.
combined : bool
Whether to combine chains or leave them separate
skip_dims : set
dimensions to not iterate over
reverse_selections : bool
Whether to reverse selections before iterating.
dim_order: list
Order for the first dimensions. Skips dimensions not found in the variable.
Returns
-------
Iterator of (str, dict(str, any), np.array)
The string is the variable name, the dictionary are coordinate names to values,
and the array are the values of the variable at those coordinates.
"""
data_to_sel = data
if var_names is None and isinstance(data, xr.DataArray):
data_to_sel = {data.name: data}
if isinstance(dim_order, str):
dim_order = [dim_order]
for var_name, selection, iselection in xarray_sel_iter(
data,
var_names=var_names,
combined=combined,
skip_dims=skip_dims,
reverse_selections=reverse_selections,
):
selected_data = data_to_sel[var_name].sel(**selection)
if dim_order is not None:
dim_order_selected = [dim for dim in dim_order if dim in selected_data.dims]
if dim_order_selected:
selected_data = selected_data.transpose(*dim_order, ...)
yield var_name, selection, iselection, selected_data.values
|
def xarray_var_iter(
data, var_names=None, combined=False, skip_dims=None, reverse_selections=False, dim_order=None
):
"""Convert xarray data to an iterator over vectors.
Iterates over each var_name and all of its coordinates, returning the 1d
data.
Parameters
----------
data : xarray.Dataset
Posterior data in an xarray
var_names : iterator of strings (optional)
Should be a subset of data.data_vars. Defaults to all of them.
combined : bool
Whether to combine chains or leave them separate
skip_dims : set
dimensions to not iterate over
reverse_selections : bool
Whether to reverse selections before iterating.
dim_order: list
Order for the first dimensions. Skips dimensions not found in the variable.
Returns
-------
Iterator of (str, dict(str, any), np.array)
The string is the variable name, the dictionary are coordinate names to values,
and the array are the values of the variable at those coordinates.
"""
data_to_sel = data
if var_names is None and isinstance(data, xr.DataArray):
data_to_sel = {data.name: data}
if isinstance(dim_order, str):
dim_order = [dim_order]
for var_name, selection, iselection in xarray_sel_iter(
data,
var_names=var_names,
combined=combined,
skip_dims=skip_dims,
reverse_selections=reverse_selections,
):
selected_data = data_to_sel[var_name].sel(**selection)
if dim_order is not None:
dim_order_selected = [dim for dim in dim_order if dim in selected_data.dims]
if dim_order_selected:
selected_data = selected_data.transpose(*dim_order_selected, ...)
yield var_name, selection, iselection, selected_data.values
|
46,465 |
def granger_causality_tests(ts_cause: TimeSeries,
ts_effect: TimeSeries,
maxlag: int,
addconst: bool = True,
verbose: bool = True
) -> None:
"""
Provides four tests for granger non causality of 2 time series using `statsmodels.tsa.stattools.grangercausalitytests`.
Parameters
----------
ts_cause
An univariate time series. The statistical test determines if this time series
'Granger causes' the time series ts_effect (second parameter). Missing values are not supported.
if H_0 is (non causality) is rejected (p near 0), then there is a 'granger causality'.
ts_effect
Univariate time series 'Granger caused' by ts_cause.
maxlag
If an integer, computes the test for all lags up to maxlag.
If an iterable, computes the tests only for the lags in maxlag.
addconst
Include a constant in the model.
verbose
Print results.
Returns
-------
Dict
All test results, dictionary keys are the number of lags. For each lag the values are a tuple,
with the first element a dictionary with test statistic, pvalues, degrees of freedom, the second element are
the OLS estimation results for the restricted model, the unrestricted model and the restriction (contrast)
matrix for the parameter f_test.
"""
ts_cause._assert_univariate()
ts_effect._assert_univariate()
raise_if(not ts_cause.has_same_time_as(ts_effect),
'ts_cause and ts_effect time series have different time index.')
return grangercausalitytests(
np.concatenate((ts_effect.values(), ts_cause.values()), axis=1),
maxlag,
addconst,
verbose
)
|
def granger_causality_tests(ts_cause: TimeSeries,
ts_effect: TimeSeries,
maxlag: int,
addconst: bool = True,
verbose: bool = True
) -> None:
"""
Provides four tests for granger non causality of 2 time series using `statsmodels.tsa.stattools.grangercausalitytests`.
Parameters
----------
ts_cause
An univariate time series. The statistical test determines if this time series
'Granger causes' the time series ts_effect (second parameter). Missing values are not supported.
if H_0 (non causality) is rejected (p near 0), then there is a 'granger causality'.
ts_effect
Univariate time series 'Granger caused' by ts_cause.
maxlag
If an integer, computes the test for all lags up to maxlag.
If an iterable, computes the tests only for the lags in maxlag.
addconst
Include a constant in the model.
verbose
Print results.
Returns
-------
Dict
All test results, dictionary keys are the number of lags. For each lag the values are a tuple,
with the first element a dictionary with test statistic, pvalues, degrees of freedom, the second element are
the OLS estimation results for the restricted model, the unrestricted model and the restriction (contrast)
matrix for the parameter f_test.
"""
ts_cause._assert_univariate()
ts_effect._assert_univariate()
raise_if(not ts_cause.has_same_time_as(ts_effect),
'ts_cause and ts_effect time series have different time index.')
return grangercausalitytests(
np.concatenate((ts_effect.values(), ts_cause.values()), axis=1),
maxlag,
addconst,
verbose
)
|
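The wrapper above ultimately calls statsmodels. A small synthetic example of that underlying call, assuming statsmodels is installed and skipping the TimeSeries-specific checks: column 0 is the "effect" series and column 1 the candidate "cause", matching the column order built in the function.

import numpy as np
from statsmodels.tsa.stattools import grangercausalitytests

rng = np.random.default_rng(0)
cause = rng.normal(size=200)
effect = np.roll(cause, 1) + 0.1 * rng.normal(size=200)  # effect lags cause by one step
data = np.column_stack([effect, cause])

results = grangercausalitytests(data, maxlag=2)          # dict keyed by lag
print(results[1][0]['ssr_ftest'])                        # (F statistic, p-value, df_denom, df_num)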
14,500 |
def split_ics(ics, random_uid=False, default_timezone=None):
"""split an ics string into several according to VEVENT's UIDs
and sort the right VTIMEZONEs accordingly
ignores all other ics components
:type ics: str
:param random_uid: assign random uids to all events
:type random_uid: bool
:rtype list:
"""
cal = cal_from_ics(ics)
tzs = {}
# Since some event could have a Windows format EG : 'New Zealand Standard Time'
# for 'Pacific/Auckland' in Olson format, we should get the last format and put
# it in tzs key to avoid warning in ics_from_list (issue #876)
for item in cal.walk():
if item.name == 'VTIMEZONE':
if item['TZID'] in windows_to_olson.WINDOWS_TO_OLSON:
key = windows_to_olson.WINDOWS_TO_OLSON[item['TZID']]
else:
key = item['TZID']
tzs.update({key: item})
events_grouped = defaultdict(list)
for item in cal.walk():
if item.name == 'VEVENT':
events_grouped[item['UID']].append(item)
else:
continue
return [ics_from_list(events, tzs, random_uid, default_timezone) for uid, events in
sorted(events_grouped.items())]
|
def split_ics(ics, random_uid=False, default_timezone=None):
"""split an ics string into several according to VEVENT's UIDs
and sort the right VTIMEZONEs accordingly
ignores all other ics components
:type ics: str
:param random_uid: assign random uids to all events
:type random_uid: bool
:rtype list:
"""
cal = cal_from_ics(ics)
tzs = {}
# Since some event could have a Windows format EG : 'New Zealand Standard Time'
# for 'Pacific/Auckland' in Olson format, we should get the last format and put
# it in tzs key to avoid warning in ics_from_list (issue #876)
for item in cal.walk():
if item.name == 'VTIMEZONE':
if item['TZID'] in windows_to_olson.WINDOWS_TO_OLSON:
key = windows_to_olson.WINDOWS_TO_OLSON[item['TZID']]
else:
key = item['TZID']
tzs[key] = item
events_grouped = defaultdict(list)
for item in cal.walk():
if item.name == 'VEVENT':
events_grouped[item['UID']].append(item)
else:
continue
return [ics_from_list(events, tzs, random_uid, default_timezone) for uid, events in
sorted(events_grouped.items())]
|
2,198 |
def test_radius():
# Test that the radius is as defined, simple case
c = _CFSubcluster()
c.update(_CFSubcluster(linear_sum=np.array([0])))
c.update(_CFSubcluster(linear_sum=np.array([1])))
assert c.radius == 0.5, c.radius
c.update(_CFSubcluster(linear_sum=np.array([2])))
assert c.radius == np.sqrt(2/3), c.radius
|
def test_radius():
# Test that the radius is as defined, simple case
c = _CFSubcluster()
c.update(_CFSubcluster(linear_sum=np.array([0])))
c.update(_CFSubcluster(linear_sum=np.array([1])))
assert c.radius == pytest.approx(0.5)
c.update(_CFSubcluster(linear_sum=np.array([2])))
assert c.radius == np.sqrt(2/3), c.radius
|
44,485 |
def print_to_sdtout(results: Dict[str, utility.ExecutorResponse],
human_readable: bool):
"""Viewer to print the ExecutorResponse results to stdout.
Args:
results: A dictionary with key:command names and val: Execution response
human_readable: Print results in human readable format. If set to True
command names will be printed as visiual delimiters in new lines. If False
results are printed as a dictionary with command as key.
"""
if human_readable:
for key, val in results.items():
print('\n================', key.name, '===================\n')
if val.has_error:
print('Following error occurred during the diagnoses:', val.stderr)
continue
print(val.parsed_output)
else:
outptu_dict = {}
for key, val in results.items():
if val.has_error:
outptu_dict[
key.name] = 'Following error occurred during the diagnoses: %s' % (
val.stderr)
continue
outptu_dict[key.name] = val.json_output
# Output results in Json format with indentation to make easy to read
print(
json.dumps(
outptu_dict, sort_keys=True, indent=2, separators=(',', ': ')))
|
def print_to_sdtout(results: Dict[str, utility.ExecutorResponse],
human_readable: bool):
"""Viewer to print the ExecutorResponse results to stdout.
Args:
results: A dictionary with key:command names and val: Execution response
human_readable: Print results in human readable format. If set to True
command names will be printed as visiual delimiters in new lines. If False
results are printed as a dictionary with command as key.
"""
if human_readable:
for key, val in results.items():
print('\n================', key.name, '===================\n')
if val.has_error:
print('Following error occurred during the diagnoses:', val.stderr)
continue
print(val.parsed_output)
else:
output_dict = {}
for key, val in results.items():
if val.has_error:
        output_dict[
key.name] = 'Following error occurred during the diagnoses: %s' % (
val.stderr)
continue
        output_dict[key.name] = val.json_output
# Output results in Json format with indentation to make easy to read
print(
json.dumps(
            output_dict, sort_keys=True, indent=2, separators=(',', ': ')))
|
58,821 |
def _iamaxmin(x, out, name):
if x.ndim != 1:
raise ValueError('x must be a 1D array (actual: {})'.format(x.ndim))
dtype = x.dtype.char
if dtype == 'f':
t = 's'
elif dtype == 'd':
t = 'd'
elif dtype == 'F':
t = 'c'
elif dtype == 'D':
t = 'z'
else:
raise TypeError('invalid dtype')
func = getattr(cublas, 'i' + t + name)
handle = device.get_cublas_handle()
result_dtype = 'i'
result_ptr, result, mode = _setup_result_ptr(handle, out, result_dtype)
func(handle, x.size, x.data.ptr, 1, result_ptr)
cublas.setPointerMode(handle, mode)
if out is None:
out = result
elif out.dtype != result_dtype:
out[...] = result
return out
|
def _iamaxmin(x, out, name):
if x.ndim != 1:
raise ValueError('x must be a 1D array (actual: {})'.format(x.ndim))
dtype = x.dtype.char
if dtype == 'f':
t = 's'
elif dtype == 'd':
t = 'd'
elif dtype == 'F':
t = 'c'
elif dtype == 'D':
t = 'z'
else:
raise TypeError('invalid dtype')
func = getattr(cublas, 'i' + t + name)
handle = device.get_cublas_handle()
result_dtype = 'i'
result_ptr, result, orig_mode = _setup_result_ptr(handle, out, result_dtype)
func(handle, x.size, x.data.ptr, 1, result_ptr)
cublas.setPointerMode(handle, orig_mode)
if out is None:
out = result
elif out.dtype != result_dtype:
out[...] = result
return out
|
25,186 |
def test_inference_parents_subscript_index():
"""Test inference of ``pathlib.Path.parents``, accessed by index."""
name_node = astroid.extract_node(
"""
from pathlib import Path
current_path = Path().resolve()
parent_path = current_path.parents[2]
parent_path
"""
)
inferred = next(name_node.infer())
assert isinstance(inferred, bases.Instance)
assert inferred.qname() == "pathlib.Path"
|
def test_inference_parents_subscript_index() -> None:
"""Test inference of ``pathlib.Path.parents``, accessed by index."""
name_node = astroid.extract_node(
"""
from pathlib import Path
current_path = Path().resolve()
parent_path = current_path.parents[2]
parent_path
"""
)
inferred = next(name_node.infer())
assert isinstance(inferred, bases.Instance)
assert inferred.qname() == "pathlib.Path"
|
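For reference, the runtime behaviour the inference test above is checking, shown with the standard library alone (the path is illustrative; indexing Path.parents yields another Path instance):

from pathlib import Path

current_path = Path('/tmp/a/b/c')
parent_path = current_path.parents[2]
print(type(parent_path).__name__, parent_path)  # e.g. PosixPath /tmp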
53,963 |
def import_function(module, name):
"""\
Try to import function from module. If the module is not installed or
function is not part of the module, it returns a dummy function that raises
the respective import error once the function is called. This could be a
ModuleNotFoundError if the module is missing or an AttributeError if the
module is installed but the function is not exported by it.
"""
try:
module = __import__(module, fromlist=[name])
try:
func = getattr(module, name)
except AttributeError as e:
error = e
def func(*_, **__):
raise error
except ImportError as e:
error = e
def func(*_, **__):
raise error
return func
|
def import_function(module: str, name: str) -> Callable:
"""\
Try to import function from module. If the module is not installed or
function is not part of the module, it returns a dummy function that raises
the respective import error once the function is called. This could be a
ModuleNotFoundError if the module is missing or an AttributeError if the
module is installed but the function is not exported by it.
Params
-------
module
Module to import from. Can be nested, e.g. "sklearn.utils".
name
Name of function to import from module.
"""
try:
module = __import__(module, fromlist=[name])
try:
func = getattr(module, name)
except AttributeError as e:
error = e
def func(*_, **__):
raise error
except ImportError as e:
error = e
def func(*_, **__):
raise error
return func
|
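A standalone sketch of the lazy-error pattern used by import_function, with a hypothetical name rather than the library's own helper: importing never fails eagerly, and a missing module or attribute only raises when the returned stub is actually called.

def lazy_import_function(module, name):
    # Resolve the attribute now if possible; otherwise return a stub that
    # re-raises the captured import/attribute error on first call.
    try:
        func = getattr(__import__(module, fromlist=[name]), name)
    except (ImportError, AttributeError) as e:
        error = e
        def func(*_, **__):
            raise error
    return func

sqrt = lazy_import_function('math', 'sqrt')
print(sqrt(9.0))                                    # 3.0
missing = lazy_import_function('math', 'no_such')   # does not raise yet
try:
    missing()
except AttributeError as e:
    print('raised only on call:', e)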
56,451 |
def copy_runs_into_db(source_db_path: str,
target_db_path: str, *run_ids) -> None:
"""
Copy a selection of runs into another DB file. All runs must come from the
same experiment. They will be added to an experiment with the same name
and sample_name in the target db. If such an experiment does not exist,
it will be created.
Args:
source_db_path: Path to the source DB file
target_db_path: Path to the target DB file
run_ids: The run_ids of the runs to copy into the target DB file
"""
# Validate that all runs are from the same experiment
sql_placeholders = sql_placeholder_string(len(run_ids))
exp_id_query = f"""
SELECT exp_id
FROM runs
WHERE run_id IN {sql_placeholders}
"""
source_conn = connect(source_db_path)
cursor = source_conn.cursor()
cursor.execute(exp_id_query, run_ids)
rows = cursor.fetchall()
source_exp_ids = np.unique([exp_id for row in rows for exp_id in row])
if len(source_exp_ids) != 1:
source_conn.close()
raise ValueError('Did not receive runs from a single experiment. '
f'Got runs from experiments {source_exp_ids}')
# Fetch the attributes of the runs' experiment
# hopefully, this is enough to uniquely identify the experiment
exp_attr_names = ['name', 'sample_name', 'start_time', 'end_time',
'format_string']
attrs_query = f"""
SELECT {','.join(exp_attr_names)}
FROM experiments
WHERE exp_id = ?
"""
cursor = source_conn.cursor()
cursor.execute(attrs_query, (source_exp_ids[0],))
row = cursor.fetchall()[0]
exp_attrs = {attr: row[attr] for attr in exp_attr_names}
# Massage the target DB file to accomodate the runs
# (create new experiment if needed)
target_conn = connect(target_db_path)
# this function raises if the target DB file has several experiments
# matching both the name and sample_name
try:
with atomic(target_conn) as target_conn:
target_exp_id = _create_exp_if_needed(target_conn,
exp_attrs['name'],
exp_attrs['sample_name'],
exp_attrs['format_string'],
exp_attrs['start_time'],
exp_attrs['end_time'])
# Finally insert the runs
for run_id in run_ids:
_copy_single_dataset_into_db(DataSet(run_id=run_id,
conn=source_conn),
target_conn,
target_exp_id)
finally:
source_conn.close()
target_conn.close()
|
def copy_runs_into_db(source_db_path: str,
target_db_path: str, *run_ids) -> None:
"""
Copy a selection of runs into another DB file. All runs must come from the
same experiment. They will be added to an experiment with the same name
and sample_name in the target db. If such an experiment does not exist,
it will be created.
Args:
source_db_path: Path to the source DB file
target_db_path: Path to the target DB file.
DB file will be created if it does not exist yet.
run_ids: The run_ids of the runs to copy into the target DB file
"""
# Validate that all runs are from the same experiment
sql_placeholders = sql_placeholder_string(len(run_ids))
exp_id_query = f"""
SELECT exp_id
FROM runs
WHERE run_id IN {sql_placeholders}
"""
source_conn = connect(source_db_path)
cursor = source_conn.cursor()
cursor.execute(exp_id_query, run_ids)
rows = cursor.fetchall()
source_exp_ids = np.unique([exp_id for row in rows for exp_id in row])
if len(source_exp_ids) != 1:
source_conn.close()
raise ValueError('Did not receive runs from a single experiment. '
f'Got runs from experiments {source_exp_ids}')
# Fetch the attributes of the runs' experiment
# hopefully, this is enough to uniquely identify the experiment
exp_attr_names = ['name', 'sample_name', 'start_time', 'end_time',
'format_string']
attrs_query = f"""
SELECT {','.join(exp_attr_names)}
FROM experiments
WHERE exp_id = ?
"""
cursor = source_conn.cursor()
cursor.execute(attrs_query, (source_exp_ids[0],))
row = cursor.fetchall()[0]
exp_attrs = {attr: row[attr] for attr in exp_attr_names}
# Massage the target DB file to accomodate the runs
# (create new experiment if needed)
target_conn = connect(target_db_path)
# this function raises if the target DB file has several experiments
# matching both the name and sample_name
try:
with atomic(target_conn) as target_conn:
target_exp_id = _create_exp_if_needed(target_conn,
exp_attrs['name'],
exp_attrs['sample_name'],
exp_attrs['format_string'],
exp_attrs['start_time'],
exp_attrs['end_time'])
# Finally insert the runs
for run_id in run_ids:
_copy_single_dataset_into_db(DataSet(run_id=run_id,
conn=source_conn),
target_conn,
target_exp_id)
finally:
source_conn.close()
target_conn.close()
|
41 |
def normalize_ddc(ddc):
"""
:param str ddc:
:rtype: list of str
"""
ddc = collapse_multiple_space(ddc.strip()).replace('/', '').replace("'", '')
results = []
for match in DDC_RE.finditer(ddc):
parts = match.groupdict()
prefix = ''
suffix = ''
# DDCs should start at word boundaries
start = match.start()
if start > 0 and re.search(r'\b', ddc[start - 1]):
continue
# And end at them
end = match.end()
if end < (len(ddc) - 1) and re.search(r'\b', ddc[end]):
continue
# Some old standard which isn't used anymore; might need to filter these
# out, but they should sort OK so let's keep them.
if parts['neg']:
prefix += '-'
# Juvenile prefix
if parts['j']:
prefix += 'j'
# Star should be at end
if parts['prestar'] or parts['poststar']:
suffix = '*'
# Series suffix
if parts['s']:
suffix += ' s'
# Biographical
if parts['B']:
suffix += ' B'
# Not at all sure
if parts['ninetwo']:
suffix += parts['ninetwo']
# And now the actual number!
if parts['number']:
# Numbers in parenthesis are "series" numbers
end = match.end('number')
if end < len(ddc) and ddc[end] == ')':
suffix += ' s'
# pad the integer part of the number
number_parts = parts['number'].split('.')
integer = number_parts[0]
# Copy decimal without losing precision
decimal = '.' + number_parts[1] if len(number_parts) > 1 else ''
number = '%03d%s' % (int(integer), decimal)
# Handle [Fic] or [E]
elif parts['fic']:
number = '[%s]' % parts['fic'].title()
else:
continue
results.append(prefix + number + suffix)
return results
|
def normalize_ddc(ddc):
"""
:param str ddc:
:rtype: list of str
"""
ddc = collapse_multiple_space(ddc.strip()).replace('/', '').replace("'", '')
results = []
for match in DDC_RE.finditer(ddc):
parts = match.groupdict()
prefix = ''
suffix = ''
# DDCs should start at word boundaries
start = match.start()
if start > 0 and re.search(r'\b', ddc[start - 1]):
continue
# And end at them
end = match.end()
if end < (len(ddc) - 1) and re.search(r'\b', ddc[end]):
return ''
# Some old standard which isn't used anymore; might need to filter these
# out, but they should sort OK so let's keep them.
if parts['neg']:
prefix += '-'
# Juvenile prefix
if parts['j']:
prefix += 'j'
# Star should be at end
if parts['prestar'] or parts['poststar']:
suffix = '*'
# Series suffix
if parts['s']:
suffix += ' s'
# Biographical
if parts['B']:
suffix += ' B'
# Not at all sure
if parts['ninetwo']:
suffix += parts['ninetwo']
# And now the actual number!
if parts['number']:
# Numbers in parenthesis are "series" numbers
end = match.end('number')
if end < len(ddc) and ddc[end] == ')':
suffix += ' s'
# pad the integer part of the number
number_parts = parts['number'].split('.')
integer = number_parts[0]
# Copy decimal without losing precision
decimal = '.' + number_parts[1] if len(number_parts) > 1 else ''
number = '%03d%s' % (int(integer), decimal)
# Handle [Fic] or [E]
elif parts['fic']:
number = '[%s]' % parts['fic'].title()
else:
continue
results.append(prefix + number + suffix)
return results
|
2,060 |
def _allclose_dense_sparse(x, y, rtol=1e-7, atol=1e-9):
"""Check allclose for sparse and dense data.
Both x and y need to be either sparse or dense, they
can't be mixed.
Parameters
----------
x : {array-like, sparse matrix}
First array to compare.
y : {array-like, sparse matrix}
Second array to compare.
rtol : float, default=1e-7
relative tolerance; see numpy.allclose.
atol : float, default=1e-9
absolute tolerance; see numpy.allclose. Note that the default here is
more tolerant than the default for numpy.testing.assert_allclose, where
atol=0.
"""
if sp.issparse(x) and sp.issparse(y):
x = x.tocsr()
y = y.tocsr()
x.sum_duplicates()
y.sum_duplicates()
return (np.array_equal(x.indices, y.indices) and
np.array_equal(x.indptr, y.indptr) and
np.allclose(x.data, y.data, rtol=rtol, atol=atol))
elif not sp.issparse(x) and not sp.issparse(y):
return np.allclose(x, y, rtol=rtol, atol=atol)
raise ValueError("Can only compare two sparse matrices, not a sparse "
"matrix and an array")
|
def _allclose_dense_sparse(x, y, rtol=1e-7, atol=1e-9):
"""Check allclose for sparse and dense data.
Both x and y need to be either sparse or dense, they
can't be mixed.
Parameters
----------
x : {array-like, sparse matrix}
First array to compare.
y : {array-like, sparse matrix}
Second array to compare.
rtol : float, default=1e-7
Relative tolerance; see numpy.allclose.
atol : float, default=1e-9
absolute tolerance; see numpy.allclose. Note that the default here is
more tolerant than the default for numpy.testing.assert_allclose, where
atol=0.
"""
if sp.issparse(x) and sp.issparse(y):
x = x.tocsr()
y = y.tocsr()
x.sum_duplicates()
y.sum_duplicates()
return (np.array_equal(x.indices, y.indices) and
np.array_equal(x.indptr, y.indptr) and
np.allclose(x.data, y.data, rtol=rtol, atol=atol))
elif not sp.issparse(x) and not sp.issparse(y):
return np.allclose(x, y, rtol=rtol, atol=atol)
raise ValueError("Can only compare two sparse matrices, not a sparse "
"matrix and an array")
|
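A small scipy/numpy illustration of why the sparse branch above calls sum_duplicates() before comparing: row 0 of `a` stores the value 3.0 as two duplicate entries (1.0 + 2.0), while `b` stores it as a single entry, so the index/data arrays only match after duplicates are summed.

import numpy as np
import scipy.sparse as sp

a = sp.csr_matrix(([1.0, 2.0], [1, 1], [0, 2, 2]), shape=(2, 2))  # duplicate entries at (0, 1)
b = sp.csr_matrix(([3.0], [1], [0, 1, 1]), shape=(2, 2))
a.sum_duplicates()
b.sum_duplicates()
same = (np.array_equal(a.indices, b.indices)
        and np.array_equal(a.indptr, b.indptr)
        and np.allclose(a.data, b.data))
print(same)  # True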
38,796 |
def distribute_tests(testcases, skip_system_check, skip_prgenv_check,
node_map):
temporary_registry = None
new_checks = []
for t in testcases:
if not t.check.is_fixture():
cls = type(t.check)
basename = cls.__name__
original_var_info = cls.get_variant_info(
t.check.variant_num, recurse=True
)
def _rfm_distributed_set_run_nodes(obj):
if not obj.local:
obj.job.pin_nodes = obj._rfm_nodelist
def _rfm_distributed_set_build_nodes(obj):
if not obj.local and not obj.build_locally:
obj.build_job.pin_nodes = obj._rfm_nodelist
# We re-set the valid system and environment in a hook to
# make sure that it will not be overwriten by a parent
# post-init hook
def _rfm_distributed_set_valid_sys_env(obj):
obj.valid_systems = [t._partition.fullname]
obj.valid_prog_environs = [t._environ.name]
class BaseTest(t.check.__class__):
_rfm_nodelist = builtins.parameter(node_map[t._partition.fullname])
valid_systems = [t._partition.fullname]
valid_prog_environs = [t._environ.name]
nc = make_test(
f'__D_{t._partition.name}_{t._environ.name}_{basename}',
(BaseTest, ),
{},
methods=[
builtins.run_before('run')(_rfm_distributed_set_run_nodes),
builtins.run_before('compile')(_rfm_distributed_set_build_nodes),
# TODO this hook is not working properly
# builtins.run_after('init')(_rfm_distributed_set_valid_sys_env),
]
)
# We have to set the prefix manually
nc._rfm_dynamic_test_prefix = t.check.prefix
for i in range(nc.num_variants):
# Check if this variant should be instantiated
var_info = copy.deepcopy(nc.get_variant_info(i, recurse=True))
var_info['params'].pop('_rfm_nodelist')
if var_info == original_var_info:
if temporary_registry is None:
temporary_registry = TestRegistry.create(nc, variant_num=i)
else:
temporary_registry.add(nc, variant_num=i)
if temporary_registry:
new_checks = temporary_registry.instantiate_all()
return generate_testcases(new_checks, skip_system_check,
skip_prgenv_check)
else:
return []
|
def distribute_tests(testcases, skip_system_check, skip_prgenv_check,
node_map):
tmp_registry = None
new_checks = []
for t in testcases:
if not t.check.is_fixture():
cls = type(t.check)
basename = cls.__name__
original_var_info = cls.get_variant_info(
t.check.variant_num, recurse=True
)
def _rfm_distributed_set_run_nodes(obj):
if not obj.local:
obj.job.pin_nodes = obj._rfm_nodelist
def _rfm_distributed_set_build_nodes(obj):
if not obj.local and not obj.build_locally:
obj.build_job.pin_nodes = obj._rfm_nodelist
# We re-set the valid system and environment in a hook to
# make sure that it will not be overwriten by a parent
# post-init hook
def _rfm_distributed_set_valid_sys_env(obj):
obj.valid_systems = [t._partition.fullname]
obj.valid_prog_environs = [t._environ.name]
class BaseTest(t.check.__class__):
_rfm_nodelist = builtins.parameter(node_map[t._partition.fullname])
valid_systems = [t._partition.fullname]
valid_prog_environs = [t._environ.name]
nc = make_test(
f'__D_{t._partition.name}_{t._environ.name}_{basename}',
(BaseTest, ),
{},
methods=[
builtins.run_before('run')(_rfm_distributed_set_run_nodes),
builtins.run_before('compile')(_rfm_distributed_set_build_nodes),
# TODO this hook is not working properly
# builtins.run_after('init')(_rfm_distributed_set_valid_sys_env),
]
)
# We have to set the prefix manually
nc._rfm_dynamic_test_prefix = t.check.prefix
for i in range(nc.num_variants):
# Check if this variant should be instantiated
var_info = copy.deepcopy(nc.get_variant_info(i, recurse=True))
var_info['params'].pop('_rfm_nodelist')
if var_info == original_var_info:
                if tmp_registry is None:
                    tmp_registry = TestRegistry.create(nc, variant_num=i)
                else:
                    tmp_registry.add(nc, variant_num=i)
    if tmp_registry:
        new_checks = tmp_registry.instantiate_all()
return generate_testcases(new_checks, skip_system_check,
skip_prgenv_check)
else:
return []
|
52,878 |
def test_import_export_history_hidden_true_with_hidden_dataset():
app = _mock_app()
u, h, d1, d2, j = _setup_simple_cat_job(app)
d2.visible = False
app.model.session.flush()
imported_history = _import_export_history(app, h, export_files="copy", include_hidden=True)
assert d2.dataset.get_size() > 0
assert imported_history.datasets[-1].get_size() > 0
|
def test_import_export_history_hidden_true_with_hidden_dataset():
app = _mock_app()
u, h, d1, d2, j = _setup_simple_cat_job(app)
d2.visible = False
app.model.session.flush()
imported_history = _import_export_history(app, h, export_files="copy", include_hidden=True)
assert d1.dataset.get_size() == imported_history.datasets[0].get_size()
assert d2.dataset.get_size() == imported_history.datasets[1].get_size()
|
40,088 |
def _move_grype_db_archive(
grype_db_archive_local_file_location: str, output_dir: str
) -> str:
# Get the location to move the archive to
archive_file_name = os.path.basename(grype_db_archive_local_file_location)
grype_db_archive_copied_file_location = os.path.join(output_dir, archive_file_name)
if not os.path.exists(grype_db_archive_local_file_location):
logger.warn(
"Unable to move grype_db archive from {} to {} because it does not exist".format(
grype_db_archive_local_file_location,
grype_db_archive_copied_file_location,
)
)
raise FileNotFoundError
else:
# Move the archive file
logger.info(
"Moving the grype_db archive from {} to {}".format(
grype_db_archive_local_file_location,
grype_db_archive_copied_file_location,
)
)
os.replace(
grype_db_archive_local_file_location, grype_db_archive_copied_file_location
)
return grype_db_archive_copied_file_location
|
def _move_grype_db_archive(
grype_db_archive_local_file_location: str, output_dir: str
) -> str:
# Get the location to move the archive to
archive_file_name = os.path.basename(grype_db_archive_local_file_location)
grype_db_archive_copied_file_location = os.path.join(output_dir, archive_file_name)
if not os.path.exists(grype_db_archive_local_file_location):
logger.warn(
"Unable to move grype_db archive from {} to {} because it does not exist".format(
grype_db_archive_local_file_location,
grype_db_archive_copied_file_location,
)
)
raise FileNotFoundError(errno.ENOENT, "Grype DB archive file location not found", grype_db_archive_local_file_location)
else:
# Move the archive file
logger.info(
"Moving the grype_db archive from {} to {}".format(
grype_db_archive_local_file_location,
grype_db_archive_copied_file_location,
)
)
os.replace(
grype_db_archive_local_file_location, grype_db_archive_copied_file_location
)
return grype_db_archive_copied_file_location
|
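The modified version above uses the three-argument OSError form, which populates errno, strerror and filename. A tiny standard-library illustration (the path is made up, and the real module is assumed to import errno):

import errno

try:
    raise FileNotFoundError(errno.ENOENT, "Grype DB archive file location not found",
                            "/tmp/does-not-exist.tar.gz")
except FileNotFoundError as e:
    print(e.errno, e.filename)  # 2 /tmp/does-not-exist.tar.gz
    print(e)                    # [Errno 2] Grype DB archive file location not found: '/tmp/does-not-exist.tar.gz'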
52,123 |
def image_fusion(img1, img2, wvs1, wvs2, array_type = None, filename = None):
""" Fuse two images of the same size together with given wavelengths representing and make a Spectral_data instance
img1: 1st image to be fused
img2: 2nd image to be fused
wvs1: list of wavelengths represent bands in img1
wvs2: list of wavelengths represent bands in img2
array_type: (optional) description of the fused array
filename: (optional) desired filename of the fused array
:param img1: np.ndarray
:param img2: np.ndarray
:param wvs1: list
:param wvs2: list
:param array_type: str
:param filename: str
:return: fused_array (a Spectral_data instance)
"""
if len(img1.shape) == 2:
img1 = np.expand_dims(img1,axis=2)
r1, c1, b1 = img1.shape
if len(img2.shape) == 2:
img2 = np.expand_dims(img2,axis=2)
r2, c2, b2 = img2.shape
if (r1,c1) != (r2,c2):
fatal_error("Input images should have the same image size")
array_data = np.concatenate((img1, img2), axis=2)
# sort all wavelengths
wavelengths = np.array(wvs1 + wvs2)
ind = np.argsort(wavelengths)
wavelengths = wavelengths[ind]
wavelength_dict = dict()
for (idx, wv) in enumerate(wavelengths):
wavelength_dict[wv] = float(idx)
# sort array_data based on wavelengths
array_data = array_data[:,:,ind]
array_data = (array_data / 255).astype(np.float32)
max_pixel = float(np.amax(array_data))
min_pixel = float(np.amin(array_data))
d_type = array_data.dtype
r, c, b = array_data.shape
fused_array = Spectral_data(array_data=array_data,
max_wavelength=float(max(wavelengths)),
min_wavelength=float(min(wavelengths)),
max_value=max_pixel, min_value=min_pixel,
d_type=d_type,
wavelength_dict=wavelength_dict, samples=int(r * c),
lines=int(b), interleave="bil",
wavelength_units="nm", array_type=array_type,
pseudo_rgb=None, filename=filename, default_bands=None)
# Make pseudo-rgb image and replace it inside the class instance object
pseudo_rgb = _make_pseudo_rgb(fused_array)
fused_array.pseudo_rgb = pseudo_rgb
_debug(visual=pseudo_rgb, filename=os.path.join(params.debug_outdir, str(params.device) + "_fused_pseudo_rgb.png"))
return fused_array
|
def image_fusion(img1, img2, wvs1, wvs2, array_type = None, filename = None):
""" Fuse two images of the same size together with given wavelengths representing and make a Spectral_data instance
img1: 1st image to be fused
img2: 2nd image to be fused
wvs1: list of wavelengths represent bands in img1
wvs2: list of wavelengths represent bands in img2
array_type: (optional) description of the fused array
filename: (optional) desired filename of the fused array
:param img1: numpy.ndarray
:param img2: numpy.ndarray
:param wvs1: list
:param wvs2: list
:param array_type: str
:param filename: str
:return: fused_array (a Spectral_data instance)
"""
if len(img1.shape) == 2:
img1 = np.expand_dims(img1,axis=2)
r1, c1, b1 = img1.shape
if len(img2.shape) == 2:
img2 = np.expand_dims(img2,axis=2)
r2, c2, b2 = img2.shape
if (r1,c1) != (r2,c2):
fatal_error("Input images should have the same image size")
array_data = np.concatenate((img1, img2), axis=2)
# sort all wavelengths
wavelengths = np.array(wvs1 + wvs2)
ind = np.argsort(wavelengths)
wavelengths = wavelengths[ind]
wavelength_dict = dict()
for (idx, wv) in enumerate(wavelengths):
wavelength_dict[wv] = float(idx)
# sort array_data based on wavelengths
array_data = array_data[:,:,ind]
array_data = (array_data / 255).astype(np.float32)
max_pixel = float(np.amax(array_data))
min_pixel = float(np.amin(array_data))
d_type = array_data.dtype
r, c, b = array_data.shape
fused_array = Spectral_data(array_data=array_data,
max_wavelength=float(max(wavelengths)),
min_wavelength=float(min(wavelengths)),
max_value=max_pixel, min_value=min_pixel,
d_type=d_type,
wavelength_dict=wavelength_dict, samples=int(r * c),
lines=int(b), interleave="bil",
wavelength_units="nm", array_type=array_type,
pseudo_rgb=None, filename=filename, default_bands=None)
# Make pseudo-rgb image and replace it inside the class instance object
pseudo_rgb = _make_pseudo_rgb(fused_array)
fused_array.pseudo_rgb = pseudo_rgb
_debug(visual=pseudo_rgb, filename=os.path.join(params.debug_outdir, str(params.device) + "_fused_pseudo_rgb.png"))
return fused_array
|
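The core of the band fusion above is just concatenation along the band axis followed by an argsort over the combined wavelengths. A numpy-only sketch with toy shapes (the Spectral_data bookkeeping and pseudo-RGB step are omitted):

import numpy as np

img1 = np.zeros((2, 2, 2), dtype=np.uint8)   # 2x2 image, 2 bands
img2 = np.ones((2, 2, 1), dtype=np.uint8)    # 2x2 image, 1 band
wvs1, wvs2 = [480.0, 650.0], [550.0]

fused = np.concatenate((img1, img2), axis=2)
wavelengths = np.array(wvs1 + wvs2)
order = np.argsort(wavelengths)
fused = fused[:, :, order]                   # reorder stacked bands by wavelength
print(wavelengths[order])                    # [480. 550. 650.]
print(fused[0, 0, :])                        # band values now sorted by wavelength: [0 1 0]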
34,210 |
def test_train_core_compare(run_in_default_project):
temp_dir = os.getcwd()
write_yaml_file(
{
"language": "en",
"pipeline": "supervised_embeddings",
"policies": [{"name": "KerasPolicy"}],
},
"config_1.yml",
)
write_yaml_file(
{
"language": "en",
"pipeline": "supervised_embeddings",
"policies": [{"name": "MemoizationPolicy"}],
},
"config_2.yml",
)
run_in_default_project(
"train",
"core",
"-c",
"config_1.yml",
"config_2.yml",
"--stories",
"data/stories.md",
"--out",
"core_comparison_results",
"--runs",
"2",
"--percentages",
"25",
"75",
"--augmentation",
"5",
)
assert os.path.exists(os.path.join(temp_dir, "core_comparison_results"))
sub_dirs = list_subdirectories(os.path.join(temp_dir, "core_comparison_results"))
assert len(sub_dirs) == 2
files = list_files(os.path.join(temp_dir, "core_comparison_results", sub_dirs[0]))
assert len(files) == 4
assert files[0].endswith("tar.gz")
|
def test_train_core_compare(run_in_default_project):
temp_dir = os.getcwd()
write_yaml_file(
{
"language": "en",
"pipeline": "supervised_embeddings",
"policies": [{"name": "KerasPolicy"}],
},
"config_1.yml",
)
write_yaml_file(
{
"language": "en",
"pipeline": "supervised_embeddings",
"policies": [{"name": "MemoizationPolicy"}],
},
"config_2.yml",
)
run_in_default_project(
"train",
"core",
"-c",
"config_1.yml",
"config_2.yml",
"--stories",
"data/stories.md",
"--out",
"core_comparison_results",
"--runs",
"2",
"--percentages",
"25",
"75",
"--augmentation",
"5",
)
assert os.path.exists(os.path.join(temp_dir, "core_comparison_results"))
sub_dirs = list_subdirectories(os.path.join(temp_dir, "core_comparison_results"))
assert len(sub_dirs) == 2
model_files = list_files(os.path.join(temp_dir, "core_comparison_results", sub_dirs[0]))
    assert len(model_files) == 4
    assert model_files[0].endswith("tar.gz")
|
|
16,925 |
def test_setup_without_migraton(hass_recorder):
"""Verify the schema version without a migration."""
hass = hass_recorder()
assert recorder.get_instance(hass).schema_version == SCHEMA_VERSION
|
def test_setup_without_migration(hass_recorder):
"""Verify the schema version without a migration."""
hass = hass_recorder()
assert recorder.get_instance(hass).schema_version == SCHEMA_VERSION
|
49,343 |
def run_command(args):
# type: (Tuple) -> str
working_directory = get_safe_working_dir()
stdout = stderr = ""
proc = None
try:
proc = subprocess.Popen(
args,
cwd=working_directory,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
)
if platform.python_version() < "3.3":
stdout, stderr = proc.communicate()
else:
stdout, stderr = proc.communicate(timeout=10)
except Exception as ex: # pylint:disable=broad-except
# failed to execute 'cmd' or '/bin/sh', or timed out; PowerShell and Az.Account may or may not be installed
# (handling Exception here because subprocess.SubprocessError and .TimeoutExpired were added in 3.3)
error = CredentialUnavailableError(message="Failed to invoke PowerShell")
six.raise_from(error, ex)
raise_for_error(proc.returncode, stdout, stderr)
return stdout
|
def run_command(args):
# type: (Tuple) -> str
working_directory = get_safe_working_dir()
stdout = stderr = ""
proc = None
try:
proc = subprocess.Popen(
args,
cwd=working_directory,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
)
if platform.python_version() < "3.3":
stdout, stderr = proc.communicate()
else:
try:
stdout, stderr = proc.communicate(timeout=10)
            except TimeoutExpired as ex:
proc.kill()
error = CredentialUnavailableError(message="Failed to invoke PowerShell")
six.raise_from(error, ex)
except Exception as ex: # pylint:disable=broad-except
# failed to execute 'cmd' or '/bin/sh', or timed out; PowerShell and Az.Account may or may not be installed
# (handling Exception here because subprocess.SubprocessError and .TimeoutExpired were added in 3.3)
error = CredentialUnavailableError(message="Failed to invoke PowerShell")
six.raise_from(error, ex)
raise_for_error(proc.returncode, stdout, stderr)
return stdout
|
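A standard-library sketch of the communicate-with-timeout pattern used above, including the kill-on-timeout step added in the modified version. The command and timeout are illustrative and assume a POSIX `sleep` binary.

import subprocess

proc = subprocess.Popen(["sleep", "5"], stdout=subprocess.PIPE,
                        stderr=subprocess.PIPE, universal_newlines=True)
try:
    stdout, stderr = proc.communicate(timeout=1)
except subprocess.TimeoutExpired as ex:
    proc.kill()                          # stop the child on timeout
    stdout, stderr = proc.communicate()  # reap the killed process
    print("timed out:", ex)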
39,878 |
def _get_infura_provider(provider_uri):
# https://web3py.readthedocs.io/en/latest/providers.html#infura-mainnet
uri_breakdown = urlparse(provider_uri)
infura_envvar = 'WEB3_INFURA_PROJECT_ID'
os.environ[infura_envvar] = os.environ.get(infura_envvar, uri_breakdown.netloc)
try:
# TODO: Only testnet for now
from web3.auto.infura.goerli import w3
except InfuraKeyNotFound:
raise ProviderError(f'{infura_envvar} must be provided in order to use an Infura Web3 provider.')
# Verify Connection
connected = w3.isConnected()
if not connected:
raise ProviderError('Failed to connect to Infura node.')
return w3.provider
|
def _get_infura_provider(provider_uri):
# https://web3py.readthedocs.io/en/latest/providers.html#infura-mainnet
uri_breakdown = urlparse(provider_uri)
infura_envvar = 'WEB3_INFURA_PROJECT_ID'
os.environ[infura_envvar] = os.environ.get(infura_envvar, uri_breakdown.netloc)
try:
# TODO: Only testnet for now
from web3.auto.infura.goerli import w3
except InfuraKeyNotFound:
raise ProviderError(f'{infura_envvar} must be provided in order to use an Infura Web3 provider.')
# Verify Connection
connected = w3.isConnected()
if not connected:
raise ProviderError(f'Failed to connect to Infura node {provider_uri}.')
return w3.provider
|
14,803 |
def setup(hass, config):
"""Set up the opnsense component."""
conf = config[DOMAIN]
url = conf[CONF_URL]
api_key = conf[CONF_API_KEY]
api_secret = conf[CONF_API_SECRET]
verify_ssl = conf.get(CONF_VERIFY_SSL, False)
tracker_interfaces = conf.get(CONF_TRACKER_INTERFACE, None)
from pyopnsense import diagnostics
if tracker_interfaces:
# Verify that specified tracker interfaces are valid
netinsight_client = diagnostics.NetworkInsightClient(
api_key, api_secret, url, verify_ssl
)
interfaces = list(netinsight_client.get_interfaces().values())
for interface in tracker_interfaces:
if interface not in interfaces:
_LOGGER.error(
"Specified OPNsense tracker interface %s is not " "found", interface
)
return False
else:
tracker_interfaces = ["LAN"]
interfaces_client = diagnostics.InterfaceClient(
api_key, api_secret, url, verify_ssl
)
clients = {"interfaces": interfaces_client}
hass.data[OPNSENSE_DATA] = clients
hass.async_create_task(
async_load_platform(hass, "device_tracker", DOMAIN, tracker_interfaces, config)
)
return True
|
def setup(hass, config):
"""Set up the opnsense component."""
conf = config[DOMAIN]
url = conf[CONF_URL]
api_key = conf[CONF_API_KEY]
api_secret = conf[CONF_API_SECRET]
verify_ssl = conf.get(CONF_VERIFY_SSL, False)
tracker_interfaces = conf.get(CONF_TRACKER_INTERFACE, None)
from pyopnsense import diagnostics
if tracker_interfaces:
# Verify that specified tracker interfaces are valid
netinsight_client = diagnostics.NetworkInsightClient(
api_key, api_secret, url, verify_ssl
)
interfaces = list(netinsight_client.get_interfaces().values())
for interface in tracker_interfaces:
if interface not in interfaces:
_LOGGER.error(
"Specified OPNsense tracker interface %s is not " "found", interface
)
return False
else:
tracker_interfaces = ["LAN"]
interfaces_client = diagnostics.InterfaceClient(
api_key, api_secret, url, verify_ssl
)
clients = {"interfaces": interfaces_client}
hass.data[OPNSENSE_DATA] = {"interfaces": interfaces_client}
hass.async_create_task(
async_load_platform(hass, "device_tracker", DOMAIN, tracker_interfaces, config)
)
return True
|
2,011 |
def _fit_calibrator(clf_fitted, label_encoder_, method, X, y,
sample_weight=None):
"""Fit calibrator(s) and return a `_CalibratedClassiferPipeline`
instance.
Output of the `decision_function` method of the `clf_fitted` is used for
calibration. If this method does not exist, `predict_proba` method used.
Parameters
----------
clf_fitted : Estimator instance
Fitted classifier.
label_encoder_ : LabelEncoder instance
LabelEncoder instance fitted on all the targets.
method : {'sigmoid', 'isotonic'}
The method to use for calibration.
X : array-like
Sample data used to calibrate predictions.
y : ndarray, shape (n_samples,)
The targets.
sample_weight : ndarray, shape (n_samples,), default=None
Sample weights. If `None`, then samples are equally weighted.
Returns
-------
pipeline : _CalibratedClassiferPipeline instance
"""
Y = label_binarize(y, classes=label_encoder_.classes_)
df, idx_pos_class = _get_predictions(clf_fitted, X, label_encoder_)
if method == 'isotonic':
calibrator = IsotonicRegression(out_of_bounds='clip')
elif method == 'sigmoid':
calibrator = _SigmoidCalibration()
calibrated_classifiers = []
for idx, this_df in zip(idx_pos_class, df.T):
if method == 'isotonic':
calibrator = IsotonicRegression(out_of_bounds='clip')
elif method == 'sigmoid':
calibrator = _SigmoidCalibration()
calibrator.fit(this_df, Y[:, idx], sample_weight)
calibrated_classifiers.append(calibrator)
pipeline = _CalibratedClassiferPipeline(
clf_fitted, calibrated_classifiers, label_encoder_
)
return pipeline
|
def _fit_calibrator(clf_fitted, label_encoder_, method, X, y,
sample_weight=None):
"""Fit calibrator(s) and return a `_CalibratedClassiferPipeline`
instance.
Output of the `decision_function` method of the `clf_fitted` is used for
calibration. If this method does not exist, `predict_proba` method is used.
Parameters
----------
clf_fitted : Estimator instance
Fitted classifier.
label_encoder_ : LabelEncoder instance
LabelEncoder instance fitted on all the targets.
method : {'sigmoid', 'isotonic'}
The method to use for calibration.
X : array-like
Sample data used to calibrate predictions.
y : ndarray, shape (n_samples,)
The targets.
sample_weight : ndarray, shape (n_samples,), default=None
Sample weights. If `None`, then samples are equally weighted.
Returns
-------
pipeline : _CalibratedClassiferPipeline instance
"""
Y = label_binarize(y, classes=label_encoder_.classes_)
df, idx_pos_class = _get_predictions(clf_fitted, X, label_encoder_)
if method == 'isotonic':
calibrator = IsotonicRegression(out_of_bounds='clip')
elif method == 'sigmoid':
calibrator = _SigmoidCalibration()
calibrated_classifiers = []
for idx, this_df in zip(idx_pos_class, df.T):
if method == 'isotonic':
calibrator = IsotonicRegression(out_of_bounds='clip')
elif method == 'sigmoid':
calibrator = _SigmoidCalibration()
calibrator.fit(this_df, Y[:, idx], sample_weight)
calibrated_classifiers.append(calibrator)
pipeline = _CalibratedClassiferPipeline(
clf_fitted, calibrated_classifiers, label_encoder_
)
return pipeline
|
24,374 |
def validate_default_template(spec_file):
init_config_default = False
instances_default = False
if 'template: init_config' not in spec_file and 'template: instances' not in spec_file:
# This config spec does not have init_config or instances
return True
for line in spec_file.split('\n'):
if any(template in line for template in ['init_config/default', 'init_config/openmetrics', 'init_config/jmx']):
init_config_default = True
if any(template in line for template in ['instances/default', 'instances/openmetrics', 'instances/jmx']):
instances_default = True
if instances_default and init_config_default:
return True
return False
|
def validate_default_template(spec_file):
init_config_default = False
instances_default = False
if 'template: init_config' not in spec_file or 'template: instances' not in spec_file:
# This config spec does not have init_config or instances
return True
for line in spec_file.split('\n'):
if any(template in line for template in ['init_config/default', 'init_config/openmetrics', 'init_config/jmx']):
init_config_default = True
if any(template in line for template in ['instances/default', 'instances/openmetrics', 'instances/jmx']):
instances_default = True
if instances_default and init_config_default:
return True
return False
|
27,252 |
def test_table_drop_consistency():
t = ibis.table(
[('a', 'int64'), ('b', 'string'), ('c', 'timestamp')], name='t'
)
e1 = t.projection(["a", "c"])
e2 = t.drop(["b"])
e3 = t.drop("b")
assert e1.schema() == e2.schema()
assert e1.schema() == e3.schema()
assert not(e1.schema() == t.schema())
assert not("b" in e1.columns)
assert "a" in e1.columns
assert "c" in e2.columns
|
def test_table_drop_consistency():
t = ibis.table(
[('a', 'int64'), ('b', 'string'), ('c', 'timestamp')], name='t'
)
e1 = t.projection(["a", "c"])
e2 = t.drop(["b"])
e3 = t.drop("b")
assert e1.schema() == e2.schema()
assert e1.schema() == e3.schema()
assert e1.schema() != t.schema()
assert not("b" in e1.columns)
assert "a" in e1.columns
assert "c" in e2.columns
|
23,578 |
def main(args=None):
"""The main routine."""
if args is None:
args = sys.argv[1:]
parser = argparse.ArgumentParser(description="Kinto Command-Line " "Interface")
commands = (
"init",
"start",
"migrate",
"delete-collection",
"flush",
"version",
"rebuild-quotas",
"create-user",
)
subparsers = parser.add_subparsers(
title="subcommands",
description="Main Kinto CLI commands",
dest="subcommand",
help="Choose and run with --help",
)
subparsers.required = True
for command in commands:
subparser = subparsers.add_parser(command)
subparser.set_defaults(which=command)
subparser.add_argument(
"--ini",
help="Application configuration file",
dest="ini_file",
required=False,
default=DEFAULT_CONFIG_FILE,
)
subparser.add_argument(
"-q",
"--quiet",
action="store_const",
const=logging.CRITICAL,
dest="verbosity",
help="Show only critical errors.",
)
subparser.add_argument(
"-v",
"--debug",
action="store_const",
const=logging.DEBUG,
dest="verbosity",
help="Show all messages, including debug messages.",
)
if command == "init":
subparser.add_argument(
"--backend",
help="{memory,redis,postgresql}",
dest="backend",
required=False,
default=None,
)
subparser.add_argument(
"--cache-backend",
help="{memory,redis,postgresql,memcached}",
dest="cache-backend",
required=False,
default=None,
)
subparser.add_argument(
"--host",
help="Host to listen() on.",
dest="host",
required=False,
default="127.0.0.1",
)
elif command == "migrate":
subparser.add_argument(
"--dry-run",
action="store_true",
help="Simulate the migration operations " "and show information",
dest="dry_run",
required=False,
default=False,
)
elif command == "delete-collection":
subparser.add_argument(
"--bucket",
help="The bucket where the collection " "belongs to.",
required=True
)
subparser.add_argument(
"--collection",
help="The collection to remove.",
required=True
)
elif command == "flush":
subparser.add_argument(
"--flush-cache",
#action=".flush",
help="Clears the Cache from the Memory Backend",
required=False,
default=False,
)
elif command == "rebuild-quotas":
subparser.add_argument(
"--dry-run",
action="store_true",
help="Simulate the rebuild operation " "and show information",
dest="dry_run",
required=False,
default=False,
)
elif command == "start":
subparser.add_argument(
"--reload",
action="store_true",
help="Restart when code or config changes",
required=False,
default=False,
)
subparser.add_argument(
"--port",
type=int,
help="Listening port number",
required=False,
default=DEFAULT_PORT,
)
elif command == "create-user":
subparser.add_argument(
"-u", "--username", help="Superuser username", required=False, default=None
)
subparser.add_argument(
"-p", "--password", help="Superuser password", required=False, default=None
)
# Parse command-line arguments
parsed_args = vars(parser.parse_args(args))
config_file = parsed_args["ini_file"]
which_command = parsed_args["which"]
# Initialize logging from
level = parsed_args.get("verbosity") or DEFAULT_LOG_LEVEL
logging.basicConfig(level=level, format=DEFAULT_LOG_FORMAT)
if which_command == "init":
if os.path.exists(config_file):
print(f"{config_file} already exists.", file=sys.stderr)
return 1
backend = parsed_args["backend"]
cache_backend = parsed_args["cache-backend"]
if not backend:
while True:
prompt = (
"Select the backend you would like to use: "
"(1 - postgresql, 2 - redis, default - memory) "
)
answer = input(prompt).strip()
try:
backends = {"1": "postgresql", "2": "redis", "": "memory"}
backend = backends[answer]
break
except KeyError:
pass
if not cache_backend:
while True:
prompt = (
"Select the cache backend you would like to use: "
"(1 - postgresql, 2 - redis, 3 - memcached, default - memory) "
)
answer = input(prompt).strip()
try:
cache_backends = {
"1": "postgresql",
"2": "redis",
"3": "memcached",
"": "memory",
}
cache_backend = cache_backends[answer]
break
except KeyError:
pass
init(config_file, backend, cache_backend, parsed_args["host"])
# Install postgresql libraries if necessary
if backend == "postgresql" or cache_backend == "postgresql":
try:
import psycopg2 # NOQA
except ImportError:
subprocess.check_call(
[sys.executable, "-m", "pip", "install", "kinto[postgresql]"]
)
elif backend == "redis" or cache_backend == "redis":
try:
import kinto_redis # NOQA
except ImportError:
subprocess.check_call([sys.executable, "-m", "pip", "install", "kinto[redis]"])
elif cache_backend == "memcached":
try:
import memcache # NOQA
except ImportError:
subprocess.check_call([sys.executable, "-m", "pip", "install", "kinto[memcached]"])
elif which_command == "migrate":
dry_run = parsed_args["dry_run"]
env = bootstrap(config_file, options={"command": "migrate"})
scripts.migrate(env, dry_run=dry_run)
elif which_command == "delete-collection":
env = bootstrap(config_file, options={"command": "delete-collection"})
return scripts.delete_collection(env, parsed_args["bucket"], parsed_args["collection"])
elif which_command == "flush":
#env = bootstrap(config_file, option={"command": "flush-cache"})
if parsed_args["flush-cache"]:
return cache.flush()
elif which_command == "rebuild-quotas":
dry_run = parsed_args["dry_run"]
env = bootstrap(config_file, options={"command": "rebuild-quotas"})
return scripts.rebuild_quotas(env, dry_run=dry_run)
elif which_command == "create-user":
username = parsed_args["username"]
password = parsed_args["password"]
env = bootstrap(config_file, options={"command": "create-user"})
return create_user(env, username=username, password=password)
elif which_command == "start":
pserve_argv = ["pserve"]
if parsed_args["reload"]:
pserve_argv.append("--reload")
if level == logging.DEBUG:
pserve_argv.append("-v")
if level == logging.CRITICAL:
pserve_argv.append("-q")
pserve_argv.append(config_file)
pserve_argv.append(f"http_port={parsed_args['port']}")
pserve.main(argv=pserve_argv)
else:
print(__version__)
return 0
|
def main(args=None):
"""The main routine."""
if args is None:
args = sys.argv[1:]
parser = argparse.ArgumentParser(description="Kinto Command-Line " "Interface")
commands = (
"init",
"start",
"migrate",
"delete-collection",
"flush-cache",
"version",
"rebuild-quotas",
"create-user",
)
subparsers = parser.add_subparsers(
title="subcommands",
description="Main Kinto CLI commands",
dest="subcommand",
help="Choose and run with --help",
)
subparsers.required = True
for command in commands:
subparser = subparsers.add_parser(command)
subparser.set_defaults(which=command)
subparser.add_argument(
"--ini",
help="Application configuration file",
dest="ini_file",
required=False,
default=DEFAULT_CONFIG_FILE,
)
subparser.add_argument(
"-q",
"--quiet",
action="store_const",
const=logging.CRITICAL,
dest="verbosity",
help="Show only critical errors.",
)
subparser.add_argument(
"-v",
"--debug",
action="store_const",
const=logging.DEBUG,
dest="verbosity",
help="Show all messages, including debug messages.",
)
if command == "init":
subparser.add_argument(
"--backend",
help="{memory,redis,postgresql}",
dest="backend",
required=False,
default=None,
)
subparser.add_argument(
"--cache-backend",
help="{memory,redis,postgresql,memcached}",
dest="cache-backend",
required=False,
default=None,
)
subparser.add_argument(
"--host",
help="Host to listen() on.",
dest="host",
required=False,
default="127.0.0.1",
)
elif command == "migrate":
subparser.add_argument(
"--dry-run",
action="store_true",
help="Simulate the migration operations " "and show information",
dest="dry_run",
required=False,
default=False,
)
elif command == "delete-collection":
subparser.add_argument(
"--bucket",
help="The bucket where the collection " "belongs to.",
required=True
)
subparser.add_argument(
"--collection",
help="The collection to remove.",
required=True
)
elif command == "flush":
subparser.add_argument(
"--flush-cache",
#action=".flush",
help="Clears the Cache from the Memory Backend",
required=False,
default=False,
)
elif command == "rebuild-quotas":
subparser.add_argument(
"--dry-run",
action="store_true",
help="Simulate the rebuild operation " "and show information",
dest="dry_run",
required=False,
default=False,
)
elif command == "start":
subparser.add_argument(
"--reload",
action="store_true",
help="Restart when code or config changes",
required=False,
default=False,
)
subparser.add_argument(
"--port",
type=int,
help="Listening port number",
required=False,
default=DEFAULT_PORT,
)
elif command == "create-user":
subparser.add_argument(
"-u", "--username", help="Superuser username", required=False, default=None
)
subparser.add_argument(
"-p", "--password", help="Superuser password", required=False, default=None
)
# Parse command-line arguments
parsed_args = vars(parser.parse_args(args))
config_file = parsed_args["ini_file"]
which_command = parsed_args["which"]
# Initialize logging from
level = parsed_args.get("verbosity") or DEFAULT_LOG_LEVEL
logging.basicConfig(level=level, format=DEFAULT_LOG_FORMAT)
if which_command == "init":
if os.path.exists(config_file):
print(f"{config_file} already exists.", file=sys.stderr)
return 1
backend = parsed_args["backend"]
cache_backend = parsed_args["cache-backend"]
if not backend:
while True:
prompt = (
"Select the backend you would like to use: "
"(1 - postgresql, 2 - redis, default - memory) "
)
answer = input(prompt).strip()
try:
backends = {"1": "postgresql", "2": "redis", "": "memory"}
backend = backends[answer]
break
except KeyError:
pass
if not cache_backend:
while True:
prompt = (
"Select the cache backend you would like to use: "
"(1 - postgresql, 2 - redis, 3 - memcached, default - memory) "
)
answer = input(prompt).strip()
try:
cache_backends = {
"1": "postgresql",
"2": "redis",
"3": "memcached",
"": "memory",
}
cache_backend = cache_backends[answer]
break
except KeyError:
pass
init(config_file, backend, cache_backend, parsed_args["host"])
# Install postgresql libraries if necessary
if backend == "postgresql" or cache_backend == "postgresql":
try:
import psycopg2 # NOQA
except ImportError:
subprocess.check_call(
[sys.executable, "-m", "pip", "install", "kinto[postgresql]"]
)
elif backend == "redis" or cache_backend == "redis":
try:
import kinto_redis # NOQA
except ImportError:
subprocess.check_call([sys.executable, "-m", "pip", "install", "kinto[redis]"])
elif cache_backend == "memcached":
try:
import memcache # NOQA
except ImportError:
subprocess.check_call([sys.executable, "-m", "pip", "install", "kinto[memcached]"])
elif which_command == "migrate":
dry_run = parsed_args["dry_run"]
env = bootstrap(config_file, options={"command": "migrate"})
scripts.migrate(env, dry_run=dry_run)
elif which_command == "delete-collection":
env = bootstrap(config_file, options={"command": "delete-collection"})
return scripts.delete_collection(env, parsed_args["bucket"], parsed_args["collection"])
elif which_command == "flush":
#env = bootstrap(config_file, option={"command": "flush-cache"})
if parsed_args["flush-cache"]:
return cache.flush()
elif which_command == "rebuild-quotas":
dry_run = parsed_args["dry_run"]
env = bootstrap(config_file, options={"command": "rebuild-quotas"})
return scripts.rebuild_quotas(env, dry_run=dry_run)
elif which_command == "create-user":
username = parsed_args["username"]
password = parsed_args["password"]
env = bootstrap(config_file, options={"command": "create-user"})
return create_user(env, username=username, password=password)
elif which_command == "start":
pserve_argv = ["pserve"]
if parsed_args["reload"]:
pserve_argv.append("--reload")
if level == logging.DEBUG:
pserve_argv.append("-v")
if level == logging.CRITICAL:
pserve_argv.append("-q")
pserve_argv.append(config_file)
pserve_argv.append(f"http_port={parsed_args['port']}")
pserve.main(argv=pserve_argv)
else:
print(__version__)
return 0
|
30,429 |
def build_misp_complex_filter(demisto_query: str):
"""
Args:
demisto_query: complex query contains saved words: 'AND:', 'OR:' and 'NOT:'
using ',' as delimiter for parameters and ';' as delimiter for operators.
using the operators is optional.
if 'demisto_query' does not contains any of the complex operators the original
input will be returned
Returns:
dict: dictionary created for misp to perform complex auery
or if no complex qury found retruns the original input
Example:
demisto_query should look like:
example 1: "AND:param1,param2;OR:param3;NOT:param4,param5"
example 2: "NOT:param3,param5"
example 3 (simple syntax): "param1,param2"
"""
regexAnd = r"(AND:)([^\;]+)(;)"
regexOr = r"(OR:)([^\;]+)(;)"
regexNot = r"(NOT:)([^\;]+)(;)"
andList = None
orList = None
notList = None
isComplexSearch = False
matchAnd = re.search(regexAnd, demisto_query, re.MULTILINE)
matchOr = re.search(regexOr, demisto_query, re.MULTILINE)
matchNot = re.search(regexNot, demisto_query, re.MULTILINE)
if matchAnd is not None:
andList = matchAnd.group(2).split(',')
isComplexSearch = True
if matchOr is not None:
orList = matchOr.group(2).split(',')
isComplexSearch = True
if matchNot is not None:
notList = matchNot.group(2).split(',')
isComplexSearch = True
if isComplexSearch:
misp_complex_query = MISP.build_complex_query(
or_parameters = orList,
and_parameters = andList,
not_parameters = notList)
return misp_complex_query
return demisto_query
|
def build_misp_complex_filter(demisto_query: str):
"""
Args:
        demisto_query: complex query containing the reserved words 'AND:', 'OR:' and 'NOT:',
        using ',' as the delimiter for parameters and ';' as the delimiter for operators.
        Using the operators is optional.
        If 'demisto_query' does not contain any of the complex operators, the original
        input will be returned.
    Returns:
        dict: dictionary created for MISP to perform a complex query,
        or the original input if no complex query is found.
Example:
demisto_query should look like:
example 1: "AND:param1,param2;OR:param3;NOT:param4,param5"
example 2: "NOT:param3,param5"
example 3 (simple syntax): "param1,param2"
"""
regexAnd = r"(AND:)([^\;]+)(;)"
regexOr = r"(OR:)([^\;]+)(;)"
regexNot = r'(NOT:)([^\;]+)(;)'
andList = None
orList = None
notList = None
isComplexSearch = False
matchAnd = re.search(regexAnd, demisto_query, re.MULTILINE)
matchOr = re.search(regexOr, demisto_query, re.MULTILINE)
matchNot = re.search(regexNot, demisto_query, re.MULTILINE)
if matchAnd is not None:
andList = matchAnd.group(2).split(',')
isComplexSearch = True
if matchOr is not None:
orList = matchOr.group(2).split(',')
isComplexSearch = True
if matchNot is not None:
notList = matchNot.group(2).split(',')
isComplexSearch = True
if isComplexSearch:
misp_complex_query = MISP.build_complex_query(
or_parameters = orList,
and_parameters = andList,
not_parameters = notList)
return misp_complex_query
return demisto_query
|
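A standalone sketch of the operator parsing used above, relying only on the standard-library re module; split_operators is a hypothetical helper name, and the real function additionally delegates to MISP.build_complex_query, which is not reproduced here. Note that each operator group is only captured when it is followed by ';', so the example query carries a trailing delimiter.

import re

def split_operators(demisto_query: str) -> dict:
    parts = {}
    for op in ("AND", "OR", "NOT"):
        match = re.search(r"(%s:)([^;]+)(;)" % op, demisto_query, re.MULTILINE)
        if match is not None:
            parts[op] = match.group(2).split(',')
    return parts

print(split_operators("AND:param1,param2;OR:param3;NOT:param4,param5;"))
# {'AND': ['param1', 'param2'], 'OR': ['param3'], 'NOT': ['param4', 'param5']}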
17,383 |
def get_axis(figsize, size, aspect, ax, **kwargs):
try:
import matplotlib as mpl
import matplotlib.pyplot as plt
except ImportError:
raise ImportError("matplotlib is required for plot.utils.get_axis")
if figsize is not None:
if ax is not None:
raise ValueError("cannot provide both `figsize` and " "`ax` arguments")
if size is not None:
raise ValueError("cannot provide both `figsize` and " "`size` arguments")
_, ax = plt.subplots(figsize=figsize)
elif size is not None:
if ax is not None:
raise ValueError("cannot provide both `size` and `ax` arguments")
if aspect is None:
width, height = mpl.rcParams["figure.figsize"]
aspect = width / height
figsize = (size * aspect, size)
_, ax = plt.subplots(figsize=figsize)
elif aspect is not None:
raise ValueError("cannot provide `aspect` argument without `size`")
if ax is None:
ax = plt.gca()
if "projection" in kwargs:
ax = plt.axes(projection=kwargs["projection"])
if "facecolor" in kwargs:
ax.set_facecolor(kwargs["facecolor"])
return ax
|
def get_axis(figsize=None, size=None, aspect=None, ax=None, **kwargs):
try:
import matplotlib as mpl
import matplotlib.pyplot as plt
except ImportError:
raise ImportError("matplotlib is required for plot.utils.get_axis")
if figsize is not None:
if ax is not None:
raise ValueError("cannot provide both `figsize` and " "`ax` arguments")
if size is not None:
raise ValueError("cannot provide both `figsize` and " "`size` arguments")
_, ax = plt.subplots(figsize=figsize)
elif size is not None:
if ax is not None:
raise ValueError("cannot provide both `size` and `ax` arguments")
if aspect is None:
width, height = mpl.rcParams["figure.figsize"]
aspect = width / height
figsize = (size * aspect, size)
_, ax = plt.subplots(figsize=figsize)
elif aspect is not None:
raise ValueError("cannot provide `aspect` argument without `size`")
if ax is None:
ax = plt.gca()
if "projection" in kwargs:
ax = plt.axes(projection=kwargs["projection"])
if "facecolor" in kwargs:
ax.set_facecolor(kwargs["facecolor"])
return ax
|
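A small usage sketch for get_axis above (assumes matplotlib is installed): derive an axes from a size/aspect pair, then pass it back explicitly on a later call.

ax = get_axis(size=4, aspect=1.5)  # creates a new figure with figsize (6, 4)
same_ax = get_axis(ax=ax)          # an axes passed in is returned unchanged
assert same_ax is ax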
55,488 |
def test_read_csv_incorrect_data():
pandas_df = pandas.read_csv("modin/pandas/test/data/test_categories.json")
modin_df = pd.read_csv("modin/pandas/test/data/test_categories.json")
df_equals(pandas_df, modin_df)
|
def test_read_csv_incorrect_data():
name = "modin/pandas/test/data/test_categories.json"
pandas_df, modin_df = pandas.read_csv(name), pd.read_csv(name)
df_equals(pandas_df, modin_df)
|
21,709 |
def load_legacy_presence_router(hs: "HomeServer"):
"""Wrapper that loads a presence router module configured using the old
configuration, and registers the hooks they implement.
"""
if hs.config.presence_router_module_class is None:
return
module = hs.config.presence_router_module_class
config = hs.config.presence_router_config
api = hs.get_module_api()
presence_router = module(config=config, module_api=api)
# The known hooks. If a module implements a method which name appears in this set,
# we'll want to register it.
presence_router_methods = {
"get_users_for_states",
"get_interested_users",
}
# All methods that the module provides should be async, but this wasn't enforced
# in the old module system, so we wrap them if needed
def async_wrapper(f: Optional[Callable]) -> Optional[Callable[..., Awaitable]]:
# f might be None if the callback isn't implemented by the module. In this
# case we don't want to register a callback at all so we return None.
if f is None:
return None
def run(*args, **kwargs):
# mypy doesn't do well across function boundaries so we need to tell it
# f is definitely not None.
assert f is not None
return maybe_awaitable(f(*args, **kwargs))
return run
# Register the hooks through the module API.
hooks = {
hook: async_wrapper(getattr(presence_router, hook, None))
for hook in presence_router_methods
}
api.register_presence_router_callbacks(**hooks)
|
def load_legacy_presence_router(hs: "HomeServer"):
"""Wrapper that loads a presence router module configured using the old
configuration, and registers the hooks it implements.
"""
if hs.config.presence_router_module_class is None:
return
module = hs.config.presence_router_module_class
config = hs.config.presence_router_config
api = hs.get_module_api()
presence_router = module(config=config, module_api=api)
# The known hooks. If a module implements a method which name appears in this set,
# we'll want to register it.
presence_router_methods = {
"get_users_for_states",
"get_interested_users",
}
# All methods that the module provides should be async, but this wasn't enforced
# in the old module system, so we wrap them if needed
def async_wrapper(f: Optional[Callable]) -> Optional[Callable[..., Awaitable]]:
# f might be None if the callback isn't implemented by the module. In this
# case we don't want to register a callback at all so we return None.
if f is None:
return None
def run(*args, **kwargs):
# mypy doesn't do well across function boundaries so we need to tell it
# f is definitely not None.
assert f is not None
return maybe_awaitable(f(*args, **kwargs))
return run
# Register the hooks through the module API.
hooks = {
hook: async_wrapper(getattr(presence_router, hook, None))
for hook in presence_router_methods
}
api.register_presence_router_callbacks(**hooks)
|
10,699 |
def pack_struct(builder, values):
"""
Pack a sequence of values in a LLVM struct.
"""
structty = ir.LiteralStructType([v.type for v in values])
st = structty(ir.Undefined)
for i, v in enumerate(values):
st = builder.insert_value(st, v, i)
return st
|
def pack_struct(builder, values):
"""
Pack a sequence of values into a LLVM struct.
"""
structty = ir.LiteralStructType([v.type for v in values])
st = structty(ir.Undefined)
for i, v in enumerate(values):
st = builder.insert_value(st, v, i)
return st
|
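A minimal llvmlite context for exercising pack_struct above; this is only a sketch of how lowering code might call it, not Numba's actual usage, and the module/function names are made up.

from llvmlite import ir

module = ir.Module(name="demo")
func = ir.Function(module, ir.FunctionType(ir.VoidType(), []), name="pack_demo")
builder = ir.IRBuilder(func.append_basic_block(name="entry"))
i32 = ir.IntType(32)
packed = pack_struct(builder, [i32(1), i32(2)])  # a {i32, i32} struct value
builder.ret_void()
print(module)  # the emitted IR contains the insertvalue instructions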
8,809 |
def test_isupport_apply_ci():
"""Test removed parameters are case-insensitives."""
instance = isupport.ISupport()
new = instance.apply(awaylen=50, NICKLEN=31, channellen=16)
new_removed = new.apply(**{
'-awaylen': None,
'-NICKLEN': None,
'channellen': 24,
})
assert 'AWAYLEN' in new
assert 'AWAYLEN' not in new_removed
assert 'NICKLEN' in new
assert 'NICKLEN' not in new_removed
assert 'CHANNELLEN' in new
assert 'CHANNELLEN' in new_removed
assert new['CHANNELLEN'] == 16
assert new_removed['CHANNELLEN'] == 24
new_removed_ci = new.apply(**{
'-AWAYLEN': None,
'-nicklen': None,
'CHANNELLEN': 34,
})
assert 'AWAYLEN' in new
assert 'AWAYLEN' not in new_removed_ci
assert 'NICKLEN' in new
assert 'NICKLEN' not in new_removed_ci
assert 'CHANNELLEN' in new
assert 'CHANNELLEN' in new_removed_ci
assert new['CHANNELLEN'] == 16
assert new_removed_ci['CHANNELLEN'] == 34
|
def test_isupport_apply_ci():
"""Test removed parameters are case-insensitive."""
instance = isupport.ISupport()
new = instance.apply(awaylen=50, NICKLEN=31, channellen=16)
new_removed = new.apply(**{
'-awaylen': None,
'-NICKLEN': None,
'channellen': 24,
})
assert 'AWAYLEN' in new
assert 'AWAYLEN' not in new_removed
assert 'NICKLEN' in new
assert 'NICKLEN' not in new_removed
assert 'CHANNELLEN' in new
assert 'CHANNELLEN' in new_removed
assert new['CHANNELLEN'] == 16
assert new_removed['CHANNELLEN'] == 24
new_removed_ci = new.apply(**{
'-AWAYLEN': None,
'-nicklen': None,
'CHANNELLEN': 34,
})
assert 'AWAYLEN' in new
assert 'AWAYLEN' not in new_removed_ci
assert 'NICKLEN' in new
assert 'NICKLEN' not in new_removed_ci
assert 'CHANNELLEN' in new
assert 'CHANNELLEN' in new_removed_ci
assert new['CHANNELLEN'] == 16
assert new_removed_ci['CHANNELLEN'] == 34
|
30,375 |
def delete_group(client, data_args):
id = data_args.get('id')
raw_response = client.do_request('DELETE', 'groups/' + str(id))
group = {'ID': int(id), 'Deleted': True}
human_readable = 'Group has been deleted. ID = ' + str(id)
context = createContext(group, removeNull=True)
outputs = {'Tanium.Group(val.ID === obj.ID)': context}
return_outputs(readable_output=human_readable, outputs=outputs, raw_response=raw_response)
|
def delete_group(client, data_args):
id_ = data_args.get('id')
    raw_response = client.do_request('DELETE', 'groups/' + str(id_))
    group = {'ID': int(id_), 'Deleted': True}
    human_readable = 'Group has been deleted. ID = ' + str(id_)
context = createContext(group, removeNull=True)
outputs = {'Tanium.Group(val.ID === obj.ID)': context}
return_outputs(readable_output=human_readable, outputs=outputs, raw_response=raw_response)
|
1,515 |
def plot_partial_dependence(estimator, X, features, feature_names=None,
target=None, response_method='auto', n_cols=3,
grid_resolution=100, percentiles=(0.05, 0.95),
method='auto', n_jobs=None, verbose=0, fig=None,
line_kw=None, contour_kw=None, ax=None):
"""Partial dependence plots.
The ``len(features)`` plots are arranged in a grid with ``n_cols``
columns. Two-way partial dependence plots are plotted as contour plots. The
deciles of the feature values will be shown with tick marks on the x-axes
for one-way plots, and on both axes for two-way plots.
.. note::
:func:`plot_partial_dependence` does not support using the same axes
with multiple calls. To plot the the partial dependence for multiple
estimators, please pass the axes created by the first call to the
second call::
>>> from sklearn.inspection import plot_partial_dependence
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.linear_model import LinearRegression
>>> X, y = make_friedman1()
>>> est = LinearRegression().fit(X, y)
>>> disp1 = plot_partial_dependence(est, X) #doctest: +SKIP
>>> disp2 = plot_partial_dependence(est, X,
... ax=disp1.axes_) #doctest: +SKIP
Read more in the :ref:`User Guide <partial_dependence>`.
Parameters
----------
estimator : BaseEstimator
A fitted estimator object implementing :term:`predict`,
:term:`predict_proba`, or :term:`decision_function`.
Multioutput-multiclass classifiers are not supported.
X : {array-like or dataframe} of shape (n_samples, n_features)
The data to use to build the grid of values on which the dependence
will be evaluated. This is usually the training data.
features : list of {int, str, pair of int, pair of str}
The target features for which to create the PDPs.
If features[i] is an int or a string, a one-way PDP is created; if
features[i] is a tuple, a two-way PDP is created. Each tuple must be
of size 2.
if any entry is a string, then it must be in ``feature_names``.
feature_names : array-like of shape (n_features,), dtype=str, default=None
Name of each feature; feature_names[i] holds the name of the feature
with index i.
By default, the name of the feature corresponds to their numerical
index for NumPy array and their column name for pandas dataframe.
target : int, optional (default=None)
- In a multiclass setting, specifies the class for which the PDPs
should be computed. Note that for binary classification, the
positive class (index 1) is always used.
- In a multioutput setting, specifies the task for which the PDPs
should be computed.
Ignored in binary classification or classical regression settings.
response_method : 'auto', 'predict_proba' or 'decision_function', \
optional (default='auto')
Specifies whether to use :term:`predict_proba` or
:term:`decision_function` as the target response. For regressors
this parameter is ignored and the response is always the output of
:term:`predict`. By default, :term:`predict_proba` is tried first
and we revert to :term:`decision_function` if it doesn't exist. If
``method`` is 'recursion', the response is always the output of
:term:`decision_function`.
n_cols : int, optional (default=3)
The maximum number of columns in the grid plot. Only active when `ax`
is a single axis or `None`.
grid_resolution : int, optional (default=100)
The number of equally spaced points on the axes of the plots, for each
target feature.
percentiles : tuple of float, optional (default=(0.05, 0.95))
The lower and upper percentile used to create the extreme values
for the PDP axes. Must be in [0, 1].
method : str, optional (default='auto')
The method to use to calculate the partial dependence predictions:
- 'recursion' is only supported for gradient boosting estimator (namely
:class:`GradientBoostingClassifier<sklearn.ensemble.GradientBoostingClassifier>`,
:class:`GradientBoostingRegressor<sklearn.ensemble.GradientBoostingRegressor>`,
:class:`HistGradientBoostingClassifier<sklearn.ensemble.HistGradientBoostingClassifier>`,
:class:`HistGradientBoostingRegressor<sklearn.ensemble.HistGradientBoostingRegressor>`)
but is more efficient in terms of speed.
With this method, ``X`` is optional and is only used to build the
grid and the partial dependences are computed using the training
data. This method does not account for the ``init`` predictor of
the boosting process, which may lead to incorrect values (see
warning below. With this method, the target response of a
classifier is always the decision function, not the predicted
probabilities.
- 'brute' is supported for any estimator, but is more
computationally intensive.
- 'auto':
- 'recursion' is used for estimators that supports it.
- 'brute' is used for all other estimators.
n_jobs : int, optional (default=None)
The number of CPUs to use to compute the partial dependences.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
verbose : int, optional (default=0)
Verbose output during PD computations.
fig : Matplotlib figure object, optional (default=None)
A figure object onto which the plots will be drawn, after the figure
has been cleared. By default, a new one is created.
.. deprecated:: 0.22
``fig`` will be removed in 0.24.
line_kw : dict, optional
Dict with keywords passed to the ``matplotlib.pyplot.plot`` call.
For one-way partial dependence plots.
contour_kw : dict, optional
Dict with keywords passed to the ``matplotlib.pyplot.contourf`` call.
For two-way partial dependence plots.
ax : Matplotlib axes or array-like of Matplotlib axes, default=None
- If a single axis is passed in, it is treated as a bounding axes
and a grid of partial dependence plots will be drawn within
these bounds. The `n_cols` parameter controls the number of
columns in the grid.
- If an array-like of axes are passed in, the partial dependence
plots will be drawn directly into these axes.
- If `None`, a figure and a bounding axes is created and treated
as the single axes case.
.. versionadded:: 0.22
Returns
-------
display: :class:`~sklearn.inspection.PartialDependenceDisplay`
Examples
--------
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.ensemble import GradientBoostingRegressor
>>> X, y = make_friedman1()
>>> clf = GradientBoostingRegressor(n_estimators=10).fit(X, y)
>>> plot_partial_dependence(clf, X, [0, (0, 1)]) #doctest: +SKIP
See also
--------
sklearn.inspection.partial_dependence: Return raw partial
dependence values
Warnings
--------
The 'recursion' method only works for gradient boosting estimators, and
unlike the 'brute' method, it does not account for the ``init``
predictor of the boosting process. In practice this will produce the
same values as 'brute' up to a constant offset in the target response,
provided that ``init`` is a consant estimator (which is the default).
However, as soon as ``init`` is not a constant estimator, the partial
dependence values are incorrect for 'recursion'. This is not relevant for
:class:`HistGradientBoostingClassifier
<sklearn.ensemble.HistGradientBoostingClassifier>` and
:class:`HistGradientBoostingRegressor
<sklearn.ensemble.HistGradientBoostingRegressor>`, which do not have an
``init`` parameter.
"""
check_matplotlib_support('plot_partial_dependence') # noqa
import matplotlib.pyplot as plt # noqa
from matplotlib import transforms # noqa
from matplotlib.ticker import MaxNLocator # noqa
from matplotlib.ticker import ScalarFormatter # noqa
# set target_idx for multi-class estimators
if hasattr(estimator, 'classes_') and np.size(estimator.classes_) > 2:
if target is None:
raise ValueError('target must be specified for multi-class')
target_idx = np.searchsorted(estimator.classes_, target)
if (not (0 <= target_idx < len(estimator.classes_)) or
estimator.classes_[target_idx] != target):
raise ValueError('target not in est.classes_, got {}'.format(
target))
else:
# regression and binary classification
target_idx = 0
# Use check_array only on lists and other non-array-likes / sparse. Do not
# convert DataFrame into a NumPy array.
if not(hasattr(X, '__array__') or sparse.issparse(X)):
X = check_array(X, force_all_finite='allow-nan', dtype=np.object)
n_features = X.shape[1]
# convert feature_names to list
if feature_names is None:
if hasattr(X, "loc"):
# get the column names for a pandas dataframe
feature_names = X.columns.tolist()
else:
# define a list of numbered indices for a numpy array
feature_names = [str(i) for i in range(n_features)]
elif isinstance(feature_names, np.ndarray):
feature_names = feature_names.tolist()
if len(set(feature_names)) != len(feature_names):
raise ValueError('feature_names should not contain duplicates.')
def convert_feature(fx):
if isinstance(fx, str):
try:
fx = feature_names.index(fx)
except ValueError:
raise ValueError('Feature %s not in feature_names' % fx)
return int(fx)
# convert features into a seq of int tuples
tmp_features = []
for fxs in features:
if isinstance(fxs, (numbers.Integral, str)):
fxs = (fxs,)
try:
fxs = tuple(convert_feature(fx) for fx in fxs)
except TypeError:
raise ValueError('Each entry in features must be either an int, '
'a string, or an iterable of size at most 2.')
if not 1 <= np.size(fxs) <= 2:
raise ValueError('Each entry in features must be either an int, '
'a string, or an iterable of size at most 2.')
tmp_features.append(fxs)
features = tmp_features
if isinstance(ax, list):
if len(ax) != len(features):
raise ValueError("Expected len(ax) == len(features), "
"got len(ax) = {}".format(len(ax)))
for i in chain.from_iterable(features):
if i >= len(feature_names):
raise ValueError('All entries of features must be less than '
'len(feature_names) = {0}, got {1}.'
.format(len(feature_names), i))
# compute averaged predictions
pd_results = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(partial_dependence)(estimator, X, fxs,
response_method=response_method,
method=method,
grid_resolution=grid_resolution,
percentiles=percentiles)
for fxs in features)
# For multioutput regression, we can only check the validity of target
# now that we have the predictions.
# Also note: as multiclass-multioutput classifiers are not supported,
# multiclass and multioutput scenario are mutually exclusive. So there is
# no risk of overwriting target_idx here.
avg_preds, _ = pd_results[0] # checking the first result is enough
if is_regressor(estimator) and avg_preds.shape[0] > 1:
if target is None:
raise ValueError(
'target must be specified for multi-output regressors')
if not 0 <= target <= avg_preds.shape[0]:
raise ValueError(
'target must be in [0, n_tasks], got {}.'.format(target))
target_idx = target
# get global min and max average predictions of PD grouped by plot type
pdp_lim = {}
for avg_preds, values in pd_results:
min_pd = avg_preds[target_idx].min()
max_pd = avg_preds[target_idx].max()
n_fx = len(values)
old_min_pd, old_max_pd = pdp_lim.get(n_fx, (min_pd, max_pd))
min_pd = min(min_pd, old_min_pd)
max_pd = max(max_pd, old_max_pd)
pdp_lim[n_fx] = (min_pd, max_pd)
deciles = {}
for fx in chain.from_iterable(features):
if fx not in deciles:
X_col = _safe_indexing(X, fx, axis=1)
deciles[fx] = mquantiles(X_col, prob=np.arange(0.1, 1.0, 0.1))
if fig is not None:
warnings.warn("The fig parameter is deprecated in version "
"0.22 and will be removed in version 0.24",
FutureWarning)
fig.clear()
ax = fig.gca()
display = PartialDependenceDisplay(pd_results, features, feature_names,
target_idx, pdp_lim, deciles)
return display.plot(ax=ax, n_cols=n_cols, line_kw=line_kw,
contour_kw=contour_kw)
|
def plot_partial_dependence(estimator, X, features, feature_names=None,
target=None, response_method='auto', n_cols=3,
grid_resolution=100, percentiles=(0.05, 0.95),
method='auto', n_jobs=None, verbose=0, fig=None,
line_kw=None, contour_kw=None, ax=None):
"""Partial dependence plots.
The ``len(features)`` plots are arranged in a grid with ``n_cols``
columns. Two-way partial dependence plots are plotted as contour plots. The
deciles of the feature values will be shown with tick marks on the x-axes
for one-way plots, and on both axes for two-way plots.
.. note::
:func:`plot_partial_dependence` does not support using the same axes
        with multiple calls. To plot the partial dependence for multiple
estimators, please pass the axes created by the first call to the
second call::
>>> from sklearn.inspection import plot_partial_dependence
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.linear_model import LinearRegression
>>> X, y = make_friedman1()
>>> est = LinearRegression().fit(X, y)
>>> disp1 = plot_partial_dependence(est, X) #doctest: +SKIP
>>> disp2 = plot_partial_dependence(est, X,
... ax=disp1.axes_) # doctest: +SKIP
Read more in the :ref:`User Guide <partial_dependence>`.
Parameters
----------
estimator : BaseEstimator
A fitted estimator object implementing :term:`predict`,
:term:`predict_proba`, or :term:`decision_function`.
Multioutput-multiclass classifiers are not supported.
X : {array-like or dataframe} of shape (n_samples, n_features)
The data to use to build the grid of values on which the dependence
will be evaluated. This is usually the training data.
features : list of {int, str, pair of int, pair of str}
The target features for which to create the PDPs.
If features[i] is an int or a string, a one-way PDP is created; if
features[i] is a tuple, a two-way PDP is created. Each tuple must be
of size 2.
if any entry is a string, then it must be in ``feature_names``.
feature_names : array-like of shape (n_features,), dtype=str, default=None
Name of each feature; feature_names[i] holds the name of the feature
with index i.
By default, the name of the feature corresponds to their numerical
index for NumPy array and their column name for pandas dataframe.
target : int, optional (default=None)
- In a multiclass setting, specifies the class for which the PDPs
should be computed. Note that for binary classification, the
positive class (index 1) is always used.
- In a multioutput setting, specifies the task for which the PDPs
should be computed.
Ignored in binary classification or classical regression settings.
response_method : 'auto', 'predict_proba' or 'decision_function', \
optional (default='auto')
Specifies whether to use :term:`predict_proba` or
:term:`decision_function` as the target response. For regressors
this parameter is ignored and the response is always the output of
:term:`predict`. By default, :term:`predict_proba` is tried first
and we revert to :term:`decision_function` if it doesn't exist. If
``method`` is 'recursion', the response is always the output of
:term:`decision_function`.
n_cols : int, optional (default=3)
The maximum number of columns in the grid plot. Only active when `ax`
is a single axis or `None`.
grid_resolution : int, optional (default=100)
The number of equally spaced points on the axes of the plots, for each
target feature.
percentiles : tuple of float, optional (default=(0.05, 0.95))
The lower and upper percentile used to create the extreme values
for the PDP axes. Must be in [0, 1].
method : str, optional (default='auto')
The method to use to calculate the partial dependence predictions:
        - 'recursion' is only supported for gradient boosting estimators (namely
:class:`GradientBoostingClassifier<sklearn.ensemble.GradientBoostingClassifier>`,
:class:`GradientBoostingRegressor<sklearn.ensemble.GradientBoostingRegressor>`,
:class:`HistGradientBoostingClassifier<sklearn.ensemble.HistGradientBoostingClassifier>`,
:class:`HistGradientBoostingRegressor<sklearn.ensemble.HistGradientBoostingRegressor>`)
but is more efficient in terms of speed.
With this method, ``X`` is optional and is only used to build the
grid and the partial dependences are computed using the training
data. This method does not account for the ``init`` predictor of
the boosting process, which may lead to incorrect values (see
        warning below). With this method, the target response of a
classifier is always the decision function, not the predicted
probabilities.
- 'brute' is supported for any estimator, but is more
computationally intensive.
- 'auto':
        - 'recursion' is used for estimators that support it.
- 'brute' is used for all other estimators.
n_jobs : int, optional (default=None)
The number of CPUs to use to compute the partial dependences.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
verbose : int, optional (default=0)
Verbose output during PD computations.
fig : Matplotlib figure object, optional (default=None)
A figure object onto which the plots will be drawn, after the figure
has been cleared. By default, a new one is created.
.. deprecated:: 0.22
``fig`` will be removed in 0.24.
line_kw : dict, optional
Dict with keywords passed to the ``matplotlib.pyplot.plot`` call.
For one-way partial dependence plots.
contour_kw : dict, optional
Dict with keywords passed to the ``matplotlib.pyplot.contourf`` call.
For two-way partial dependence plots.
ax : Matplotlib axes or array-like of Matplotlib axes, default=None
- If a single axis is passed in, it is treated as a bounding axes
and a grid of partial dependence plots will be drawn within
these bounds. The `n_cols` parameter controls the number of
columns in the grid.
- If an array-like of axes are passed in, the partial dependence
plots will be drawn directly into these axes.
- If `None`, a figure and a bounding axes is created and treated
as the single axes case.
.. versionadded:: 0.22
Returns
-------
display: :class:`~sklearn.inspection.PartialDependenceDisplay`
Examples
--------
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.ensemble import GradientBoostingRegressor
>>> X, y = make_friedman1()
>>> clf = GradientBoostingRegressor(n_estimators=10).fit(X, y)
>>> plot_partial_dependence(clf, X, [0, (0, 1)]) #doctest: +SKIP
See also
--------
sklearn.inspection.partial_dependence: Return raw partial
dependence values
Warnings
--------
The 'recursion' method only works for gradient boosting estimators, and
unlike the 'brute' method, it does not account for the ``init``
predictor of the boosting process. In practice this will produce the
same values as 'brute' up to a constant offset in the target response,
    provided that ``init`` is a constant estimator (which is the default).
However, as soon as ``init`` is not a constant estimator, the partial
dependence values are incorrect for 'recursion'. This is not relevant for
:class:`HistGradientBoostingClassifier
<sklearn.ensemble.HistGradientBoostingClassifier>` and
:class:`HistGradientBoostingRegressor
<sklearn.ensemble.HistGradientBoostingRegressor>`, which do not have an
``init`` parameter.
"""
check_matplotlib_support('plot_partial_dependence') # noqa
import matplotlib.pyplot as plt # noqa
from matplotlib import transforms # noqa
from matplotlib.ticker import MaxNLocator # noqa
from matplotlib.ticker import ScalarFormatter # noqa
# set target_idx for multi-class estimators
if hasattr(estimator, 'classes_') and np.size(estimator.classes_) > 2:
if target is None:
raise ValueError('target must be specified for multi-class')
target_idx = np.searchsorted(estimator.classes_, target)
if (not (0 <= target_idx < len(estimator.classes_)) or
estimator.classes_[target_idx] != target):
raise ValueError('target not in est.classes_, got {}'.format(
target))
else:
# regression and binary classification
target_idx = 0
# Use check_array only on lists and other non-array-likes / sparse. Do not
# convert DataFrame into a NumPy array.
if not(hasattr(X, '__array__') or sparse.issparse(X)):
X = check_array(X, force_all_finite='allow-nan', dtype=np.object)
n_features = X.shape[1]
# convert feature_names to list
if feature_names is None:
if hasattr(X, "loc"):
# get the column names for a pandas dataframe
feature_names = X.columns.tolist()
else:
# define a list of numbered indices for a numpy array
feature_names = [str(i) for i in range(n_features)]
elif isinstance(feature_names, np.ndarray):
feature_names = feature_names.tolist()
if len(set(feature_names)) != len(feature_names):
raise ValueError('feature_names should not contain duplicates.')
def convert_feature(fx):
if isinstance(fx, str):
try:
fx = feature_names.index(fx)
except ValueError:
raise ValueError('Feature %s not in feature_names' % fx)
return int(fx)
# convert features into a seq of int tuples
tmp_features = []
for fxs in features:
if isinstance(fxs, (numbers.Integral, str)):
fxs = (fxs,)
try:
fxs = tuple(convert_feature(fx) for fx in fxs)
except TypeError:
raise ValueError('Each entry in features must be either an int, '
'a string, or an iterable of size at most 2.')
if not 1 <= np.size(fxs) <= 2:
raise ValueError('Each entry in features must be either an int, '
'a string, or an iterable of size at most 2.')
tmp_features.append(fxs)
features = tmp_features
if isinstance(ax, list):
if len(ax) != len(features):
raise ValueError("Expected len(ax) == len(features), "
"got len(ax) = {}".format(len(ax)))
for i in chain.from_iterable(features):
if i >= len(feature_names):
raise ValueError('All entries of features must be less than '
'len(feature_names) = {0}, got {1}.'
.format(len(feature_names), i))
# compute averaged predictions
pd_results = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(partial_dependence)(estimator, X, fxs,
response_method=response_method,
method=method,
grid_resolution=grid_resolution,
percentiles=percentiles)
for fxs in features)
# For multioutput regression, we can only check the validity of target
# now that we have the predictions.
# Also note: as multiclass-multioutput classifiers are not supported,
# multiclass and multioutput scenario are mutually exclusive. So there is
# no risk of overwriting target_idx here.
avg_preds, _ = pd_results[0] # checking the first result is enough
if is_regressor(estimator) and avg_preds.shape[0] > 1:
if target is None:
raise ValueError(
'target must be specified for multi-output regressors')
if not 0 <= target <= avg_preds.shape[0]:
raise ValueError(
'target must be in [0, n_tasks], got {}.'.format(target))
target_idx = target
# get global min and max average predictions of PD grouped by plot type
pdp_lim = {}
for avg_preds, values in pd_results:
min_pd = avg_preds[target_idx].min()
max_pd = avg_preds[target_idx].max()
n_fx = len(values)
old_min_pd, old_max_pd = pdp_lim.get(n_fx, (min_pd, max_pd))
min_pd = min(min_pd, old_min_pd)
max_pd = max(max_pd, old_max_pd)
pdp_lim[n_fx] = (min_pd, max_pd)
deciles = {}
for fx in chain.from_iterable(features):
if fx not in deciles:
X_col = _safe_indexing(X, fx, axis=1)
deciles[fx] = mquantiles(X_col, prob=np.arange(0.1, 1.0, 0.1))
if fig is not None:
warnings.warn("The fig parameter is deprecated in version "
"0.22 and will be removed in version 0.24",
FutureWarning)
fig.clear()
ax = fig.gca()
display = PartialDependenceDisplay(pd_results, features, feature_names,
target_idx, pdp_lim, deciles)
return display.plot(ax=ax, n_cols=n_cols, line_kw=line_kw,
contour_kw=contour_kw)
|
57,720 |
def main():
""" Main Function"""
try:
LOG('Command is %s' % (demisto.command(),))
global ACCESS_TOKEN, REFRESH_TOKEN
ACCESS_TOKEN = demisto.params().get('access_token')
REFRESH_TOKEN = demisto.params().get('refresh_token')
if demisto.command() == 'get-dlp-report':
report_id = demisto.args().get('report_id')
fetch_snippets = demisto.args().get('fetch_snippets', 'false') == 'true'
report_json, status_code = get_dlp_report(report_id, fetch_snippets)
parse_dlp_report(report_json)
if demisto.command() == "test-module":
test()
except Exception as e:
demisto.debug('Unknown Command')
error_message = str(e)
return_error(error_message)
finally:
LOG.print_log()
|
def main():
""" Main Function"""
try:
LOG('Command is %s' % (demisto.command(),))
global ACCESS_TOKEN, REFRESH_TOKEN
ACCESS_TOKEN = demisto.params().get('access_token')
REFRESH_TOKEN = demisto.params().get('refresh_token')
if demisto.command() == 'get-dlp-report':
args = demisto.args()
report_id = args.get('report_id')
            fetch_snippets = args.get('fetch_snippets', 'false') == 'true'
report_json, status_code = get_dlp_report(report_id, fetch_snippets)
parse_dlp_report(report_json)
if demisto.command() == "test-module":
test()
except Exception as e:
demisto.debug('Unknown Command')
error_message = str(e)
return_error(error_message)
finally:
LOG.print_log()
|
10,594 |
def check_command(module, commandline):
arguments = {'chown': 'owner', 'chmod': 'mode', 'chgrp': 'group',
'ln': 'state=link', 'mkdir': 'state=directory',
'rmdir': 'state=absent', 'rm': 'state=absent', 'touch': 'state=touch'}
commands = {'curl': 'get_url or uri', 'wget': 'get_url or uri',
'svn': 'subversion', 'service': 'service',
'mount': 'mount', 'rpm': 'yum, dnf or zypper', 'yum': 'yum', 'apt-get': 'apt',
'tar': 'unarchive', 'unzip': 'unarchive', 'sed': 'replace, lineinfile or template',
'dnf': 'dnf', 'zypper': 'zypper'}
become = ['sudo', 'su', 'pbrun', 'pfexec', 'runas', 'pmrun', 'machinectl']
if isinstance(commandline, list):
command = commandline[0]
else:
command = commandline.split()[0]
command = os.path.basename(command)
disable_suffix = "If you need to use {cmd} command because the module {mod} is insufficient you can add" \
" 'warn: false' to this command task or set 'command_warnings=False' in" \
" the defaults section of ansible.cfg to get rid of this message."
substitutions = {'mod': None, 'cmd': command}
if command in arguments:
msg = "Consider using the {mod} module with {subcmd} rather than running '{cmd}'. " + disable_suffix
substitutions['mod'] = 'file'
substitutions['subcmd'] = arguments[command]
module.warn(msg.format(**substitutions))
if command in commands:
msg = "Consider using the {mod} module rather than running '{cmd}'. " + disable_suffix
substitutions['mod'] = commands[command]
module.warn(msg.format(**substitutions))
if command in become:
module.warn("Consider using 'become', 'become_method', and 'become_user' rather than running %s" % (command,))
|
def check_command(module, commandline):
arguments = {'chown': 'owner', 'chmod': 'mode', 'chgrp': 'group',
'ln': 'state=link', 'mkdir': 'state=directory',
'rmdir': 'state=absent', 'rm': 'state=absent', 'touch': 'state=touch'}
commands = {'curl': 'get_url or uri', 'wget': 'get_url or uri',
'svn': 'subversion', 'service': 'service',
'mount': 'mount', 'rpm': 'yum, dnf or zypper', 'yum': 'yum', 'apt-get': 'apt',
'tar': 'unarchive', 'unzip': 'unarchive', 'sed': 'replace, lineinfile or template',
'dnf': 'dnf', 'zypper': 'zypper'}
become = ['sudo', 'su', 'pbrun', 'pfexec', 'runas', 'pmrun', 'machinectl']
if isinstance(commandline, list):
command = commandline[0]
else:
command = commandline.split()[0]
command = os.path.basename(command)
disable_suffix = "If you need to use '{cmd}' because the {mod} module is insufficient you can add" \
" 'warn: false' to this command task or set 'command_warnings=False' in" \
" the defaults section of ansible.cfg to get rid of this message."
substitutions = {'mod': None, 'cmd': command}
if command in arguments:
msg = "Consider using the {mod} module with {subcmd} rather than running '{cmd}'. " + disable_suffix
substitutions['mod'] = 'file'
substitutions['subcmd'] = arguments[command]
module.warn(msg.format(**substitutions))
if command in commands:
msg = "Consider using the {mod} module rather than running '{cmd}'. " + disable_suffix
substitutions['mod'] = commands[command]
module.warn(msg.format(**substitutions))
if command in become:
module.warn("Consider using 'become', 'become_method', and 'become_user' rather than running %s" % (command,))
|
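An illustrative call to check_command above, using a minimal stand-in for the Ansible module object (only the .warn method is exercised); _FakeModule is a hypothetical test double, not part of Ansible.

class _FakeModule:
    def warn(self, msg):
        print("WARNING:", msg)

# Suggests the get_url/uri modules instead of shelling out to curl.
check_command(_FakeModule(), "curl -s https://example.com")
# Suggests 'become' instead of prefixing the command with sudo.
check_command(_FakeModule(), ["sudo", "systemctl", "restart", "nginx"])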
49,441 |
def datetime_from_timestamp(t, units=1000):
if isinstance(t, time.struct_time):
v = time.mktime(t)
else:
v = t
value = datetime.datetime(1970, 1, 1,
tzinfo=datetime.timezone.utc
) + datetime.timedelta(seconds=(int(v) // units))
return value
|
def datetime_from_timestamp(t, units=1000):
if isinstance(t, time.struct_time):
v = int(time.mktime(t))
elif isinstance(t, (float, int)):
v = int(t)
else:
raise TypeError(t)
epoch = datetime.datetime(1970, 1, 1, tzinfo=datetime.timezone.utc)
return epoch + datetime.timedelta(seconds=v // units)
|
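A worked example for the helper above: with the default units=1000 the argument is read as a millisecond timestamp and truncated to whole seconds.

import datetime

assert datetime_from_timestamp(1_600_000_000_000) == datetime.datetime(
    2020, 9, 13, 12, 26, 40, tzinfo=datetime.timezone.utc
)
assert datetime_from_timestamp(1_600_000_000, units=1) == datetime.datetime(
    2020, 9, 13, 12, 26, 40, tzinfo=datetime.timezone.utc
)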
42,133 |
def get_rouge_w(evaluated_sentence, reference_sentence,
f=lambda x: x**2, inv_f=lambda x: math.sqrt(x)):
"""
Computes ROUGE-W of two sequences, namely evaluated_sentence and reference sentece
Reference: https://www.aclweb.org/anthology/W04-1013.pdf
Args:
evaluated_sentence: a sentence that have been produced by the summarizer
reference_sentence: a sentence from the reference set
Returns:
dictionary. {'f': f1_score, 'p': precision, 'r': recall} for ROUGE-W
"""
if not type(evaluated_sentence) == str:
raise ValueError("Hypothesis should be a sentence.")
if not type(reference_sentence) == str:
raise ValueError("reference should be a sentence")
eval_sentence = evaluated_sentence.split()
ref_sentence = reference_sentence.split()
n = len(ref_sentence)
m = len(eval_sentence)
wlcs = _w_lcs(eval_sentence, ref_sentence)
return _frp_rouge_w(wlcs, n, m, f, inv_f)
|
def get_rouge_w(evaluated_sentence, reference_sentence,
f=lambda x: x**2, inv_f=lambda x: math.sqrt(x)):
"""
    Computes ROUGE-W of two sequences, namely evaluated_sentence and reference_sentence
Reference: https://www.aclweb.org/anthology/W04-1013.pdf
Args:
        evaluated_sentence: a sentence that has been produced by the summarizer
reference_sentence: a sentence from the reference set
Returns:
dictionary. {'f': f1_score, 'p': precision, 'r': recall} for ROUGE-W
"""
if not type(evaluated_sentence) == str:
raise ValueError("Hypothesis should be a sentence.")
if not type(reference_sentence) == str:
raise ValueError("reference should be a sentence")
eval_sentence = evaluated_sentence.split()
ref_sentence = reference_sentence.split()
n = len(ref_sentence)
m = len(eval_sentence)
wlcs = _w_lcs(eval_sentence, ref_sentence)
return _frp_rouge_w(wlcs, len(ref_sentence), len(eval_sentence), f, inv_f)
|
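A hedged usage sketch for get_rouge_w above; it depends on the module-level helpers _w_lcs and _frp_rouge_w, which are not reproduced here, so only the call shape and return format are shown.

scores = get_rouge_w(
    "the cat sat on the mat",        # evaluated (hypothesis) sentence
    "the cat was found on the mat",  # reference sentence
)
print(scores)  # {'f': ..., 'p': ..., 'r': ...} per the docstring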
43,184 |
def get_world_info() -> Tuple[int, int, int]:
"""Returns a tuple of (data, tensor, pipeline, virtual pipeline)-parallel-world_size for logger."""
if model_parallel_is_initialized():
return (
get_data_parallel_world_size(),
get_tensor_model_parallel_world_size(),
get_pipeline_model_parallel_world_size(),
get_virtual_pipeline_model_parallel_world_size(),
)
return (0, 0, 0, 0)
|
def get_world_info() -> Tuple[int, int, int, int]:
"""Returns a tuple of (data, tensor, pipeline, virtual pipeline)-parallel-world_size for logger."""
if model_parallel_is_initialized():
return (
get_data_parallel_world_size(),
get_tensor_model_parallel_world_size(),
get_pipeline_model_parallel_world_size(),
get_virtual_pipeline_model_parallel_world_size(),
)
return (0, 0, 0, 0)
|
42,894 |
def about():
"""About box for Strawberry Fields.
Prints the installed version numbers for SF and its dependencies,
and some system info. Please include this information in bug reports.
"""
import sys
import platform
import os
import numpy
import scipy
# a QuTiP-style infobox
print('\nStrawberry Fields: a Python library for continuous variable quantum circuits.')
print('Copyright 2019 Xanadu Quantum Technologies Inc.\n')
print('Python version: {}.{}.{}'.format(*sys.version_info[0:3]))
print('Platform info: {}'.format(platform.platform()))
print('Installation path: {}'.format(os.path.dirname(__file__)))
print('Strawberry Fields version: {}'.format(__version__))
print('Numpy version: {}'.format(numpy.__version__))
print('Scipy version: {}'.format(scipy.__version__))
try:
import tensorflow
tf_version = tensorflow.__version__
except ModuleNotFoundError:
tf_version = None
print('TensorFlow version: {}'.format(tf_version))
|
def about():
"""About box for Strawberry Fields.
Prints the installed version numbers for SF and its dependencies,
and some system info. Please include this information in bug reports.
"""
import sys
import platform
import os
import numpy
import scipy
# a QuTiP-style infobox
print('\nStrawberry Fields: a Python library for continuous variable quantum circuits.')
print('Copyright 2018-2019 Xanadu Quantum Technologies Inc.\n')
print('Python version: {}.{}.{}'.format(*sys.version_info[0:3]))
print('Platform info: {}'.format(platform.platform()))
print('Installation path: {}'.format(os.path.dirname(__file__)))
print('Strawberry Fields version: {}'.format(__version__))
print('Numpy version: {}'.format(numpy.__version__))
print('Scipy version: {}'.format(scipy.__version__))
try:
import tensorflow
tf_version = tensorflow.__version__
except ModuleNotFoundError:
tf_version = None
print('TensorFlow version: {}'.format(tf_version))
|
51,404 |
def test_proxies_from_env_http_proxy_for_wss_proto(mocker) -> None:
url = URL('http://aiohttp.io/path')
mocker.patch.dict(os.environ, {'wss_proxy': str(url)})
ret = helpers.proxies_from_env()
assert ret.keys() == {'wss'}
assert ret['wss'].proxy == url
assert ret['wss'].proxy_auth is None
|
def test_proxies_from_env_http_proxy_for_wss_proto(monkeypatch) -> None:
url = URL('http://aiohttp.io/path')
monkeypatch.setenv('wss_proxy', str(url))
ret = helpers.proxies_from_env()
assert ret.keys() == {'wss'}
assert ret['wss'].proxy == url
assert ret['wss'].proxy_auth is None
|
41,235 |
def _gridqubits_to_graph_device(qubits: Iterable[cirq.GridQubit]):
# cirq contrib: routing.gridqubits_to_graph_device
def _manhattan_distance(qubit1: cirq.GridQubit, qubit2: cirq.GridQubit) -> int:
return abs(qubit1.row - qubit2.row) + abs(qubit1.col - qubit2.col)
return nx.Graph(
pair for pair in itertools.combinations(qubits, 2) if _manhattan_distance(*pair) == 1
)
|
def _gridqubits_to_graph_device(qubits: Iterable[cirq.GridQubit]):
# cirq contrib: routing.gridqubits_to_graph_device
def _manhattan_distance(qubit1: cirq.GridQubit, qubit2: cirq.GridQubit) -> int:
return abs(qubit1.row - qubit2.row) + abs(qubit1.col - qubit2.col)
return nx.Graph(
pair for pair in itertools.combinations(qubits, 2) if pair[0].is_adjacent(pair[1])
)
|
54,137 |
def get_dataflow(config):
# - Get train/test datasets
if idist.get_local_rank() > 0:
# Ensure that only rank 0 download the dataset
idist.barrier()
train_dataset, test_dataset = utils.get_train_test_datasets(config["data_path"])
if idist.get_local_rank() == 0:
# Ensure that only rank 0 download the dataset
idist.barrier()
# Setup data loader also adapted to distributed config: nccl, gloo, xla-tpu
train_loader = idist.auto_dataloader(
train_dataset, batch_size=config["batch_size"], num_workers=config["num_workers"], shuffle=True, drop_last=True,
)
test_loader = idist.auto_dataloader(
test_dataset, batch_size=2 * config["batch_size"], num_workers=config["num_workers"], shuffle=False,
)
return train_loader, test_loader
|
def get_dataflow(config):
# - Get train/test datasets
if idist.get_local_rank() > 0:
        # Ensure that only local rank 0 downloads the dataset,
        # so each node downloads its own copy of the dataset
idist.barrier()
train_dataset, test_dataset = utils.get_train_test_datasets(config["data_path"])
if idist.get_local_rank() == 0:
# Ensure that only rank 0 download the dataset
idist.barrier()
# Setup data loader also adapted to distributed config: nccl, gloo, xla-tpu
train_loader = idist.auto_dataloader(
train_dataset, batch_size=config["batch_size"], num_workers=config["num_workers"], shuffle=True, drop_last=True,
)
test_loader = idist.auto_dataloader(
test_dataset, batch_size=2 * config["batch_size"], num_workers=config["num_workers"], shuffle=False,
)
return train_loader, test_loader
|
25,309 |
def text_to_file(text: str, filename: str = "file.txt", *, spoiler: bool = False):
"""Prepares text to be sent as a file on Discord, without character limit.
This writes text into a bytes object that can be used for the ``file`` or ``files`` parameters\
of :func:`discord.abc.send`.
Parameters
----------
text: str
The text to put in your file.
filename: str
The name of the file sent. Defaults to ``file.txt``.
spoiler: bool
Whether the attachment is a spoiler. Defaults to ``False``.
Returns
-------
discord.File
"""
file = BytesIO(bytes(text))
return discord.File(file, filename, spoiler=spoiler)
|
def text_to_file(text: str, filename: str = "file.txt", *, spoiler: bool = False, encoding: str = "utf-8"):
"""Prepares text to be sent as a file on Discord, without character limit.
This writes text into a bytes object that can be used for the ``file`` or ``files`` parameters\
of :func:`discord.abc.send`.
Parameters
----------
text: str
The text to put in your file.
filename: str
The name of the file sent. Defaults to ``file.txt``.
spoiler: bool
Whether the attachment is a spoiler. Defaults to ``False``.
Returns
-------
discord.File
"""
file = BytesIO(text.encode(encoding))
return discord.File(file, filename, spoiler=spoiler)
|
41,765 |
def get_storage(storage):
# type: (Union[None, str, BaseStorage]) -> BaseStorage
if storage is None:
return InMemoryStorage()
if isinstance(storage, str):
if storage[:5] == 'redis':
return RedisStorage(redis_url=storage)
else:
return RDBStorage(storage)
else:
return storage
|
def get_storage(storage):
# type: (Union[None, str, BaseStorage]) -> BaseStorage
if storage is None:
return InMemoryStorage()
if isinstance(storage, str):
if storage.startswith('redis'):
return RedisStorage(redis_url=storage)
else:
return RDBStorage(storage)
else:
return storage
|
17,815 |
def run(plotIt=True, saveFig=False, cleanup=True):
"""
Run 1D inversions for a single sounding of the RESOLVE and SkyTEM
bookpurnong data
:param bool plotIt: show the plots?
:param bool saveFig: save the figure
:param bool cleanup: remove the downloaded results
"""
downloads, directory = download_and_unzip_data()
resolve = h5py.File(
os.path.sep.join([directory, "booky_resolve.hdf5"]),
"r"
)
skytem = h5py.File(
os.path.sep.join([directory, "booky_skytem.hdf5"]),
"r"
)
river_path = resolve["river_path"].value
# Choose a sounding location to invert
xloc, yloc = 462100.0, 6196500.0
rxind_skytem = np.argmin(
abs(skytem["xy"][:, 0]-xloc)+abs(skytem["xy"][:, 1]-yloc)
)
rxind_resolve = np.argmin(
abs(resolve["xy"][:, 0]-xloc)+abs(resolve["xy"][:, 1]-yloc)
)
# Plot both resolve and skytem data on 2D plane
fig = plt.figure(figsize=(13, 6))
title = ["RESOLVE In-phase 400 Hz", "SkyTEM High moment 156 $\mu$s"]
ax1 = plt.subplot(121)
ax2 = plt.subplot(122)
axs = [ax1, ax2]
out_re = utils.plot2Ddata(
resolve["xy"], resolve["data"][:, 0], ncontour=100,
contourOpts={"cmap": "viridis"}, ax=ax1
)
vmin, vmax = out_re[0].get_clim()
cb_re = plt.colorbar(
out_re[0], ticks=np.linspace(vmin, vmax, 3), ax=ax1,
fraction=0.046, pad=0.04
)
temp_skytem = skytem["data"][:, 5].copy()
temp_skytem[skytem["data"][:, 5] > 7e-10] = 7e-10
out_sky = utils.plot2Ddata(
skytem["xy"][:, :2], temp_skytem, ncontour=100,
contourOpts={"cmap": "viridis", "vmax": 7e-10}, ax=ax2
)
vmin, vmax = out_sky[0].get_clim()
cb_sky = plt.colorbar(
out_sky[0], ticks=np.linspace(vmin, vmax*0.99, 3), ax=ax2,
format="%.1e", fraction=0.046, pad=0.04
)
cb_re.set_label("Bz (ppm)")
cb_sky.set_label("dB$_z$ / dt (V/A-m$^4$)")
for i, ax in enumerate(axs):
xticks = [460000, 463000]
yticks = [6195000, 6198000, 6201000]
ax.set_xticks(xticks)
ax.set_yticks(yticks)
ax.plot(xloc, yloc, 'wo')
ax.plot(river_path[:, 0], river_path[:, 1], 'k', lw=0.5)
ax.set_aspect("equal")
if i == 1:
ax.plot(
skytem["xy"][:, 0], skytem["xy"][:, 1], 'k.',
alpha=0.02, ms=1
)
ax.set_yticklabels([str(" ") for f in yticks])
else:
ax.plot(
resolve["xy"][:, 0], resolve["xy"][:, 1], 'k.', alpha=0.02,
ms=1
)
ax.set_yticklabels([str(f) for f in yticks])
ax.set_ylabel("Northing (m)")
ax.set_xlabel("Easting (m)")
ax.set_title(title[i])
ax.axis('equal')
# plt.tight_layout()
if saveFig is True:
fig.savefig("resolve_skytem_data.png", dpi=600)
# ------------------ Mesh ------------------ #
# Step1: Set 2D cylindrical mesh
cs, ncx, ncz, npad = 1., 10., 10., 20
hx = [(cs, ncx), (cs, npad, 1.3)]
npad = 12
temp = np.logspace(np.log10(1.), np.log10(12.), 19)
temp_pad = temp[-1] * 1.3 ** np.arange(npad)
hz = np.r_[temp_pad[::-1], temp[::-1], temp, temp_pad]
mesh = discretize.CylMesh([hx, 1, hz], '00C')
active = mesh.vectorCCz < 0.
# Step2: Set a SurjectVertical1D mapping
# Note: this sets our inversion model as 1D log conductivity
# below subsurface
active = mesh.vectorCCz < 0.
actMap = maps.InjectActiveCells(mesh, active, np.log(1e-8), nC=mesh.nCz)
mapping = maps.ExpMap(mesh) * maps.SurjectVertical1D(mesh) * actMap
sig_half = 1e-1
sig_air = 1e-8
sigma = np.ones(mesh.nCz)*sig_air
sigma[active] = sig_half
# Initial and reference model
m0 = np.log(sigma[active])
# ------------------ RESOLVE Forward Simulation ------------------ #
# Step3: Invert Resolve data
# Bird height from the surface
b_height_resolve = resolve["src_elevation"].value
src_height_resolve = b_height_resolve[rxind_resolve]
# Set Rx (In-phase and Quadrature)
rxOffset = 7.86
bzr = FDEM.Rx.PointMagneticFluxDensitySecondary(
np.array([[rxOffset, 0., src_height_resolve]]),
orientation='z',
component='real'
)
bzi = FDEM.Rx.PointMagneticFluxDensity(
np.array([[rxOffset, 0., src_height_resolve]]),
orientation='z',
component='imag'
)
# Set Source (In-phase and Quadrature)
frequency_cp = resolve["frequency_cp"].value
freqs = frequency_cp.copy()
srcLoc = np.array([0., 0., src_height_resolve])
srcList = [FDEM.Src.MagDipole([bzr, bzi], freq, srcLoc, orientation='Z')
for freq in freqs]
# Set FDEM survey (In-phase and Quadrature)
survey = FDEM.Survey(srcList)
prb = FDEM.Simulation3DMagneticFluxDensity(
mesh, sigmaMap=mapping, Solver=Solver
)
prb.survey = survey
# ------------------ RESOLVE Inversion ------------------ #
# Primary field
bp = - mu_0/(4*np.pi*rxOffset**3)
# Observed data
cpi_inds = [0, 2, 6, 8, 10]
cpq_inds = [1, 3, 7, 9, 11]
dobs_re = np.c_[
resolve["data"][rxind_resolve, :][cpi_inds],
resolve["data"][rxind_resolve, :][cpq_inds]
].flatten() * bp * 1e-6
# Uncertainty
relative = np.repeat(np.r_[np.ones(3)*0.1, np.ones(2)*0.15], 2)
floor = 20 * abs(bp) * 1e-6
uncert = abs(dobs_re) * relative + floor
# Data Misfit
data_resolve = data.Data(dobs=dobs_re, survey=survey, standard_deviation=uncert)
dmisfit = data_misfit.L2DataMisfit(simulation=prb, data=data_resolve)
# Regularization
regMesh = discretize.TensorMesh([mesh.hz[mapping.maps[-1].indActive]])
reg = regularization.Simple(regMesh, mapping=maps.IdentityMap(regMesh))
# Optimization
opt = optimization.InexactGaussNewton(maxIter=5)
# statement of the inverse problem
invProb = inverse_problem.BaseInvProblem(dmisfit, reg, opt)
# Inversion directives and parameters
target = directives.TargetMisfit() # stop when we hit target misfit
invProb.beta = 2.
# betaest = directives.BetaEstimate_ByEig(beta0_ratio=1e0)
inv = inversion.BaseInversion(invProb, directiveList=[target])
reg.alpha_s = 1e-3
reg.alpha_x = 1.
reg.mref = m0.copy()
opt.LSshorten = 0.5
opt.remember('xc')
# run the inversion
mopt_re = inv.run(m0)
dpred_re = invProb.dpred
# ------------------ SkyTEM Forward Simulation ------------------ #
# Step4: Invert SkyTEM data
# Bird height from the surface
b_height_skytem = skytem["src_elevation"].value
src_height = b_height_skytem[rxind_skytem]
srcLoc = np.array([0., 0., src_height])
# Radius of the source loop
area = skytem["area"].value
radius = np.sqrt(area/np.pi)
rxLoc = np.array([[radius, 0., src_height]])
# Parameters for current waveform
t0 = skytem["t0"].value
times = skytem["times"].value
waveform_skytem = skytem["waveform"].value
offTime = t0
times_off = times - t0
# Note: we are Using theoretical VTEM waveform,
# but effectively fits SkyTEM waveform
peakTime = 1.0000000e-02
a = 3.
dbdt_z = TDEM.Rx.PointMagneticFluxTimeDerivative(
locations=rxLoc, times=times_off[:-3]+offTime, orientation='z'
) # vertical db_dt
rxList = [dbdt_z] # list of receivers
srcList = [
TDEM.Src.CircularLoop(
rxList, loc=srcLoc, radius=radius,
orientation='z',
waveform=TDEM.Src.VTEMWaveform(
offTime=offTime, peakTime=peakTime, a=3.
)
)
]
# solve the problem at these times
timeSteps = [
(peakTime/5, 5), ((offTime-peakTime)/5, 5),
(1e-5, 5), (5e-5, 5), (1e-4, 10), (5e-4, 15)
]
prob = TDEM.Simulation3DElectricField(
mesh, time_steps=timeSteps, sigmaMap=mapping, Solver=Solver
)
survey = TDEM.Survey(srcList)
prob.survey = survey
src = srcList[0]
rx = src.receiver_list[0]
wave = []
for time in prob.times:
wave.append(src.waveform.eval(time))
wave = np.hstack(wave)
out = prob.dpred(m0)
# plot the waveform
fig = plt.figure(figsize=(5, 3))
times_off = times-t0
plt.plot(waveform_skytem[:, 0], waveform_skytem[:, 1], 'k.')
plt.plot(prob.times, wave, 'k-', lw=2)
plt.legend(("SkyTEM waveform", "Waveform (fit)"), fontsize=10)
for t in rx.times:
plt.plot(np.ones(2)*t, np.r_[-0.03, 0.03], 'k-')
plt.ylim(-0.1, 1.1)
plt.grid(True)
plt.xlabel("Time (s)")
plt.ylabel("Normalized current")
if saveFig:
fig.savefig("skytem_waveform", dpi=200)
# Observed data
dobs_sky = skytem["data"][rxind_skytem, :-3] * area
# ------------------ SkyTEM Inversion ------------------ #
# Uncertainty
relative = 0.12
floor = 7.5e-12
uncert = abs(dobs_sky) * relative + floor
# Data Misfit
data_sky = data.Data(dobs=-dobs_sky, survey=survey, standard_deviation=uncert)
dmisfit = data_misfit.L2DataMisfit(simulation=prob, data=data_sky)
# Regularization
regMesh = discretize.TensorMesh([mesh.hz[mapping.maps[-1].indActive]])
reg = regularization.Simple(regMesh, mapping=maps.IdentityMap(regMesh))
# Optimization
opt = optimization.InexactGaussNewton(maxIter=5)
# statement of the inverse problem
invProb = inverse_problem.BaseInvProblem(dmisfit, reg, opt)
# Directives and Inversion Parameters
target = directives.TargetMisfit()
# betaest = directives.BetaEstimate_ByEig(beta0_ratio=1e0)
invProb.beta = 20.
inv = inversion.BaseInversion(invProb, directiveList=[target])
reg.alpha_s = 1e-1
reg.alpha_x = 1.
opt.LSshorten = 0.5
opt.remember('xc')
reg.mref = mopt_re # Use RESOLVE model as a reference model
# run the inversion
mopt_sky = inv.run(m0)
dpred_sky = invProb.dpred
# Plot the figure from the paper
plt.figure(figsize=(12, 8))
fs = 13 # fontsize
matplotlib.rcParams['font.size'] = fs
ax0 = plt.subplot2grid((2, 2), (0, 0), rowspan=2)
ax1 = plt.subplot2grid((2, 2), (0, 1))
ax2 = plt.subplot2grid((2, 2), (1, 1))
# Recovered Models
sigma_re = np.repeat(np.exp(mopt_re), 2, axis=0)
sigma_sky = np.repeat(np.exp(mopt_sky), 2, axis=0)
z = np.repeat(mesh.vectorCCz[active][1:], 2, axis=0)
z = np.r_[mesh.vectorCCz[active][0], z, mesh.vectorCCz[active][-1]]
ax0.semilogx(sigma_re, z, 'k', lw=2, label="RESOLVE")
ax0.semilogx(sigma_sky, z, 'b', lw=2, label="SkyTEM")
ax0.set_ylim(-50, 0)
# ax0.set_xlim(5e-4, 1e2)
ax0.grid(True)
ax0.set_ylabel("Depth (m)")
ax0.set_xlabel("Conducivity (S/m)")
ax0.legend(loc=3)
ax0.set_title("(a) Recovered Models")
# RESOLVE Data
ax1.loglog(
frequency_cp, dobs_re.reshape((5, 2))[:, 0]/bp*1e6, 'k-',
label="Obs (real)"
)
ax1.loglog(
frequency_cp, dobs_re.reshape((5, 2))[:, 1]/bp*1e6, 'k--',
label="Obs (imag)"
)
ax1.loglog(
frequency_cp, dpred_re.reshape((5, 2))[:, 0]/bp*1e6, 'k+', ms=10,
markeredgewidth=2., label="Pred (real)"
)
ax1.loglog(
frequency_cp, dpred_re.reshape((5, 2))[:, 1]/bp*1e6, 'ko', ms=6,
markeredgecolor='k', markeredgewidth=0.5, label="Pred (imag)"
)
ax1.set_title("(b) RESOLVE")
ax1.set_xlabel("Frequency (Hz)")
ax1.set_ylabel("Bz (ppm)")
ax1.grid(True)
ax1.legend(loc=3, fontsize=11)
# SkyTEM data
ax2.loglog(times_off[3:]*1e6, dobs_sky/area, 'b-', label="Obs")
ax2.loglog(
times_off[3:]*1e6, -dpred_sky/area, 'bo', ms=4,
markeredgecolor='k', markeredgewidth=0.5, label="Pred"
)
ax2.set_xlim(times_off.min()*1e6*1.2, times_off.max()*1e6*1.1)
ax2.set_xlabel("Time ($\mu s$)")
ax2.set_ylabel("dBz / dt (V/A-m$^4$)")
ax2.set_title("(c) SkyTEM High-moment")
ax2.grid(True)
ax2.legend(loc=3)
a3 = plt.axes([0.86, .33, .1, .09], facecolor=[0.8, 0.8, 0.8, 0.6])
a3.plot(prob.times*1e6, wave, 'k-')
a3.plot(
rx.times*1e6, np.zeros_like(rx.times), 'k|', markeredgewidth=1,
markersize=12
)
a3.set_xlim([prob.times.min()*1e6*0.75, prob.times.max()*1e6*1.1])
a3.set_title('(d) Waveform', fontsize=11)
a3.set_xticks([prob.times.min()*1e6, t0*1e6, prob.times.max()*1e6])
a3.set_yticks([])
# a3.set_xticklabels(['0', '2e4'])
a3.set_xticklabels(['-1e4', '0', '1e4'])
plt.tight_layout()
if saveFig:
plt.savefig("booky1D_time_freq.png", dpi=600)
if plotIt:
plt.show()
resolve.close()
skytem.close()
if cleanup:
print( os.path.split(directory)[:-1])
os.remove(
os.path.sep.join(
directory.split()[:-1] + ["._bookpurnong_inversion"]
)
)
os.remove(downloads)
shutil.rmtree(directory)
|
def run(plotIt=True, saveFig=False, cleanup=True):
"""
Run 1D inversions for a single sounding of the RESOLVE and SkyTEM
bookpurnong data
:param bool plotIt: show the plots?
:param bool saveFig: save the figure
:param bool cleanup: remove the downloaded results
"""
downloads, directory = download_and_unzip_data()
resolve = h5py.File(
os.path.sep.join([directory, "booky_resolve.hdf5"]),
"r"
)
skytem = h5py.File(
os.path.sep.join([directory, "booky_skytem.hdf5"]),
"r"
)
river_path = resolve["river_path"].value
# Choose a sounding location to invert
xloc, yloc = 462100.0, 6196500.0
rxind_skytem = np.argmin(
abs(skytem["xy"][:, 0]-xloc)+abs(skytem["xy"][:, 1]-yloc)
)
rxind_resolve = np.argmin(
abs(resolve["xy"][:, 0]-xloc)+abs(resolve["xy"][:, 1]-yloc)
)
# Plot both resolve and skytem data on 2D plane
fig = plt.figure(figsize=(13, 6))
title = ["RESOLVE In-phase 400 Hz", "SkyTEM High moment 156 $\mu$s"]
ax1 = plt.subplot(121)
ax2 = plt.subplot(122)
axs = [ax1, ax2]
out_re = utils.plot2Ddata(
resolve["xy"], resolve["data"][:, 0], ncontour=100,
contourOpts={"cmap": "viridis"}, ax=ax1
)
vmin, vmax = out_re[0].get_clim()
cb_re = plt.colorbar(
out_re[0], ticks=np.linspace(vmin, vmax, 3), ax=ax1,
fraction=0.046, pad=0.04
)
temp_skytem = skytem["data"][:, 5].copy()
temp_skytem[skytem["data"][:, 5] > 7e-10] = 7e-10
out_sky = utils.plot2Ddata(
skytem["xy"][:, :2], temp_skytem, ncontour=100,
contourOpts={"cmap": "viridis", "vmax": 7e-10}, ax=ax2
)
vmin, vmax = out_sky[0].get_clim()
cb_sky = plt.colorbar(
out_sky[0], ticks=np.linspace(vmin, vmax*0.99, 3), ax=ax2,
format="%.1e", fraction=0.046, pad=0.04
)
cb_re.set_label("Bz (ppm)")
cb_sky.set_label("dB$_z$ / dt (V/A-m$^4$)")
for i, ax in enumerate(axs):
xticks = [460000, 463000]
yticks = [6195000, 6198000, 6201000]
ax.set_xticks(xticks)
ax.set_yticks(yticks)
ax.plot(xloc, yloc, 'wo')
ax.plot(river_path[:, 0], river_path[:, 1], 'k', lw=0.5)
ax.set_aspect("equal")
if i == 1:
ax.plot(
skytem["xy"][:, 0], skytem["xy"][:, 1], 'k.',
alpha=0.02, ms=1
)
ax.set_yticklabels([str(" ") for f in yticks])
else:
ax.plot(
resolve["xy"][:, 0], resolve["xy"][:, 1], 'k.', alpha=0.02,
ms=1
)
ax.set_yticklabels([str(f) for f in yticks])
ax.set_ylabel("Northing (m)")
ax.set_xlabel("Easting (m)")
ax.set_title(title[i])
ax.axis('equal')
# plt.tight_layout()
if saveFig is True:
fig.savefig("resolve_skytem_data.png", dpi=600)
# ------------------ Mesh ------------------ #
# Step1: Set 2D cylindrical mesh
cs, ncx, ncz, npad = 1., 10., 10., 20
hx = [(cs, ncx), (cs, npad, 1.3)]
npad = 12
temp = np.logspace(np.log10(1.), np.log10(12.), 19)
temp_pad = temp[-1] * 1.3 ** np.arange(npad)
hz = np.r_[temp_pad[::-1], temp[::-1], temp, temp_pad]
mesh = discretize.CylMesh([hx, 1, hz], '00C')
active = mesh.vectorCCz < 0.
# Step2: Set a SurjectVertical1D mapping
# Note: this sets our inversion model as 1D log conductivity
# below subsurface
active = mesh.vectorCCz < 0.
actMap = maps.InjectActiveCells(mesh, active, np.log(1e-8), nC=mesh.nCz)
mapping = maps.ExpMap(mesh) * maps.SurjectVertical1D(mesh) * actMap
sig_half = 1e-1
sig_air = 1e-8
sigma = np.ones(mesh.nCz)*sig_air
sigma[active] = sig_half
# Initial and reference model
m0 = np.log(sigma[active])
# ------------------ RESOLVE Forward Simulation ------------------ #
# Step3: Invert Resolve data
# Bird height from the surface
b_height_resolve = resolve["src_elevation"].value
src_height_resolve = b_height_resolve[rxind_resolve]
# Set Rx (In-phase and Quadrature)
rxOffset = 7.86
bzr = FDEM.Rx.PointMagneticFluxDensitySecondary(
np.array([[rxOffset, 0., src_height_resolve]]),
orientation='z',
component='real'
)
bzi = FDEM.Rx.PointMagneticFluxDensity(
np.array([[rxOffset, 0., src_height_resolve]]),
orientation='z',
component='imag'
)
# Set Source (In-phase and Quadrature)
frequency_cp = resolve["frequency_cp"].value
freqs = frequency_cp.copy()
srcLoc = np.array([0., 0., src_height_resolve])
srcList = [FDEM.Src.MagDipole([bzr, bzi], freq, srcLoc, orientation='Z')
for freq in freqs]
# Set FDEM survey (In-phase and Quadrature)
survey = FDEM.Survey(srcList)
prb = FDEM.Simulation3DMagneticFluxDensity(
mesh, sigmaMap=mapping, Solver=Solver
)
prb.survey = survey
# ------------------ RESOLVE Inversion ------------------ #
# Primary field
bp = - mu_0/(4*np.pi*rxOffset**3)
# Observed data
cpi_inds = [0, 2, 6, 8, 10]
cpq_inds = [1, 3, 7, 9, 11]
dobs_re = np.c_[
resolve["data"][rxind_resolve, :][cpi_inds],
resolve["data"][rxind_resolve, :][cpq_inds]
].flatten() * bp * 1e-6
# Uncertainty
relative = np.repeat(np.r_[np.ones(3)*0.1, np.ones(2)*0.15], 2)
floor = 20 * abs(bp) * 1e-6
std = abs(dobs_re) * relative + floor
# Data Misfit
data_resolve = data.Data(dobs=dobs_re, survey=survey, standard_deviation=uncert)
dmisfit = data_misfit.L2DataMisfit(simulation=prb, data=data_resolve)
# Regularization
regMesh = discretize.TensorMesh([mesh.hz[mapping.maps[-1].indActive]])
reg = regularization.Simple(regMesh, mapping=maps.IdentityMap(regMesh))
# Optimization
opt = optimization.InexactGaussNewton(maxIter=5)
# statement of the inverse problem
invProb = inverse_problem.BaseInvProblem(dmisfit, reg, opt)
# Inversion directives and parameters
target = directives.TargetMisfit() # stop when we hit target misfit
invProb.beta = 2.
# betaest = directives.BetaEstimate_ByEig(beta0_ratio=1e0)
inv = inversion.BaseInversion(invProb, directiveList=[target])
reg.alpha_s = 1e-3
reg.alpha_x = 1.
reg.mref = m0.copy()
opt.LSshorten = 0.5
opt.remember('xc')
# run the inversion
mopt_re = inv.run(m0)
dpred_re = invProb.dpred
# ------------------ SkyTEM Forward Simulation ------------------ #
# Step4: Invert SkyTEM data
# Bird height from the surface
b_height_skytem = skytem["src_elevation"].value
src_height = b_height_skytem[rxind_skytem]
srcLoc = np.array([0., 0., src_height])
# Radius of the source loop
area = skytem["area"].value
radius = np.sqrt(area/np.pi)
rxLoc = np.array([[radius, 0., src_height]])
# Parameters for current waveform
t0 = skytem["t0"].value
times = skytem["times"].value
waveform_skytem = skytem["waveform"].value
offTime = t0
times_off = times - t0
# Note: we are Using theoretical VTEM waveform,
# but effectively fits SkyTEM waveform
peakTime = 1.0000000e-02
a = 3.
dbdt_z = TDEM.Rx.PointMagneticFluxTimeDerivative(
locations=rxLoc, times=times_off[:-3]+offTime, orientation='z'
) # vertical db_dt
rxList = [dbdt_z] # list of receivers
srcList = [
TDEM.Src.CircularLoop(
rxList, loc=srcLoc, radius=radius,
orientation='z',
waveform=TDEM.Src.VTEMWaveform(
offTime=offTime, peakTime=peakTime, a=3.
)
)
]
# solve the problem at these times
timeSteps = [
(peakTime/5, 5), ((offTime-peakTime)/5, 5),
(1e-5, 5), (5e-5, 5), (1e-4, 10), (5e-4, 15)
]
prob = TDEM.Simulation3DElectricField(
mesh, time_steps=timeSteps, sigmaMap=mapping, Solver=Solver
)
survey = TDEM.Survey(srcList)
prob.survey = survey
src = srcList[0]
rx = src.receiver_list[0]
wave = []
for time in prob.times:
wave.append(src.waveform.eval(time))
wave = np.hstack(wave)
out = prob.dpred(m0)
# plot the waveform
fig = plt.figure(figsize=(5, 3))
times_off = times-t0
plt.plot(waveform_skytem[:, 0], waveform_skytem[:, 1], 'k.')
plt.plot(prob.times, wave, 'k-', lw=2)
plt.legend(("SkyTEM waveform", "Waveform (fit)"), fontsize=10)
for t in rx.times:
plt.plot(np.ones(2)*t, np.r_[-0.03, 0.03], 'k-')
plt.ylim(-0.1, 1.1)
plt.grid(True)
plt.xlabel("Time (s)")
plt.ylabel("Normalized current")
if saveFig:
fig.savefig("skytem_waveform", dpi=200)
# Observed data
dobs_sky = skytem["data"][rxind_skytem, :-3] * area
# ------------------ SkyTEM Inversion ------------------ #
# Uncertainty
relative = 0.12
floor = 7.5e-12
uncert = abs(dobs_sky) * relative + floor
# Data Misfit
data_sky = data.Data(dobs=-dobs_sky, survey=survey, standard_deviation=uncert)
dmisfit = data_misfit.L2DataMisfit(simulation=prob, data=data_sky)
# Regularization
regMesh = discretize.TensorMesh([mesh.hz[mapping.maps[-1].indActive]])
reg = regularization.Simple(regMesh, mapping=maps.IdentityMap(regMesh))
# Optimization
opt = optimization.InexactGaussNewton(maxIter=5)
# statement of the inverse problem
invProb = inverse_problem.BaseInvProblem(dmisfit, reg, opt)
# Directives and Inversion Parameters
target = directives.TargetMisfit()
# betaest = directives.BetaEstimate_ByEig(beta0_ratio=1e0)
invProb.beta = 20.
inv = inversion.BaseInversion(invProb, directiveList=[target])
reg.alpha_s = 1e-1
reg.alpha_x = 1.
opt.LSshorten = 0.5
opt.remember('xc')
reg.mref = mopt_re # Use RESOLVE model as a reference model
# run the inversion
mopt_sky = inv.run(m0)
dpred_sky = invProb.dpred
# Plot the figure from the paper
plt.figure(figsize=(12, 8))
fs = 13 # fontsize
matplotlib.rcParams['font.size'] = fs
ax0 = plt.subplot2grid((2, 2), (0, 0), rowspan=2)
ax1 = plt.subplot2grid((2, 2), (0, 1))
ax2 = plt.subplot2grid((2, 2), (1, 1))
# Recovered Models
sigma_re = np.repeat(np.exp(mopt_re), 2, axis=0)
sigma_sky = np.repeat(np.exp(mopt_sky), 2, axis=0)
z = np.repeat(mesh.vectorCCz[active][1:], 2, axis=0)
z = np.r_[mesh.vectorCCz[active][0], z, mesh.vectorCCz[active][-1]]
ax0.semilogx(sigma_re, z, 'k', lw=2, label="RESOLVE")
ax0.semilogx(sigma_sky, z, 'b', lw=2, label="SkyTEM")
ax0.set_ylim(-50, 0)
# ax0.set_xlim(5e-4, 1e2)
ax0.grid(True)
ax0.set_ylabel("Depth (m)")
ax0.set_xlabel("Conducivity (S/m)")
ax0.legend(loc=3)
ax0.set_title("(a) Recovered Models")
# RESOLVE Data
ax1.loglog(
frequency_cp, dobs_re.reshape((5, 2))[:, 0]/bp*1e6, 'k-',
label="Obs (real)"
)
ax1.loglog(
frequency_cp, dobs_re.reshape((5, 2))[:, 1]/bp*1e6, 'k--',
label="Obs (imag)"
)
ax1.loglog(
frequency_cp, dpred_re.reshape((5, 2))[:, 0]/bp*1e6, 'k+', ms=10,
markeredgewidth=2., label="Pred (real)"
)
ax1.loglog(
frequency_cp, dpred_re.reshape((5, 2))[:, 1]/bp*1e6, 'ko', ms=6,
markeredgecolor='k', markeredgewidth=0.5, label="Pred (imag)"
)
ax1.set_title("(b) RESOLVE")
ax1.set_xlabel("Frequency (Hz)")
ax1.set_ylabel("Bz (ppm)")
ax1.grid(True)
ax1.legend(loc=3, fontsize=11)
# SkyTEM data
ax2.loglog(times_off[3:]*1e6, dobs_sky/area, 'b-', label="Obs")
ax2.loglog(
times_off[3:]*1e6, -dpred_sky/area, 'bo', ms=4,
markeredgecolor='k', markeredgewidth=0.5, label="Pred"
)
ax2.set_xlim(times_off.min()*1e6*1.2, times_off.max()*1e6*1.1)
ax2.set_xlabel("Time ($\mu s$)")
ax2.set_ylabel("dBz / dt (V/A-m$^4$)")
ax2.set_title("(c) SkyTEM High-moment")
ax2.grid(True)
ax2.legend(loc=3)
a3 = plt.axes([0.86, .33, .1, .09], facecolor=[0.8, 0.8, 0.8, 0.6])
a3.plot(prob.times*1e6, wave, 'k-')
a3.plot(
rx.times*1e6, np.zeros_like(rx.times), 'k|', markeredgewidth=1,
markersize=12
)
a3.set_xlim([prob.times.min()*1e6*0.75, prob.times.max()*1e6*1.1])
a3.set_title('(d) Waveform', fontsize=11)
a3.set_xticks([prob.times.min()*1e6, t0*1e6, prob.times.max()*1e6])
a3.set_yticks([])
# a3.set_xticklabels(['0', '2e4'])
a3.set_xticklabels(['-1e4', '0', '1e4'])
plt.tight_layout()
if saveFig:
plt.savefig("booky1D_time_freq.png", dpi=600)
if plotIt:
plt.show()
resolve.close()
skytem.close()
if cleanup:
print( os.path.split(directory)[:-1])
os.remove(
os.path.sep.join(
directory.split()[:-1] + ["._bookpurnong_inversion"]
)
)
os.remove(downloads)
shutil.rmtree(directory)
|
39,068 |
def import_from_string(import_str: Any) -> Any:
if not isinstance(import_str, str):
return import_str
module_str, _, attrs_str = import_str.partition(":")
if not module_str or not attrs_str:
message = (
'Import string "{import_str}" must be in format "<module>:<attribute>".'
)
raise ImportFromStringError(message.format(import_str=import_str))
try:
module = importlib.import_module(module_str)
except ImportError as exc:
if exc.name != module_str:
raise exc from None
message = 'Could not import module "{module_str}".'
raise ImportFromStringError(message.format(module_str=module_str))
instance = module
try:
for attr_str in attrs_str.split("."):
instance = getattr(instance, attr_str)
except AttributeError:
message = 'Attribute "{attrs_str}" not found in module "{module_str}".'
raise ImportFromStringError(
message.format(attrs_str=attrs_str, module_str=module_str)
)
return instance
|
def import_from_string(import_str: Union[str, ModuleType]) -> ModuleType:
if not isinstance(import_str, str):
return import_str
module_str, _, attrs_str = import_str.partition(":")
if not module_str or not attrs_str:
message = (
'Import string "{import_str}" must be in format "<module>:<attribute>".'
)
raise ImportFromStringError(message.format(import_str=import_str))
try:
module = importlib.import_module(module_str)
except ImportError as exc:
if exc.name != module_str:
raise exc from None
message = 'Could not import module "{module_str}".'
raise ImportFromStringError(message.format(module_str=module_str))
instance = module
try:
for attr_str in attrs_str.split("."):
instance = getattr(instance, attr_str)
except AttributeError:
message = 'Attribute "{attrs_str}" not found in module "{module_str}".'
raise ImportFromStringError(
message.format(attrs_str=attrs_str, module_str=module_str)
)
return instance
|
55,883 |
def compute_CdS_from_drop_test(
terminal_velocity, rocket_mass, air_density=1.225, gravity=9.80665
):
"""Returns the parachute's CdS calculated through its final speed, air
density in the landing point, the rocket's mass and the force of gravity
in the landing point.
Parameters
----------
terminal_velocity : float
Rocket's speed in m/s when landing.
rocket_mass : float
Rocket's mass in kg.
air_density : float, optional
Air density, in kg/m^3, right before the rocket lands. Default value is 1.225.
gravity : float, optional
Gravitational acceleration experienced by the rocket and parachute during
descent in m/s^2. Default value is the standard gravity, 9.80665.
Returns
-------
CdS : float
Number equal to drag coefficient times reference area for parachute.
"""
CdS = 2 * rocket_mass * gravity / ((terminal_velocity**2) * air_density)
return CdS
|
def compute_CdS_from_drop_test(
terminal_velocity, rocket_mass, air_density=1.225, gravity=9.80665
):
"""Returns the parachute's CdS calculated through its final speed, air
density in the landing point, the rocket's mass and the force of gravity
in the landing point.
Parameters
----------
terminal_velocity : float
Rocket's speed in m/s when landing.
system rocket-parachute in which the terminal velocity is reached.
rocket_mass : float
Rocket's mass in kg.
air_density : float, optional
Air density, in kg/m^3, right before the rocket lands. Default value is 1.225.
gravity : float, optional
Gravitational acceleration experienced by the rocket and parachute during
descent in m/s^2. Default value is the standard gravity, 9.80665.
Returns
-------
CdS : float
Number equal to drag coefficient times reference area for parachute.
"""
CdS = 2 * rocket_mass * gravity / ((terminal_velocity**2) * air_density)
return CdS
|
58,568 |
def commit_step(store: workflow_storage.WorkflowStorage, step_id: "StepID",
ret: Union["Workflow", Any], *, exception: Optional[Exception],
checkpoint: "CheckpointModeType"):
"""Checkpoint the step output.
Args:
store: The storage the current workflow is using.
step_id: The ID of the step.
ret: The returned object of the workflow step.
exception: The exception caught by the step.
checkpoint: The checkpoint mode.
"""
from ray.workflow.common import Workflow
if checkpoint == CheckpointMode.DEFAULT.value:
# Default value implies "SYNC".
checkpoint = CheckpointMode.SYNC.value
if checkpoint == CheckpointMode.SKIP.value:
# Check if the checkpoint options are valid.
if isinstance(ret, Workflow):
for w in ret._iter_workflows_in_dag():
print(w.data.step_options.checkpoint)
if w != ret and w.data.step_options.checkpoint:
raise ValueError(
f"Step '{w.step_id}' requires checkpointing, however "
f"this is invalid. It is because the parent step "
f"'{step_id}' skips checkpointing, and the DAG of "
f"the sub-workflow is not preserved in the storage. "
f"Without the DAG, we cannot cover '{w.step_id}' "
f"from its checkpoint. NOTE: This rule does not "
f"affect the output step of the sub-workflow, "
f"because once the output step finished, we can "
f"recover from the checkpoint of the output step "
f"instead of the parent step.")
return
elif checkpoint != CheckpointMode.SYNC.value:
raise ValueError(f"Unknown checkpoint mode: {checkpoint}.")
if isinstance(ret, Workflow):
assert not ret.executed
tasks = [
_write_step_inputs(store, w.step_id, w.data)
for w in ret._iter_workflows_in_dag()
]
asyncio.get_event_loop().run_until_complete(asyncio.gather(*tasks))
context = workflow_context.get_workflow_step_context()
store.save_step_output(
step_id,
ret,
exception=exception,
outer_most_step_id=context.outer_most_step_id)
|
def commit_step(store: workflow_storage.WorkflowStorage, step_id: "StepID",
ret: Union["Workflow", Any], *, exception: Optional[Exception],
checkpoint: "CheckpointModeType"):
"""Checkpoint the step output.
Args:
store: The storage the current workflow is using.
step_id: The ID of the step.
ret: The returned object of the workflow step.
exception: The exception caught by the step.
checkpoint: The checkpoint mode.
"""
from ray.workflow.common import Workflow
if checkpoint == CheckpointMode.DEFAULT.value:
# Default value implies "SYNC".
checkpoint = CheckpointMode.SYNC.value
if checkpoint == CheckpointMode.SKIP.value:
# Check if the checkpoint options are valid.
if isinstance(ret, Workflow):
for w in ret._iter_workflows_in_dag():
print(w.data.step_options.checkpoint)
if w != ret and w.data.step_options.checkpoint:
raise ValueError(
f"Step '{w.step_id}' requires checkpointing, however "
f"this is invalid. This is because the parent step "
f"'{step_id}' skips checkpointing, so the DAG of "
f"the returned sub-workflow is not preserved in storage. "
f"Without the DAG, we cannot cover '{w.step_id}' "
f"from its checkpoint. NOTE: This rule does not "
f"affect the output step of the sub-workflow, "
f"because once the output step finished, we can "
f"recover from the checkpoint of the output step "
f"instead of the parent step.")
return
elif checkpoint != CheckpointMode.SYNC.value:
raise ValueError(f"Unknown checkpoint mode: {checkpoint}.")
if isinstance(ret, Workflow):
assert not ret.executed
tasks = [
_write_step_inputs(store, w.step_id, w.data)
for w in ret._iter_workflows_in_dag()
]
asyncio.get_event_loop().run_until_complete(asyncio.gather(*tasks))
context = workflow_context.get_workflow_step_context()
store.save_step_output(
step_id,
ret,
exception=exception,
outer_most_step_id=context.outer_most_step_id)
|