id
int64 11
59.9k
| original
stringlengths 33
150k
| modified
stringlengths 37
150k
|
---|---|---|
3,075 |
def test_coerce_to_numpy_array():
# with missing values -> object dtype
arr = pd.array([True, False, None], dtype="boolean")
result = np.array(arr)
expected = np.array([True, False, pd.NA], dtype="object")
tm.assert_numpy_array_equal(result, expected)
# also with no missing values -> object dtype
arr = pd.array([True, False, True], dtype="boolean")
result = np.array(arr)
expected = np.array([True, False, True], dtype="object")
tm.assert_numpy_array_equal(result, expected)
# force bool dtype
result = np.array(arr, dtype="bool")
expected = np.array([True, False, True], dtype="bool")
tm.assert_numpy_array_equal(result, expected)
# with missing values will raise error
arr = pd.array([True, False, None], dtype="boolean")
msg = str(
"cannot convert to 'bool'-dtype NumPy array with missing values."
+ " Specify an appropriate 'na_value' for this dtype."
)
with pytest.raises(ValueError, match=msg):
np.array(arr, dtype="bool")
|
def test_coerce_to_numpy_array():
# with missing values -> object dtype
arr = pd.array([True, False, None], dtype="boolean")
result = np.array(arr)
expected = np.array([True, False, pd.NA], dtype="object")
tm.assert_numpy_array_equal(result, expected)
# also with no missing values -> object dtype
arr = pd.array([True, False, True], dtype="boolean")
result = np.array(arr)
expected = np.array([True, False, True], dtype="object")
tm.assert_numpy_array_equal(result, expected)
# force bool dtype
result = np.array(arr, dtype="bool")
expected = np.array([True, False, True], dtype="bool")
tm.assert_numpy_array_equal(result, expected)
# with missing values will raise error
arr = pd.array([True, False, None], dtype="boolean")
msg = str(
"cannot convert to 'bool'-dtype NumPy array with missing values."
" Specify an appropriate 'na_value' for this dtype."
)
with pytest.raises(ValueError, match=msg):
np.array(arr, dtype="bool")
|
12,061 |
def test_dry_run_quiet_option(runner):
with open("requirements", "w"):
pass
out = runner.invoke(cli, ["--dry-run", "--quiet", "-n", "requirements"])
# Dry-run massage has not been written to output. (An empty out.stderr
# results in a ValueError raised by Click)
try:
assert "Dry-run, so nothing updated." not in out.stderr.strip()
except ValueError:
pass
|
def test_dry_run_quiet_option(runner):
with open("requirements", "w"):
pass
out = runner.invoke(cli, ["--dry-run", "--quiet", "-n", "requirements"])
# Dry-run message has not been written to output. (An empty out.stderr
# results in a ValueError raised by Click)
try:
assert "Dry-run, so nothing updated." not in out.stderr.strip()
except ValueError:
pass
|
59,474 |
def _method_not_supported(function: Callable) -> Callable:
"""A method decorator to raise PusleError with kind message."""
@functools.wraps(function)
def wrapper(self, *args, **kwargs):
raise PulseError(f'A method ``ScheduleBlock.{function.__name__}`` is not supported '
'because this program does not have notion of instruction time. '
'Apply ``qiskit.pulse.transforms.block_to_schedule`` function to this '
'program to get ``Schedule`` representation supporting this method. '
'This method is being deprecated.')
return wrapper
|
def _method_not_supported(function: Callable) -> Callable:
"""A method decorator to raise a PulseError with a graceful error message
for operations which do not work for ``ScheduleBlock``s.
"""
@functools.wraps(function)
def wrapper(self, *args, **kwargs):
raise PulseError(f'A method ``ScheduleBlock.{function.__name__}`` is not supported '
'because this program does not have notion of instruction time. '
'Apply ``qiskit.pulse.transforms.block_to_schedule`` function to this '
'program to get ``Schedule`` representation supporting this method. '
'This method is being deprecated.')
return wrapper
|
9,381 |
def ansible_dict_to_boto3_filter_list(filters_dict):
""" Convert an Ansible dict of filters to list of dicts that boto3 can use
Args:
filters_dict (dict): Dict of AWS filters.
Basic Usage:
>>> filters = {'some-aws-id': 'i-01234567'}
>>> ansible_dict_to_boto3_filter_list(filters)
{
'some-aws-id': 'i-01234567'
}
Returns:
List: List of AWS filters and their values
[
{
'Name': 'some-aws-id',
'Values': [
'i-01234567',
]
}
]
"""
filters_list = []
for k, v in filters_dict.items():
filter_dict = {'Name': k}
if isinstance(v, bool):
filter_dict['Values'] = [str(v).lower()]
elif isinstance(v, integer_types):
filter_dict['Values'] = [str(v)]
elif isinstance(v, string_types):
filter_dict['Values'] = [v]
else:
filter_dict['Values'] = v
filters_list.append(filter_dict)
return filters_list
|
def ansible_dict_to_boto3_filter_list(filters_dict):
""" Convert an Ansible dict of filters to list of dicts that boto3 can use
Args:
filters_dict (dict): Dict of AWS filters.
Basic Usage:
>>> filters = {'some-aws-id': 'i-01234567'}
>>> ansible_dict_to_boto3_filter_list(filters)
{
'some-aws-id': 'i-01234567'
}
Returns:
List: List of AWS filters and their values
[
{
'Name': 'some-aws-id',
'Values': [
'i-01234567',
]
}
]
"""
filters_list = []
for k, v in filters_dict.items():
filter_dict = {'Name': k}
if isinstance(v, bool):
filter_dict['Values'] = [str(v).capitalize()]
elif isinstance(v, integer_types):
filter_dict['Values'] = [str(v)]
elif isinstance(v, string_types):
filter_dict['Values'] = [v]
else:
filter_dict['Values'] = v
filters_list.append(filter_dict)
return filters_list
|
2,892 |
def unique_labels(*ys):
"""Extract an ordered array of unique labels.
We don't allow:
- mix of multilabel and multiclass (single label) targets
- mix of label indicator matrix and anything else,
because there are no explicit labels)
- mix of label indicator matrices of different sizes
- mix of string and integer labels
At the moment, we also don't allow "multiclass-multioutput" input type.
Parameters
----------
*ys : array-likes
Unique Label Values.
Returns
-------
out : ndarray of shape (n_unique_labels,)
An ordered array of unique labels.
Examples
--------
>>> from sklearn.utils.multiclass import unique_labels
>>> unique_labels([3, 5, 5, 5, 7, 7])
array([3, 5, 7])
>>> unique_labels([1, 2, 3, 4], [2, 2, 3, 4])
array([1, 2, 3, 4])
>>> unique_labels([1, 2, 10], [5, 11])
array([ 1, 2, 5, 10, 11])
"""
if not ys:
raise ValueError("No argument has been passed.")
# Check that we don't mix label format
ys_types = set(type_of_target(x) for x in ys)
if ys_types == {"binary", "multiclass"}:
ys_types = {"multiclass"}
if len(ys_types) > 1:
raise ValueError("Mix type of y not allowed, got types %s" % ys_types)
label_type = ys_types.pop()
# Check consistency for the indicator format
if (
label_type == "multilabel-indicator"
and len(
set(
check_array(y, accept_sparse=["csr", "csc", "coo"]).shape[1] for y in ys
)
)
> 1
):
raise ValueError(
"Multi-label binary indicator input with different numbers of labels"
)
# Get the unique set of labels
_unique_labels = _FN_UNIQUE_LABELS.get(label_type, None)
if not _unique_labels:
raise ValueError("Unknown label type: %s" % repr(ys))
ys_labels = set(chain.from_iterable(_unique_labels(y) for y in ys))
# Check that we don't mix string type with number type
if len(set(isinstance(label, str) for label in ys_labels)) > 1:
raise ValueError("Mix of label input types (string and number)")
return np.array(sorted(ys_labels))
|
def unique_labels(*ys):
"""Extract an ordered array of unique labels.
We don't allow:
- mix of multilabel and multiclass (single label) targets
- mix of label indicator matrix and anything else,
because there are no explicit labels)
- mix of label indicator matrices of different sizes
- mix of string and integer labels
At the moment, we also don't allow "multiclass-multioutput" input type.
Parameters
----------
*ys : array-likes
Non-unique label values.
Returns
-------
out : ndarray of shape (n_unique_labels,)
An ordered array of unique labels.
Examples
--------
>>> from sklearn.utils.multiclass import unique_labels
>>> unique_labels([3, 5, 5, 5, 7, 7])
array([3, 5, 7])
>>> unique_labels([1, 2, 3, 4], [2, 2, 3, 4])
array([1, 2, 3, 4])
>>> unique_labels([1, 2, 10], [5, 11])
array([ 1, 2, 5, 10, 11])
"""
if not ys:
raise ValueError("No argument has been passed.")
# Check that we don't mix label format
ys_types = set(type_of_target(x) for x in ys)
if ys_types == {"binary", "multiclass"}:
ys_types = {"multiclass"}
if len(ys_types) > 1:
raise ValueError("Mix type of y not allowed, got types %s" % ys_types)
label_type = ys_types.pop()
# Check consistency for the indicator format
if (
label_type == "multilabel-indicator"
and len(
set(
check_array(y, accept_sparse=["csr", "csc", "coo"]).shape[1] for y in ys
)
)
> 1
):
raise ValueError(
"Multi-label binary indicator input with different numbers of labels"
)
# Get the unique set of labels
_unique_labels = _FN_UNIQUE_LABELS.get(label_type, None)
if not _unique_labels:
raise ValueError("Unknown label type: %s" % repr(ys))
ys_labels = set(chain.from_iterable(_unique_labels(y) for y in ys))
# Check that we don't mix string type with number type
if len(set(isinstance(label, str) for label in ys_labels)) > 1:
raise ValueError("Mix of label input types (string and number)")
return np.array(sorted(ys_labels))
|
7,043 |
def get_rsync_rund_cmd(src, dst, reinstall=False, dry_run=False):
"""Create and return the rsync command used for cylc install/re-install.
Args:
src (str):
file path location of source directory
dst (str):
file path location of destination directory
reinstall (bool):
indicate reinstall (--delete option added)
dry-run (bool):
indicate dry-run, rsync will not take place but report output if a
real run were to be executed
Return:
list: command to use for rsync.
"""
rsync_cmd = ["rsync"]
rsync_cmd.append("-av")
if dry_run:
rsync_cmd.append("--dry-run")
if reinstall:
rsync_cmd.append('--delete')
ignore_dirs = [
'.git',
'.svn',
'.cylcignore',
'rose-workflow.conf',
'opt/rose-workflow-cylc-install.conf',
WorkflowFiles.LOG_DIR,
WorkflowFiles.Install.DIRNAME,
WorkflowFiles.Service.DIRNAME]
for exclude in ignore_dirs:
if (Path(src).joinpath(exclude).exists() or
Path(dst).joinpath(exclude).exists()):
rsync_cmd.append(f"--exclude={exclude}")
if Path(src).joinpath('.cylcignore').exists():
rsync_cmd.append("--exclude-from=.cylcignore")
rsync_cmd.append(f"{src}/")
rsync_cmd.append(f"{dst}/")
return rsync_cmd
|
def get_rsync_rund_cmd(src, dst, reinstall=False, dry_run=False):
"""Create and return the rsync command used for cylc install/re-install.
Args:
src (str):
file path location of source directory
dst (str):
file path location of destination directory
reinstall (bool):
indicate reinstall (--delete option added)
dry-run (bool):
indicate dry-run, rsync will not take place but report output if a
real run were to be executed
Return:
list: command to use for rsync.
"""
rsync_cmd = ["rsync"]
rsync_cmd.append("-av")
if dry_run:
rsync_cmd.append("--dry-run")
if reinstall:
rsync_cmd.append('--delete')
ignore_dirs = [
'.git',
'.svn',
'.cylcignore',
'rose-workflow.conf',
'opt/rose-suite-cylc-install.conf',
WorkflowFiles.LOG_DIR,
WorkflowFiles.Install.DIRNAME,
WorkflowFiles.Service.DIRNAME]
for exclude in ignore_dirs:
if (Path(src).joinpath(exclude).exists() or
Path(dst).joinpath(exclude).exists()):
rsync_cmd.append(f"--exclude={exclude}")
if Path(src).joinpath('.cylcignore').exists():
rsync_cmd.append("--exclude-from=.cylcignore")
rsync_cmd.append(f"{src}/")
rsync_cmd.append(f"{dst}/")
return rsync_cmd
|
47,958 |
def main():
args = build_argparser().parse_args()
log.info('Initializing Inference Engine...')
ie = IECore()
refine_config_plugin = get_plugin_configs(args.device_refine, args.refine_nstreams, args.refine_nthreads)
output_config_plugin = get_plugin_configs(args.device_output, args.output_nstreams, args.output_nthreads)
log.info('Loading network...')
model_proposal = models.ProposalModel(ie, args.model_proposal)
model_refine = models.RefineModel(ie, args.model_refine)
model_output = models.OutputModel(ie, args.model_output)
detector_pipeline = MtcnnPipeline(ie, model_proposal, model_refine, model_output,
pm_sync=not args.proposal_async,
pm_device=args.device_proposal,
rm_batch_size=args.refine_batch_size,
rm_config=refine_config_plugin,
rm_num_requests=args.refine_requests,
rm_device=args.device_refine,
om_batch_size=args.output_batch_size,
om_config=output_config_plugin,
om_num_requests=args.output_requests,
om_device=args.device_output)
cap = open_images_capture(args.input, args.loop)
log.info('Starting inference...')
print("Use 'c' key to disable/enable confidence drawing, 'l' to disable/enable landmarks drawing")
print("To close the application, press 'CTRL+C' here or switch to the output window and press ESC key")
palette = ColorPalette(1)
metrics = PerformanceMetrics()
video_writer = cv2.VideoWriter()
draw_lanmdmark = True
draw_confidence = True
total_frames = 0
while True:
start_time = perf_counter()
frame = cap.read()
if not frame:
break
total_frames += 1
if total_frames == 1 :
presenter = monitors.Presenter(args.utilization_monitors, 55,
(round(frame.shape[1] / 4), round(frame.shape[0] / 8)))
if args.output:
video_writer = cv2.VideoWriter(args.output, cv2.VideoWriter_fourcc(*'MJPG'), cap.fps(),
(frame.shape[1], frame.shape[0]))
if not video_writer.isOpened():
raise RuntimeError("Can't open video writer")
detections = detector_pipeline.infer(frame)
presenter.drawGraphs(frame)
draw_detections(frame, detections, palette, None, 0.5, draw_lanmdmark, draw_confidence)
metrics.update(start_time, frame)
if video_writer.isOpened() and (args.output_limit == -1 or total_frames <= args.output_limit - 1):
video_writer.write(frame)
if not args.no_show:
cv2.imshow('Detection Results', frame)
key = cv2.waitKey(1)
ESC_KEY = 27
# Quit.
if key in {ord('q'), ord('Q'), ESC_KEY}:
break
if key in {ord('l'), ord('L')}:
draw_lanmdmark = not draw_lanmdmark
if key in {ord('c'), ord('C')}:
draw_confidence = not draw_confidence
metrics.print_total()
|
def main():
args = build_argparser().parse_args()
log.info('Initializing Inference Engine...')
ie = IECore()
refine_config_plugin = get_plugin_configs(args.device_refine, args.refine_nstreams, args.refine_nthreads)
output_config_plugin = get_plugin_configs(args.device_output, args.output_nstreams, args.output_nthreads)
log.info('Loading network...')
model_proposal = models.ProposalModel(ie, args.model_proposal)
model_refine = models.RefineModel(ie, args.model_refine)
model_output = models.OutputModel(ie, args.model_output)
detector_pipeline = MtcnnPipeline(ie, model_proposal, model_refine, model_output,
pm_sync=not args.proposal_async,
pm_device=args.device_proposal,
rm_batch_size=args.refine_batch_size,
rm_config=refine_config_plugin,
rm_num_requests=args.refine_requests,
rm_device=args.device_refine,
om_batch_size=args.output_batch_size,
om_config=output_config_plugin,
om_num_requests=args.output_requests,
om_device=args.device_output)
cap = open_images_capture(args.input, args.loop)
log.info('Starting inference...')
print("Use 'c' key to disable/enable confidence drawing, 'l' to disable/enable landmarks drawing")
print("To close the application, press 'CTRL+C' here or switch to the output window and press ESC key")
palette = ColorPalette(1)
metrics = PerformanceMetrics()
video_writer = cv2.VideoWriter()
draw_lanmdmark = True
draw_confidence = True
total_frames = 0
while True:
start_time = perf_counter()
frame = cap.read()
if not frame:
break
total_frames += 1
if total_frames == 1 :
presenter = monitors.Presenter(args.utilization_monitors, 55,
(round(frame.shape[1] / 4), round(frame.shape[0] / 8)))
if args.output:
video_writer = cv2.VideoWriter(args.output, cv2.VideoWriter_fourcc(*'MJPG'), cap.fps(),
(frame.shape[1], frame.shape[0]))
if not video_writer.isOpened():
raise RuntimeError("Can't open video writer")
detections = detector_pipeline.infer(frame)
presenter.drawGraphs(frame)
draw_detections(frame, detections, palette, None, 0.5, draw_lanmdmark, draw_confidence)
metrics.update(start_time, frame)
if video_writer.isOpened() and (args.output_limit == -1 or total_frames <= args.output_limit - 1):
video_writer.write(frame)
if not args.no_show:
cv2.imshow('Detection Results', frame)
key = cv2.waitKey(1)
ESC_KEY = 27
# Quit.
if key in {ord('q'), ord('Q'), ESC_KEY}:
break
if key in {ord('l'), ord('L')}:
draw_lanmdmark = not draw_lanmdmark
elif key in {ord('c'), ord('C')}:
draw_confidence = not draw_confidence
metrics.print_total()
|
36,290 |
def simplify_pauli_sum(pauli_sum: PauliSum) -> PauliSum:
"""Simplify the sum of Pauli operators according to Pauli algebra rules."""
# You might want to use a defaultdict(list) here, but don't because
# we want to do our best to preserve the order of terms.
like_terms: Dict[Any, List[PauliTerm]] = OrderedDict()
for term in pauli_sum.terms:
key = term.operations_as_set()
if key in like_terms:
like_terms[key].append(term)
else:
like_terms[key] = [term]
terms = []
for term_list in like_terms.values():
first_term = term_list[0]
if len(term_list) == 1 and not np.isclose(first_term.coefficient, 0.0):
terms.append(first_term)
else:
coeff = sum(t.coefficient for t in term_list)
for t in term_list:
if list(t._ops.items()) != list(first_term._ops.items()):
warnings.warn(
"The term {} will be combined with {}, but they have different "
"orders of operations. This doesn't matter for QVM or "
"wavefunction simulation but may be important when "
"running on an actual device.".format(
t.id(sort_ops=False), first_term.id(sort_ops=False)
)
)
if not np.isclose(coeff, 0.0):
terms.append(term_with_coeff(term_list[0], coeff))
return PauliSum(terms)
|
def simplify_pauli_sum(pauli_sum: PauliSum) -> PauliSum:
"""Simplify the sum of Pauli operators according to Pauli algebra rules."""
# You might want to use a defaultdict(list) here, but don't because
# we want to do our best to preserve the order of terms.
like_terms: Dict[Hashable, List[PauliTerm]] = OrderedDict()
for term in pauli_sum.terms:
key = term.operations_as_set()
if key in like_terms:
like_terms[key].append(term)
else:
like_terms[key] = [term]
terms = []
for term_list in like_terms.values():
first_term = term_list[0]
if len(term_list) == 1 and not np.isclose(first_term.coefficient, 0.0):
terms.append(first_term)
else:
coeff = sum(t.coefficient for t in term_list)
for t in term_list:
if list(t._ops.items()) != list(first_term._ops.items()):
warnings.warn(
"The term {} will be combined with {}, but they have different "
"orders of operations. This doesn't matter for QVM or "
"wavefunction simulation but may be important when "
"running on an actual device.".format(
t.id(sort_ops=False), first_term.id(sort_ops=False)
)
)
if not np.isclose(coeff, 0.0):
terms.append(term_with_coeff(term_list[0], coeff))
return PauliSum(terms)
|
19,613 |
def getgccversion(chost=None):
"""
rtype: C{str}
return: the current in-use gcc version
"""
gcc_ver_command = ["gcc", "-dumpversion"]
gcc_ver_prefix = "gcc-"
# accept clang as system compiler too
clang_ver_command = ["clang", "--version"]
clang_ver_prefix = "clang-"
ubinpath = os.path.join("/", portage.const.EPREFIX, "usr", "bin")
def getclangversion(output):
version = re.search("clang version ([0-9.]+) ", output)
if version:
return version.group(1)
return "unknown"
gcc_not_found_error = red(
"!!! No gcc found. You probably need to 'source /etc/profile'\n"
+ "!!! to update the environment of this terminal and possibly\n"
+ "!!! other terminals also.\n"
)
if chost:
try:
proc = subprocess.Popen(
[ubinpath + "/" + "gcc-config", "-c"],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
except OSError:
myoutput = None
mystatus = 1
else:
myoutput = _unicode_decode(proc.communicate()[0]).rstrip("\n")
mystatus = proc.wait()
if mystatus == os.EX_OK and myoutput.startswith(chost + "-"):
return myoutput.replace(chost + "-", gcc_ver_prefix, 1)
try:
proc = subprocess.Popen(
[ubinpath + "/" + chost + "-" + gcc_ver_command[0]]
+ gcc_ver_command[1:],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
except OSError:
myoutput = None
mystatus = 1
else:
myoutput = _unicode_decode(proc.communicate()[0]).rstrip("\n")
mystatus = proc.wait()
if mystatus == os.EX_OK:
return gcc_ver_prefix + myoutput
# no GCC? try Clang
try:
proc = subprocess.Popen(
[ubinpath + "/" + chost + "-" + clang_ver_command[0]]
+ clang_ver_command[1:],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
except OSError:
myoutput = None
mystatus = 1
else:
myoutput = _unicode_decode(proc.communicate()[0]).rstrip("\n")
mystatus = proc.wait()
if mystatus == os.EX_OK:
return clang_ver_prefix + getclangversion(myoutput)
try:
proc = subprocess.Popen(
gcc_ver_command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
)
except OSError:
myoutput = None
mystatus = 1
else:
myoutput = _unicode_decode(proc.communicate()[0]).rstrip("\n")
mystatus = proc.wait()
if mystatus == os.EX_OK:
return gcc_ver_prefix + myoutput
portage.writemsg(gcc_not_found_error, noiselevel=-1)
return "[unavailable]"
|
def getgccversion(chost=None):
"""
rtype: C{str}
return: the current in-use gcc version
"""
gcc_ver_command = ["gcc", "-dumpversion"]
gcc_ver_prefix = "gcc-"
# accept clang as system compiler too
clang_ver_command = ["clang", "--version"]
clang_ver_prefix = "clang-"
usr_bin_path = os.path.join("/", portage.const.EPREFIX, "usr", "bin")
def getclangversion(output):
version = re.search("clang version ([0-9.]+) ", output)
if version:
return version.group(1)
return "unknown"
gcc_not_found_error = red(
"!!! No gcc found. You probably need to 'source /etc/profile'\n"
+ "!!! to update the environment of this terminal and possibly\n"
+ "!!! other terminals also.\n"
)
if chost:
try:
proc = subprocess.Popen(
[ubinpath + "/" + "gcc-config", "-c"],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
except OSError:
myoutput = None
mystatus = 1
else:
myoutput = _unicode_decode(proc.communicate()[0]).rstrip("\n")
mystatus = proc.wait()
if mystatus == os.EX_OK and myoutput.startswith(chost + "-"):
return myoutput.replace(chost + "-", gcc_ver_prefix, 1)
try:
proc = subprocess.Popen(
[ubinpath + "/" + chost + "-" + gcc_ver_command[0]]
+ gcc_ver_command[1:],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
except OSError:
myoutput = None
mystatus = 1
else:
myoutput = _unicode_decode(proc.communicate()[0]).rstrip("\n")
mystatus = proc.wait()
if mystatus == os.EX_OK:
return gcc_ver_prefix + myoutput
# no GCC? try Clang
try:
proc = subprocess.Popen(
[ubinpath + "/" + chost + "-" + clang_ver_command[0]]
+ clang_ver_command[1:],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
except OSError:
myoutput = None
mystatus = 1
else:
myoutput = _unicode_decode(proc.communicate()[0]).rstrip("\n")
mystatus = proc.wait()
if mystatus == os.EX_OK:
return clang_ver_prefix + getclangversion(myoutput)
try:
proc = subprocess.Popen(
gcc_ver_command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
)
except OSError:
myoutput = None
mystatus = 1
else:
myoutput = _unicode_decode(proc.communicate()[0]).rstrip("\n")
mystatus = proc.wait()
if mystatus == os.EX_OK:
return gcc_ver_prefix + myoutput
portage.writemsg(gcc_not_found_error, noiselevel=-1)
return "[unavailable]"
|
27,662 |
def _spike_test(stream, percent=0.99, multiplier=1e7):
"""
Check for very large spikes in data and raise an error if found.
:param stream: Stream to look for spikes in.
:type stream: :class:`obspy.core.stream.Stream`
:param percent: Percentage as a decimal to calculate range for.
:type percent: float
:param multiplier: Multiplier of range to define a spike.
:type multiplier: float
"""
list_ids = []
for tr in stream:
if (tr.data > 2 * np.max(np.sort(
np.abs(tr.data))[0:int(percent * len(tr.data))]
) * multiplier).sum() > 0:
list_ids.append(tr.id)
if list_ids != []:
ids = ', '.join(list_ids)
msg = ('Spikes above ' + str(multiplier) +
' of the range of ' + str(percent) +
' of the data present, check:\n' + ids + '.\n'
'This would otherwise likely result in an issue during ' +
'FFT prior to cross-correlation.\n' +
'If you think this spike is real please report ' +
'this as a bug.')
print(msg)
for ID in list_ids:
stream.remove(stream.select(id=ID)[0])
print('%s got removed by EQcorrscan because it had spike' % ID)
|
def _spike_test(stream, percent=0.99, multiplier=1e7):
"""
Check for very large spikes in data and raise an error if found.
:param stream: Stream to look for spikes in.
:type stream: :class:`obspy.core.stream.Stream`
:param percent: Percentage as a decimal to calculate range for.
:type percent: float
:param multiplier: Multiplier of range to define a spike.
:type multiplier: float
"""
list_ids = []
for tr in stream:
if (tr.data > 2 * np.max(np.sort(
np.abs(tr.data))[0:int(percent * len(tr.data))]
) * multiplier).sum() > 0:
list_ids.append(tr.id)
if list_ids != []:
ids = ', '.join(list_ids)
msg = ('Spikes above ' + str(multiplier) +
' of the range of ' + str(percent) +
' of the data present, check:\n' + ids + '.\n'
'This would otherwise likely result in an issue during ' +
'FFT prior to cross-correlation.\n' +
'If you think this spike is real please report ' +
'this as a bug.')
print(msg)
for _id in list_ids:
stream.remove(stream.select(id=ID)[0])
print('%s got removed by EQcorrscan because it had spike' % ID)
|
12,059 |
def test_dry_run_noisy_option(runner):
with open("requirements", "w"):
pass
out = runner.invoke(cli, ["--dry-run", "-n", "requirements"])
# Dry-run massage has been written to output
assert "Dry-run, so nothing updated." in out.stderr.strip()
|
def test_dry_run_noisy_option(runner):
with open("requirements", "w"):
pass
out = runner.invoke(cli, ["--dry-run", "-n", "requirements"])
# Dry-run massage has been written to output
assert "Dry-run, so nothing updated." in out.stderr.splitlines()
|
38,045 |
def data_kind(data, x=None, y=None, z=None, required_z=False):
"""
Check what kind of data is provided to a module.
Possible types:
* a file name provided as 'data'
* a pathlib.Path provided as 'data'
* an xarray.DataArray provided as 'data'
* a matrix provided as 'data'
* 1D arrays x and y (and z, optionally)
Arguments should be ``None`` if not used. If doesn't fit any of these
categories (or fits more than one), will raise an exception.
Parameters
----------
data : str or pathlib.Path or xarray.DataArray or {table-like} or None
Pass in either a file name or :class:`pathlib.Path` to an ASCII data
table, an :class:`xarray.DataArray`, a 1D/2D
{table-classes}.
x/y : 1d arrays or None
x and y columns as numpy arrays.
z : 1d array or None
z column as numpy array. To be used optionally when x and y are given.
required_z : bool
State whether the 'z' column is required. [Default is False].
Returns
-------
kind : str
One of: ``'file'``, ``'grid'``, ``'matrix'``, ``'vectors'``.
Examples
--------
>>> import numpy as np
>>> import xarray as xr
>>> import pathlib
>>> data_kind(data=None, x=np.array([1, 2, 3]), y=np.array([4, 5, 6]))
'vectors'
>>> data_kind(data=np.arange(10).reshape((5, 2)), x=None, y=None)
'matrix'
>>> data_kind(data="my-data-file.txt", x=None, y=None)
'file'
>>> data_kind(data=pathlib.Path("my-data-file.txt"), x=None, y=None)
'file'
>>> data_kind(data=xr.DataArray(np.random.rand(4, 3)))
'grid'
"""
if data is None and x is None and y is None:
raise GMTInvalidInput("No input data provided.")
if data is not None and (x is not None or y is not None or z is not None):
raise GMTInvalidInput("Too much data. Use either data or x and y.")
if data is None and (x is None or y is None):
raise GMTInvalidInput("Must provide both x and y.")
if data is None and required_z and z is None:
raise GMTInvalidInput("Must provide x, y, and z.")
if isinstance(data, (str, pathlib.PurePath)):
kind = "file"
elif isinstance(data, xr.DataArray):
kind = "grid"
elif hasattr(data, "__geo_interface__"):
kind = "geojson"
elif data is not None:
if required_z and (
getattr(data, "shape", (3, 3))[1] < 3 # np.array, pd.DataFrame
or len(getattr(data, "data_vars", (0, 1, 2))) < 3 # xr.Dataset
):
raise GMTInvalidInput("data must provide x, y, and z columns.")
kind = "matrix"
else:
kind = "vectors"
return kind
|
def data_kind(data, x=None, y=None, z=None, required_z=False):
"""
Check what kind of data is provided to a module.
Possible types:
* a file name provided as 'data'
* a pathlib.Path provided as 'data'
* an xarray.DataArray provided as 'data'
* a matrix provided as 'data'
* 1D arrays x and y (and z, optionally)
Arguments should be ``None`` if not used. If doesn't fit any of these
categories (or fits more than one), will raise an exception.
Parameters
----------
data : str or pathlib.Path or xarray.DataArray or {table-like} or None
Pass in either a file name or :class:`pathlib.Path` to an ASCII data
table, an :class:`xarray.DataArray`, a 1D/2D
{table-classes}.
x/y : 1d arrays or None
x and y columns as numpy arrays.
z : 1d array or None
z column as numpy array. To be used optionally when x and y are given.
required_z : bool
State whether the 'z' column is required.
Returns
-------
kind : str
One of: ``'file'``, ``'grid'``, ``'matrix'``, ``'vectors'``.
Examples
--------
>>> import numpy as np
>>> import xarray as xr
>>> import pathlib
>>> data_kind(data=None, x=np.array([1, 2, 3]), y=np.array([4, 5, 6]))
'vectors'
>>> data_kind(data=np.arange(10).reshape((5, 2)), x=None, y=None)
'matrix'
>>> data_kind(data="my-data-file.txt", x=None, y=None)
'file'
>>> data_kind(data=pathlib.Path("my-data-file.txt"), x=None, y=None)
'file'
>>> data_kind(data=xr.DataArray(np.random.rand(4, 3)))
'grid'
"""
if data is None and x is None and y is None:
raise GMTInvalidInput("No input data provided.")
if data is not None and (x is not None or y is not None or z is not None):
raise GMTInvalidInput("Too much data. Use either data or x and y.")
if data is None and (x is None or y is None):
raise GMTInvalidInput("Must provide both x and y.")
if data is None and required_z and z is None:
raise GMTInvalidInput("Must provide x, y, and z.")
if isinstance(data, (str, pathlib.PurePath)):
kind = "file"
elif isinstance(data, xr.DataArray):
kind = "grid"
elif hasattr(data, "__geo_interface__"):
kind = "geojson"
elif data is not None:
if required_z and (
getattr(data, "shape", (3, 3))[1] < 3 # np.array, pd.DataFrame
or len(getattr(data, "data_vars", (0, 1, 2))) < 3 # xr.Dataset
):
raise GMTInvalidInput("data must provide x, y, and z columns.")
kind = "matrix"
else:
kind = "vectors"
return kind
|
39,557 |
def get_item(object: dict, *attributes):
"""
Return `item` by going through all the `attributes` present in the `json_object`
Do a DFS for the `item` in the `json_object` by traversing the `attributes`
and return None if can not traverse through the `attributes`
For example:
>>> get_item({'a': {'b': {'c': 'd'}}}, 'a', 'b', 'c')
'd'
>>> assert(get_item({'a': {'b': {'c': 'd'}}}, 'a', 'b', 'e')) == None
"""
if not object:
logger.error(f"Object is empty: {object}")
return
item = object
for attribute in attributes:
if attribute not in item:
logger.error(f"Missing attribute {attribute} in {item}")
return None
item = item[attribute]
return item
|
def get_item(object: dict, *attributes):
"""
Return `item` by going through all the `attributes` present in the `json_object`
Do a DFS for the `item` in the `json_object` by traversing the `attributes`
and return None if can not traverse through the `attributes`
For example:
>>> get_item({'a': {'b': {'c': 'd'}}}, 'a', 'b', 'c')
'd'
>>> assert(get_item({'a': {'b': {'c': 'd'}}}, 'a', 'b', 'e')) == None
"""
if not object:
return
return
item = object
for attribute in attributes:
if attribute not in item:
logger.error(f"Missing attribute {attribute} in {item}")
return None
item = item[attribute]
return item
|
31,935 |
def checkpoint_add_objects_batch_command(client: Client, object_type: str, ipaddress, name):
context_data = {}
readable_output = ''
ipaddress = argToList(ipaddress, ',')
name = argToList(name, ',')
add_list = []
for ip, n in zip(ipaddress, name):
tmp_dict = {'name': n, 'ip-address': ip}
add_list.append(tmp_dict)
result = current_result = client.add_objects_batch(object_type, add_list)
if result:
context_data = {'task-id': result.get('task-id')}
readable_output = tableToMarkdown('CheckPoint data for add-objects-batch command:',
context_data)
command_results = CommandResults(
outputs_prefix='CheckPoint.add_objects_batch',
outputs_key_field='uid',
readable_output=readable_output,
outputs=context_data,
raw_response=result
)
return command_results
|
def checkpoint_add_objects_batch_command(client: Client, object_type: str, ipaddress, name):
context_data = {}
readable_output = ''
ipaddress = argToList(ipaddress, ',')
name = argToList(name, ',')
add_list = []
for ip, n in zip(ipaddress, name):
tmp_dict = {'name': n, 'ip-address': ip}
add_list.append(tmp_dict)
result = current_result = client.add_objects_batch(object_type, add_list)
if result:
context_data = {'task-id': result.get('task-id')}
readable_output = tableToMarkdown('CheckPoint data for add-objects-batch command:',
context_data)
command_results = CommandResults(
outputs_prefix='CheckPoint.add_objects_batch',
outputs_key_field='task-id',
readable_output=readable_output,
outputs=context_data,
raw_response=result
)
return command_results
|
31,149 |
def main():
SESSION.proxies = handle_proxy()
client = SixgillEnrichClient(
demisto.params()["client_id"], demisto.params()["client_secret"], CHANNEL_CODE, demisto, SESSION, VERIFY
)
command = demisto.command()
demisto.info(f"Command being called is {command}")
commands: Dict[str, Callable] = {
"test-module": test_module_command,
}
try:
if demisto.command() == "ip":
return_results(ip_reputation_command(client, demisto.args()))
elif demisto.command() == "domain":
return_results(domain_reputation_command(client, demisto.args()))
elif demisto.command() == "url":
return_results(url_reputation_command(client, demisto.args()))
elif demisto.command() == "file":
return_results(file_reputation_command(client, demisto.args()))
elif demisto.command() == "actor":
return_results(actor_reputation_command(client, demisto.args()))
elif demisto.command() == "post_id":
return_results(postid_reputation_command(client, demisto.args()))
else:
readable_output, outputs, raw_response = commands[command](client, demisto.args())
return_outputs(readable_output, outputs, raw_response)
except Exception as e:
demisto.error(traceback.format_exc())
return_error(f"Error failed to execute {demisto.command()}, error: [{e}]")
|
def main():
SESSION.proxies = handle_proxy()
client = SixgillEnrichClient(
demisto.params()["client_id"], demisto.params()["client_secret"], CHANNEL_CODE, demisto, SESSION, VERIFY
)
command = demisto.command()
demisto.info(f"Command being called is {command}")
commands: Dict[str, Callable] = {
"test-module": test_module_command,
}
try:
if demisto.command() == "ip":
return_results(ip_reputation_command(client, demisto.args()))
elif demisto.command() == "domain":
return_results(domain_reputation_command(client, demisto.args()))
elif demisto.command() == "url":
return_results(url_reputation_command(client, demisto.args()))
elif command == "file":
return_results(file_reputation_command(client, demisto.args()))
elif demisto.command() == "actor":
return_results(actor_reputation_command(client, demisto.args()))
elif demisto.command() == "post_id":
return_results(postid_reputation_command(client, demisto.args()))
else:
readable_output, outputs, raw_response = commands[command](client, demisto.args())
return_outputs(readable_output, outputs, raw_response)
except Exception as e:
demisto.error(traceback.format_exc())
return_error(f"Error failed to execute {demisto.command()}, error: [{e}]")
|
6,974 |
def get_safe_globals():
datautils = frappe._dict()
if frappe.db:
date_format = frappe.db.get_default("date_format") or "yyyy-mm-dd"
time_format = frappe.db.get_default("time_format") or "HH:mm:ss"
else:
date_format = "yyyy-mm-dd"
time_format = "HH:mm:ss"
add_data_utils(datautils)
form_dict = getattr(frappe.local, "form_dict", frappe._dict())
if "_" in form_dict:
del frappe.local.form_dict["_"]
user = getattr(frappe.local, "session", None) and frappe.local.session.user or "Guest"
out = NamespaceDict(
# make available limited methods of frappe
json=NamespaceDict(loads=json.loads, dumps=json.dumps),
as_json=frappe.as_json,
dict=dict,
log=frappe.log,
_dict=frappe._dict,
args=form_dict,
frappe=NamespaceDict(
call=call_whitelisted_function,
flags=frappe._dict(),
format=frappe.format_value,
format_value=frappe.format_value,
date_format=date_format,
time_format=time_format,
format_date=frappe.utils.data.global_date_format,
form_dict=form_dict,
bold=frappe.bold,
copy_doc=frappe.copy_doc,
errprint=frappe.errprint,
qb=frappe.qb,
get_meta=frappe.get_meta,
new_doc=frappe.new_doc,
get_doc=frappe.get_doc,
get_mapped_doc=get_mapped_doc,
get_last_doc=frappe.get_last_doc,
get_cached_doc=frappe.get_cached_doc,
get_list=frappe.get_list,
get_all=frappe.get_all,
get_system_settings=frappe.get_system_settings,
rename_doc=rename_doc,
delete_doc=delete_doc,
utils=datautils,
get_url=frappe.utils.get_url,
render_template=frappe.render_template,
msgprint=frappe.msgprint,
throw=frappe.throw,
sendmail=frappe.sendmail,
get_print=frappe.get_print,
attach_print=frappe.attach_print,
user=user,
get_fullname=frappe.utils.get_fullname,
get_gravatar=frappe.utils.get_gravatar_url,
full_name=frappe.local.session.data.full_name
if getattr(frappe.local, "session", None)
else "Guest",
request=getattr(frappe.local, "request", {}),
session=frappe._dict(
user=user,
csrf_token=frappe.local.session.data.csrf_token
if getattr(frappe.local, "session", None)
else "",
),
make_get_request=frappe.integrations.utils.make_get_request,
make_post_request=frappe.integrations.utils.make_post_request,
get_payment_gateway_controller=frappe.integrations.utils.get_payment_gateway_controller,
socketio_port=frappe.conf.socketio_port,
get_hooks=get_hooks,
enqueue=safe_enqueue,
sanitize_html=frappe.utils.sanitize_html,
log_error=frappe.log_error,
db=NamespaceDict(
get_list=frappe.get_list,
get_all=frappe.get_all,
get_value=frappe.db.get_value,
set_value=frappe.db.set_value,
get_single_value=frappe.db.get_single_value,
get_default=frappe.db.get_default,
exists=frappe.db.exists,
count=frappe.db.count,
escape=frappe.db.escape,
sql=read_sql,
commit=frappe.db.commit,
rollback=frappe.db.rollback,
add_index=frappe.db.add_index,
),
),
FrappeClient=FrappeClient,
style=frappe._dict(border_color="#d1d8dd"),
get_toc=get_toc,
get_next_link=get_next_link,
_=frappe._,
get_shade=get_shade,
scrub=scrub,
guess_mimetype=mimetypes.guess_type,
html2text=html2text,
dev_server=1 if frappe.local.dev_server else 0,
run_script=run_script,
is_job_queued=is_job_queued,
get_visible_columns=get_visible_columns,
)
add_module_properties(
frappe.exceptions, out.frappe, lambda obj: inspect.isclass(obj) and issubclass(obj, Exception)
)
if frappe.response:
out.frappe.response = frappe.response
out.update(safe_globals)
# default writer allows write access
out._write_ = _write
out._getitem_ = _getitem
out._getattr_ = _getattr
# allow iterators and list comprehension
out._getiter_ = iter
out._iter_unpack_sequence_ = RestrictedPython.Guards.guarded_iter_unpack_sequence
# add common python builtins
out.update(get_python_builtins())
return out
|
def get_safe_globals():
datautils = frappe._dict()
if frappe.db:
date_format = frappe.db.get_default("date_format") or "yyyy-mm-dd"
time_format = frappe.db.get_default("time_format") or "HH:mm:ss"
else:
date_format = "yyyy-mm-dd"
time_format = "HH:mm:ss"
add_data_utils(datautils)
form_dict = getattr(frappe.local, "form_dict", frappe._dict())
if "_" in form_dict:
del frappe.local.form_dict["_"]
user = getattr(frappe.local, "session", None) and frappe.local.session.user or "Guest"
out = NamespaceDict(
# make available limited methods of frappe
json=NamespaceDict(loads=json.loads, dumps=json.dumps),
as_json=frappe.as_json,
dict=dict,
log=frappe.log,
_dict=frappe._dict,
args=form_dict,
frappe=NamespaceDict(
call=call_whitelisted_function,
flags=frappe._dict(),
format=frappe.format_value,
format_value=frappe.format_value,
date_format=date_format,
time_format=time_format,
format_date=frappe.utils.data.global_date_format,
form_dict=form_dict,
bold=frappe.bold,
copy_doc=frappe.copy_doc,
errprint=frappe.errprint,
qb=frappe.qb,
get_meta=frappe.get_meta,
new_doc=frappe.new_doc,
get_doc=frappe.get_doc,
get_mapped_doc=get_mapped_doc,
get_last_doc=frappe.get_last_doc,
get_cached_doc=frappe.get_cached_doc,
get_list=frappe.get_list,
get_all=frappe.get_all,
get_system_settings=frappe.get_system_settings,
rename_doc=rename_doc,
delete_doc=delete_doc,
utils=datautils,
get_url=frappe.utils.get_url,
render_template=frappe.render_template,
msgprint=frappe.msgprint,
throw=frappe.throw,
sendmail=frappe.sendmail,
get_print=frappe.get_print,
attach_print=frappe.attach_print,
user=user,
get_fullname=frappe.utils.get_fullname,
get_gravatar=frappe.utils.get_gravatar_url,
full_name=frappe.local.session.data.full_name
if getattr(frappe.local, "session", None)
else "Guest",
request=getattr(frappe.local, "request", {}),
session=frappe._dict(
user=user,
csrf_token=frappe.local.session.data.csrf_token
if getattr(frappe.local, "session", None)
else "",
),
make_get_request=frappe.integrations.utils.make_get_request,
make_post_request=frappe.integrations.utils.make_post_request,
get_payment_gateway_controller=frappe.integrations.utils.get_payment_gateway_controller,
socketio_port=frappe.conf.socketio_port,
get_hooks=get_hooks,
enqueue=safe_enqueue,
sanitize_html=frappe.utils.sanitize_html,
log_error=frappe.log_error,
db=NamespaceDict(
get_list=frappe.get_list,
get_all=frappe.get_all,
get_value=frappe.db.get_value,
set_value=frappe.db.set_value,
get_single_value=frappe.db.get_single_value,
get_default=frappe.db.get_default,
exists=frappe.db.exists,
count=frappe.db.count,
escape=frappe.db.escape,
sql=read_sql,
commit=frappe.db.commit,
rollback=frappe.db.rollback,
add_index=frappe.db.add_index,
),
),
FrappeClient=FrappeClient,
style=frappe._dict(border_color="#d1d8dd"),
get_toc=get_toc,
get_next_link=get_next_link,
_=frappe._,
get_shade=get_shade,
scrub=scrub,
guess_mimetype=mimetypes.guess_type,
html2text=html2text,
dev_server=int(frappe.local.dev_server),
run_script=run_script,
is_job_queued=is_job_queued,
get_visible_columns=get_visible_columns,
)
add_module_properties(
frappe.exceptions, out.frappe, lambda obj: inspect.isclass(obj) and issubclass(obj, Exception)
)
if frappe.response:
out.frappe.response = frappe.response
out.update(safe_globals)
# default writer allows write access
out._write_ = _write
out._getitem_ = _getitem
out._getattr_ = _getattr
# allow iterators and list comprehension
out._getiter_ = iter
out._iter_unpack_sequence_ = RestrictedPython.Guards.guarded_iter_unpack_sequence
# add common python builtins
out.update(get_python_builtins())
return out
|
17,532 |
def load_releases_tags():
tags = []
tags_fastcomp = []
info = load_releases_info()
for version, sha in sorted(info['releases'].items(), key=lambda x: version_key(x[0])):
tags.append(sha)
# Only include versions older than 1.39.0 in fastcomp releases
if version_key(version) < (2, 0, 0):
tags_fastcomp.append(sha)
if extra_release_tag:
tags.append(extra_release_tag)
# Adds the currently installed SDK version, which might be custom
# version. This means it will show up in `list` and work with
# `construct_env`
version_file = sdk_path(os.path.join('upstream', '.emsdk_version'))
if os.path.exists(version_file):
with open(version_file) as f:
version = f.read()
version = version.split('-')[2]
if version not in tags:
tags.append(version)
return tags, tags_fastcomp
|
def load_releases_tags():
tags = []
tags_fastcomp = []
info = load_releases_info()
for version, sha in sorted(info['releases'].items(), key=lambda x: version_key(x[0])):
tags.append(sha)
# Only include versions older than 1.39.0 in fastcomp releases
if version_key(version) < (2, 0, 0):
tags_fastcomp.append(sha)
if extra_release_tag:
tags.append(extra_release_tag)
# Adds the currently installed SDK version, which might be a custom
# version. This means it will show up in `list` and work with
# `construct_env`
version_file = sdk_path(os.path.join('upstream', '.emsdk_version'))
if os.path.exists(version_file):
with open(version_file) as f:
version = f.read()
version = version.split('-')[2]
if version not in tags:
tags.append(version)
return tags, tags_fastcomp
|
36,642 |
def test_jpeg(h, f):
"""JPEG data in JFIF or Exif format"""
if h[6:10] in (b'JFIF', b'Exif'):
return 'jpeg'
elif h[:4]==b'\xff\xd8\xff\xdb':
return 'jpeg'
|
def test_jpeg(h, f):
"""JPEG data in JFIF or Exif format"""
if h[6:10] in (b'JFIF', b'Exif'):
return 'jpeg'
elif h[:4] == b'\xff\xd8\xff\xdb':
return 'jpeg'
|
30,182 |
def get_hash_artifacts(generic_url):
representation_object = get_hashable_representation(generic_url)
dhash = hashlib.md5()
encoded = json.dumps(representation_object, sort_keys=True).encode()
dhash.update(encoded)
hash_artifacts = dhash.hexdigest()
return hash_artifacts
|
def hash_artifacts(generic_url):
representation_object = get_hashable_representation(generic_url)
dhash = hashlib.md5()
encoded = json.dumps(representation_object, sort_keys=True).encode()
dhash.update(encoded)
hash_artifacts = dhash.hexdigest()
return hash_artifacts
|
20,009 |
def acute_vertex(img, obj, win, thresh, sep):
"""acute_vertex: identify corners/acute angles of an object
For each point in contour, get a point before (pre) and after (post) the point of interest,
calculate the angle between the pre and post point.
Inputs:
img = the original image
obj = a contour of the plant object (this should be output from the object_composition.py fxn)
win = win argument specifies the pre and post point distances (a value of 30 worked well for a sample image)
thresh = an threshold to set for acuteness; keep points with an angle more acute than the threshold (a value of 15
worked well for sample image)
sep = the number of contour points to search within for the most acute value
Returns:
acute_points = list of acute points
img2 = debugging image
:param img: ndarray
:param obj: ndarray
:param win: int
:param thresh: int
:param sep: int
:return acute: ndarray
:return img2: ndarray
"""
params.device += 1
chain = []
if not np.any(obj):
acute = ('NA', 'NA')
return acute
for i in range(len(obj) - win):
x, y = obj[i].ravel()
pre_x, pre_y = obj[i - win].ravel()
post_x, post_y = obj[i + win].ravel()
# Angle in radians derived from Law of Cosines, converted to degrees
P12 = np.sqrt((x-pre_x)*(x-pre_x)+(y-pre_y)*(y-pre_y))
P13 = np.sqrt((x-post_x)*(x-post_x)+(y-post_y)*(y-post_y))
P23 = np.sqrt((pre_x-post_x)*(pre_x-post_x)+(pre_y-post_y)*(pre_y-post_y))
if (2*P12*P13) > 0.001:
dot = (P12*P12 + P13*P13 - P23*P23)/(2*P12*P13)
elif (2*P12*P13) < 0.001:
dot = (P12*P12 + P13*P13 - P23*P23)/0.001
if dot < -1: # If float exceeds -1 prevent arcos error and force to equal -1
dot = -1
ang = math.degrees(math.acos(dot))
chain.append(ang)
# Select points in contour that have an angle more acute than thresh
index = []
for c in range(len(chain)):
if float(chain[c]) <= thresh:
index.append(c)
# There oftentimes several points around tips with acute angles
# Here we try to pick the most acute angle given a set of contiguous point
# Sep is the number of points to evaluate the number of vertices
out = []
tester = []
for i in range(len(index)-1):
# print str(index[i])
if index[i+1] - index[i] < sep:
tester.append(index[i])
if index[i+1] - index[i] >= sep:
tester.append(index[i])
# print(tester)
angles = ([chain[d] for d in tester])
keeper = angles.index(min(angles))
t = tester[keeper]
# print str(t)
out.append(t)
tester = []
# Store the points in the variable acute
flag = 0
acute = obj[[out]]
acute_points = []
for pt in acute:
acute_points.append(pt[0].tolist())
img2 = np.copy(img)
# Plot each of these tip points on the image
for i in acute:
x, y = i.ravel()
cv2.circle(img2, (x, y), params.line_thickness, (255, 0, 255), -1)
if params.debug == 'print':
print_image(img2, os.path.join(params.debug_outdir, str(params.device) + '_acute_vertices.png'))
elif params.debug == 'plot':
plot_image(img2)
# Store into global measurements
outputs.add_observation(variable='tip_coordinates', trait='tip coordinates',
method='plantcv.plantcv.acute_vertex', scale='none', datatype=list,
value=acute_points, label='none')
return acute_points, img2
|
def acute_vertex(img, obj, win, thresh, sep):
"""acute_vertex: identify corners/acute angles of an object
For each point in contour, get a point before (pre) and after (post) the point of interest,
calculate the angle between the pre and post point.
Inputs:
img = the original image
obj = a contour of the plant object (this should be output from the object_composition.py fxn)
win = win argument specifies the pre and post point distances (a value of 30 worked well for a sample image)
thresh = an threshold to set for acuteness; keep points with an angle more acute than the threshold (a value of 15
worked well for sample image)
sep = the number of contour points to search within for the most acute value
Returns:
acute_points = list of acute points
img2 = debugging image
:param img: ndarray
:param obj: ndarray
:param win: int
:param thresh: int
:param sep: int
:return acute_points: ndarray
:return img2: ndarray
"""
params.device += 1
chain = []
if not np.any(obj):
acute = ('NA', 'NA')
return acute
for i in range(len(obj) - win):
x, y = obj[i].ravel()
pre_x, pre_y = obj[i - win].ravel()
post_x, post_y = obj[i + win].ravel()
# Angle in radians derived from Law of Cosines, converted to degrees
P12 = np.sqrt((x-pre_x)*(x-pre_x)+(y-pre_y)*(y-pre_y))
P13 = np.sqrt((x-post_x)*(x-post_x)+(y-post_y)*(y-post_y))
P23 = np.sqrt((pre_x-post_x)*(pre_x-post_x)+(pre_y-post_y)*(pre_y-post_y))
if (2*P12*P13) > 0.001:
dot = (P12*P12 + P13*P13 - P23*P23)/(2*P12*P13)
elif (2*P12*P13) < 0.001:
dot = (P12*P12 + P13*P13 - P23*P23)/0.001
if dot < -1: # If float exceeds -1 prevent arcos error and force to equal -1
dot = -1
ang = math.degrees(math.acos(dot))
chain.append(ang)
# Select points in contour that have an angle more acute than thresh
index = []
for c in range(len(chain)):
if float(chain[c]) <= thresh:
index.append(c)
# There oftentimes several points around tips with acute angles
# Here we try to pick the most acute angle given a set of contiguous point
# Sep is the number of points to evaluate the number of vertices
out = []
tester = []
for i in range(len(index)-1):
# print str(index[i])
if index[i+1] - index[i] < sep:
tester.append(index[i])
if index[i+1] - index[i] >= sep:
tester.append(index[i])
# print(tester)
angles = ([chain[d] for d in tester])
keeper = angles.index(min(angles))
t = tester[keeper]
# print str(t)
out.append(t)
tester = []
# Store the points in the variable acute
flag = 0
acute = obj[[out]]
acute_points = []
for pt in acute:
acute_points.append(pt[0].tolist())
img2 = np.copy(img)
# Plot each of these tip points on the image
for i in acute:
x, y = i.ravel()
cv2.circle(img2, (x, y), params.line_thickness, (255, 0, 255), -1)
if params.debug == 'print':
print_image(img2, os.path.join(params.debug_outdir, str(params.device) + '_acute_vertices.png'))
elif params.debug == 'plot':
plot_image(img2)
# Store into global measurements
outputs.add_observation(variable='tip_coordinates', trait='tip coordinates',
method='plantcv.plantcv.acute_vertex', scale='none', datatype=list,
value=acute_points, label='none')
return acute_points, img2
|
30,806 |
def disable_user_command(client, args):
"""
Disable user using PUT to Envoy API , if Connection to the service is successful.
Args: demisto command line argument
client: Envoy
Returns:
success : success=True, id, email, login as username, details, active status
fail : success=False, id, login as username, errorCod, errorMessage, details
"""
old_scim = verify_and_load_scim_data(args.get('scim'))
new_scim = {'active': False}
format_pre_text = 'Disable'
return process_update_command(client, args, old_scim, new_scim, format_pre_text)
|
def disable_user_command(client, args):
"""
Disable user using PUT to Envoy API , if Connection to the service is successful.
Args: demisto command line argument
client: Envoy
Returns:
success : success=True, id, email, login as username, details, active status
fail : success=False, id, login as username, errorCod, errorMessage, details
"""
old_scim = verify_and_load_scim_data(args.get('scim'))
new_scim = {'active': False}
command_name = 'Disable'
return process_update_command(client, args, old_scim, new_scim, format_pre_text)
|
59,048 |
def parse_other_functions(o, otherfuncs, sigs, external_interfaces, global_ctx, default_function):
# check for payable/nonpayable external functions to optimize nonpayable assertions
func_types = [i._metadata["type"] for i in global_ctx._defs]
mutabilities = [i.mutability for i in func_types if i.visibility == FunctionVisibility.EXTERNAL]
has_payable = next((True for i in mutabilities if i == StateMutability.PAYABLE), False)
has_nonpayable = next((True for i in mutabilities if i != StateMutability.PAYABLE), False)
is_default_payable = (
default_function is not None
and default_function._metadata["type"].mutability == StateMutability.PAYABLE
)
# when a contract has a payable default function and at least one nonpayable
# external function, we must perform the nonpayable check on every function
check_per_function = is_default_payable and has_nonpayable
# generate LLL for regular functions
payable_func_sub = ["seq"]
external_func_sub = ["seq"]
internal_func_sub = ["seq"]
add_gas = func_init_lll().gas
for func_node in otherfuncs:
func_type = func_node._metadata["type"]
func_lll = parse_function(
func_node, {**{"self": sigs}, **external_interfaces}, global_ctx, check_per_function
)
if func_type.visibility == FunctionVisibility.INTERNAL:
internal_func_sub.append(func_lll)
elif func_type.mutability == StateMutability.PAYABLE:
add_gas += 30
payable_func_sub.append(func_lll)
else:
external_func_sub.append(func_lll)
add_gas += 30
func_lll.total_gas += add_gas
for sig in sig_utils.generate_default_arg_sigs(func_node, external_interfaces, global_ctx):
sig.gas = func_lll.total_gas
sigs[sig.sig] = sig
# generate LLL for fallback function
if default_function:
fallback_lll = parse_function(
default_function,
{**{"self": sigs}, **external_interfaces},
global_ctx,
# include a nonpayble check here if the contract only has a default function
check_per_function or not otherfuncs,
)
else:
fallback_lll = LLLnode.from_list(["revert", 0, 0], typ=None, annotation="Default function")
if check_per_function:
external_seq = ["seq", payable_func_sub, external_func_sub]
else:
# payable functions are placed prior to nonpayable functions
# and seperated by a nonpayable assertion
external_seq = ["seq"]
if has_payable:
external_seq.append(payable_func_sub)
if has_nonpayable:
external_seq.extend([["assert", ["iszero", "callvalue"]], external_func_sub])
# bytecode is organized by: external functions, fallback fn, internal functions
# this way we save gas and reduce bytecode by not jumping over internal functions
main_seq = [
"seq",
func_init_lll(),
["with", "_func_sig", ["mload", 0], external_seq],
["seq_unchecked", ["label", "fallback"], fallback_lll],
internal_func_sub,
]
o.append(["return", 0, ["lll", main_seq, 0]])
return o, main_seq
|
def parse_other_functions(o, otherfuncs, sigs, external_interfaces, global_ctx, default_function):
# check for payable/nonpayable external functions to optimize nonpayable assertions
func_types = [i._metadata["type"] for i in global_ctx._defs]
mutabilities = [i.mutability for i in func_types if i.visibility == FunctionVisibility.EXTERNAL]
has_payable = next((True for i in mutabilities if i == StateMutability.PAYABLE), None) is None
has_nonpayable = next((True for i in mutabilities if i != StateMutability.PAYABLE), None) is None
is_default_payable = (
default_function is not None
and default_function._metadata["type"].mutability == StateMutability.PAYABLE
)
# when a contract has a payable default function and at least one nonpayable
# external function, we must perform the nonpayable check on every function
check_per_function = is_default_payable and has_nonpayable
# generate LLL for regular functions
payable_func_sub = ["seq"]
external_func_sub = ["seq"]
internal_func_sub = ["seq"]
add_gas = func_init_lll().gas
for func_node in otherfuncs:
func_type = func_node._metadata["type"]
func_lll = parse_function(
func_node, {**{"self": sigs}, **external_interfaces}, global_ctx, check_per_function
)
if func_type.visibility == FunctionVisibility.INTERNAL:
internal_func_sub.append(func_lll)
elif func_type.mutability == StateMutability.PAYABLE:
add_gas += 30
payable_func_sub.append(func_lll)
else:
external_func_sub.append(func_lll)
add_gas += 30
func_lll.total_gas += add_gas
for sig in sig_utils.generate_default_arg_sigs(func_node, external_interfaces, global_ctx):
sig.gas = func_lll.total_gas
sigs[sig.sig] = sig
# generate LLL for fallback function
if default_function:
fallback_lll = parse_function(
default_function,
{**{"self": sigs}, **external_interfaces},
global_ctx,
# include a nonpayble check here if the contract only has a default function
check_per_function or not otherfuncs,
)
else:
fallback_lll = LLLnode.from_list(["revert", 0, 0], typ=None, annotation="Default function")
if check_per_function:
external_seq = ["seq", payable_func_sub, external_func_sub]
else:
# payable functions are placed prior to nonpayable functions
# and seperated by a nonpayable assertion
external_seq = ["seq"]
if has_payable:
external_seq.append(payable_func_sub)
if has_nonpayable:
external_seq.extend([["assert", ["iszero", "callvalue"]], external_func_sub])
# bytecode is organized by: external functions, fallback fn, internal functions
# this way we save gas and reduce bytecode by not jumping over internal functions
main_seq = [
"seq",
func_init_lll(),
["with", "_func_sig", ["mload", 0], external_seq],
["seq_unchecked", ["label", "fallback"], fallback_lll],
internal_func_sub,
]
o.append(["return", 0, ["lll", main_seq, 0]])
return o, main_seq
|
27,905 |
def batch_renormalization(x, gamma, beta, rmax, dmax, eps=2e-5,
running_mean=None, running_var=None, decay=0.9,
update_statistics=False):
"""Batch renormalization function.
This is an extension of batch normalization, which ensures that the
training and inference models generate the same outputs that depend on
individual examples rather than the entire minibatch.
.. note::
This function does not perform in-place update to
``running_mean`` and ``running_var`` by default, contrary to
:func:`~chainer.functions.batch_normalization`.
If the function is called, it will not be possible to access the
updated running mean and variance statistics, because they are members
of the function object, which cannot be accessed by the caller.
If it is desired to update the running statistics, call the function
with ``update_statistics=True`` option.
.. note::
For the consistency with Batch Normalization, this function
intentionally ignores some of the theoretical flaws in Algorithm 1 of
the Batch Renormalization paper:
- The function maintains the moving average of variances
:math:`\\sigma^2`, while the original paper maintain the moving
average of standard deviations :math:`\\sigma`.
- The function applies Bessel's correction to update the moving average
of variances.
See: `Batch Renormalization: Towards Reducing Minibatch Dependence in \
Batch-Normalized Models <https://arxiv.org/abs/1702.03275>`_
.. seealso:: :class:`~chainer.links.BatchRenormalization`
"""
if running_mean is None:
raise TypeError('running_mean is required')
if running_var is None:
raise TypeError('running_var is required')
return BatchRenormalizationFunction(
eps, running_mean, running_var, decay, rmax, dmax, update_statistics
)(x, gamma, beta)
|
def batch_renormalization(x, gamma, beta, rmax, dmax, eps=2e-5,
running_mean=None, running_var=None, decay=0.9,
update_statistics=False):
"""Batch renormalization function.
This is an extension of batch normalization, which ensures that the
training and inference models generate the same outputs that depend on
individual examples rather than the entire minibatch.
.. note::
This function does not perform in-place update to
``running_mean`` and ``running_var`` by default, contrary to
:func:`~chainer.functions.batch_normalization`.
If the function is called, it will not be possible to access the
updated running mean and variance statistics, because they are members
of the function object, which cannot be accessed by the caller.
If it is desired to update the running statistics, call the function
with ``update_statistics=True`` option.
.. note::
For the consistency with Batch Normalization, this function
intentionally ignores some of the theoretical flaws in Algorithm 1 of
the Batch Renormalization paper:
- The function maintains the moving average of variances
:math:`\\sigma^2`, while the original paper maintain the moving
average of standard deviations :math:`\\sigma`.
- ``F.batch_renormalization`` applies Bessel's correction to update the moving average
of variances.
See: `Batch Renormalization: Towards Reducing Minibatch Dependence in \
Batch-Normalized Models <https://arxiv.org/abs/1702.03275>`_
.. seealso:: :class:`~chainer.links.BatchRenormalization`
"""
if running_mean is None:
raise TypeError('running_mean is required')
if running_var is None:
raise TypeError('running_var is required')
return BatchRenormalizationFunction(
eps, running_mean, running_var, decay, rmax, dmax, update_statistics
)(x, gamma, beta)
|
809 |
def series(expr, x=None, x0=0, n=6, dir="+"):
"""Returns
=======
Series expansion of expr around point `x = x0`.
Parameters
==========
expr : Expression
The expression whose series is to be expanded.
x : Symbol
It is the variable of the expression to be calculated.
x0 : Value
The value around which ``x`` is calculated. Can be ``oo`` or
``-oo``.
n : Value
The number of terms upto which the series is to be expanded.
dir : String
Optional (default: "+")
The series-expansion can be bi-directional. If ``dir="+"``,
then (x->x0+). If ``dir="-", then (x->x0-). For infinite
``x0`` (``oo`` or ``-oo``), the ``dir`` argument is determined
from the direction of the infinity (i.e., ``dir="-"`` for
``oo``).
Examples
========
>>> from sympy import Symbol, series, tan, oo
>>> from sympy.abc import x
>>> f = tan(x)
>>> f.series(x, 2, 6, "+")
tan(2) + (1 + tan(2)**2)*(x - 2) + (x - 2)**2*(tan(2)**3 + tan(2)) +
(x - 2)**3*(1/3 + 4*tan(2)**2/3 + tan(2)**4) + (x - 2)**4*(tan(2)**5 +
5*tan(2)**3/3 + 2*tan(2)/3) + (x - 2)**5*(2/15 + 17*tan(2)**2/15 +
2*tan(2)**4 + tan(2)**6) + O((x - 2)**6, (x, 2))
>>> f.series(x, 2, 3, "-")
tan(2) + (-x + 2)*(-tan(2)**2 - 1) + (-x + 2)**2*(tan(2)**3 + tan(2))
+ O((x - 2)**3, (x, 2))
>>> f.series(x, 2, oo, "+")
Traceback (most recent call last):
...
sympy.core.function.PoleError: Asymptotic expansion of tan around [oo]
is not implemented.
See the docstring of Expr.series() for complete details of this wrapper.
"""
expr = sympify(expr)
return expr.series(x, x0, n, dir)
|
def series(expr, x=None, x0=0, n=6, dir="+"):
"""Returns
=======
Series expansion of expr around point `x = x0`.
Parameters
==========
expr : Expression
The expression whose series is to be expanded.
x : Symbol
It is the variable of the expression to be calculated.
x0 : Value
The value around which ``x`` is calculated. Can be ``oo`` or
``-oo``.
n : Integer
The number of terms upto which the series is to be expanded.
dir : String
Optional (default: "+")
The series-expansion can be bi-directional. If ``dir="+"``,
then (x->x0+). If ``dir="-", then (x->x0-). For infinite
``x0`` (``oo`` or ``-oo``), the ``dir`` argument is determined
from the direction of the infinity (i.e., ``dir="-"`` for
``oo``).
Examples
========
>>> from sympy import Symbol, series, tan, oo
>>> from sympy.abc import x
>>> f = tan(x)
>>> f.series(x, 2, 6, "+")
tan(2) + (1 + tan(2)**2)*(x - 2) + (x - 2)**2*(tan(2)**3 + tan(2)) +
(x - 2)**3*(1/3 + 4*tan(2)**2/3 + tan(2)**4) + (x - 2)**4*(tan(2)**5 +
5*tan(2)**3/3 + 2*tan(2)/3) + (x - 2)**5*(2/15 + 17*tan(2)**2/15 +
2*tan(2)**4 + tan(2)**6) + O((x - 2)**6, (x, 2))
>>> f.series(x, 2, 3, "-")
tan(2) + (-x + 2)*(-tan(2)**2 - 1) + (-x + 2)**2*(tan(2)**3 + tan(2))
+ O((x - 2)**3, (x, 2))
>>> f.series(x, 2, oo, "+")
Traceback (most recent call last):
...
sympy.core.function.PoleError: Asymptotic expansion of tan around [oo]
is not implemented.
See the docstring of Expr.series() for complete details of this wrapper.
"""
expr = sympify(expr)
return expr.series(x, x0, n, dir)
|
25,032 |
def foo(x, y, z):
if all([x, y, z]) and set(map(lambda n: n % 2, [x, y, z])).issubset({0}):
pass
|
def foo(x, y, z):
if all(i and i%2==0 for i in [x, y, z]):
pass
|
5,937 |
def deprecated(
reason,
replacement,
gone_in,
gone_in_message=DEPRECATION_GONE_IN_MESSAGE,
issue=None,
):
# type: (str, Optional[str], Optional[str], Optional[str], Optional[int]) -> None
"""Helper to deprecate existing functionality.
reason:
Textual reason shown to the user about why this functionality has
been deprecated.
replacement:
Textual suggestion shown to the user about what alternative
functionality they can use.
gone_in:
The version of pip does this functionality should get removed in.
Raises errors if pip's current version is greater than or equal to
this.
gone_in_message:
Template for a textual representation of the pip version when this
functionality will be (or has been) removed.
issue:
Issue number on the tracker that would serve as a useful place for
users to find related discussion and provide feedback.
Always pass replacement, gone_in and issue as keyword arguments for clarity
at the call site.
"""
# Construct a nice message.
# This is eagerly formatted as we want it to get logged as if someone
# typed this entire message out.
sentences = [
(reason, DEPRECATION_MSG_PREFIX + "{}"),
(gone_in, gone_in_message),
(replacement, "A possible replacement is {}."),
(
issue,
(
"You can find discussion regarding this at "
"https://github.com/pypa/pip/issues/{}."
),
),
]
formatted_sentences = [
template.format(val) for val, template in sentences if val is not None
]
message = " ".join(
sentence for sentence in formatted_sentences if sentence != ""
)
# Raise as an error if functionality is gone.
if gone_in is not None and parse(current_version) >= parse(gone_in):
raise PipDeprecationWarning(message)
warnings.warn(message, category=PipDeprecationWarning, stacklevel=2)
|
def deprecated(
reason,
replacement,
gone_in,
gone_in_message=DEPRECATION_GONE_IN_MESSAGE,
issue=None,
):
# type: (str, Optional[str], Optional[str], Optional[str], Optional[int]) -> None
"""Helper to deprecate existing functionality.
reason:
Textual reason shown to the user about why this functionality has
been deprecated.
replacement:
Textual suggestion shown to the user about what alternative
functionality they can use.
gone_in:
The version of pip does this functionality should get removed in.
Raises errors if pip's current version is greater than or equal to
this.
gone_in_message:
Template for a textual representation of the pip version when this
functionality will be (or has been) removed.
issue:
Issue number on the tracker that would serve as a useful place for
users to find related discussion and provide feedback.
Always pass replacement, gone_in and issue as keyword arguments for clarity
at the call site.
"""
# Construct a nice message.
# This is eagerly formatted as we want it to get logged as if someone
# typed this entire message out.
sentences = [
(reason, DEPRECATION_MSG_PREFIX + "{}"),
(gone_in, gone_in_message),
(replacement, "A possible replacement is {}."),
(
issue,
(
"You can find discussion regarding this at "
"https://github.com/pypa/pip/issues/{}."
),
),
]
formatted_sentences = [
template.format(val) for val, template in sentences if val is not None
]
message = " ".join(
sentence for sentence in formatted_sentences if sentence != ""
)
# Raise as an error if the functionality is gone.
if gone_in is not None and parse(current_version) >= parse(gone_in):
raise PipDeprecationWarning(message)
warnings.warn(message, category=PipDeprecationWarning, stacklevel=2)
|
30,958 |
def get_user_iam(default_base_dn, page_size, args):
user_profile = args.get("user-profile")
user_profile_delta = args.get('user-profile-delta')
default_attribute = args.get('defult_attribute', "samaccountname")
iam_user_profile = IAMUserProfile(user_profile=user_profile, user_profile_delta=user_profile_delta)
ad_user = iam_user_profile.map_object(mapper_name=OUTGOING_MAPPER)
value = ad_user.get(default_attribute)
# removing keys with no values
user = {k: v for k, v in ad_user.items() if v}
attributes = list(user.keys())
query = f'(&(objectClass=User)(objectCategory=person)({default_attribute}={value}))'
try:
entries = search_with_paging(
query,
default_base_dn,
attributes=attributes,
size_limit=DEFAULT_LIMIT,
page_size=page_size
)
if not entries.get('flat'):
iam_user_profile.set_result(success=False, error_message="No user was found")
else:
ad_user = entries.get('flat')[0]
user_account_control = ad_user.get('userAccountControl') not in INACTIVE_LIST_OPTIONS
ad_user["userAccountControl"] = user_account_control
iam_user_profile.set_result(success=True,
email=ad_user.get('email'),
username=ad_user.get('name'),
details=ad_user,
active=user_account_control)
user_profile.update_with_app_data(ad_user, INCOMING_MAPPER)
return_results(iam_user_profile)
except Exception as e:
iam_user_profile.set_result(success=False, error_message=str(e))
return_results(iam_user_profile)
|
def get_user_iam(default_base_dn, page_size, args):
user_profile = args.get("user-profile")
user_profile_delta = args.get('user-profile-delta')
default_attribute = args.get('defult_attribute', "samaccountname")
iam_user_profile = IAMUserProfile(user_profile=user_profile, user_profile_delta=user_profile_delta)
ad_user = iam_user_profile.map_object(mapper_name=mapper_name, mapping_type=mapping_type)
value = ad_user.get(default_attribute)
# removing keys with no values
user = {k: v for k, v in ad_user.items() if v}
attributes = list(user.keys())
query = f'(&(objectClass=User)(objectCategory=person)({default_attribute}={value}))'
try:
entries = search_with_paging(
query,
default_base_dn,
attributes=attributes,
size_limit=DEFAULT_LIMIT,
page_size=page_size
)
if not entries.get('flat'):
iam_user_profile.set_result(success=False, error_message="No user was found")
else:
ad_user = entries.get('flat')[0]
user_account_control = ad_user.get('userAccountControl') not in INACTIVE_LIST_OPTIONS
ad_user["userAccountControl"] = user_account_control
iam_user_profile.set_result(success=True,
email=ad_user.get('email'),
username=ad_user.get('name'),
details=ad_user,
active=user_account_control)
user_profile.update_with_app_data(ad_user, INCOMING_MAPPER)
return_results(iam_user_profile)
except Exception as e:
iam_user_profile.set_result(success=False, error_message=str(e))
return_results(iam_user_profile)
|
106 |
def get_sponsored_editions_civi(user):
"""
Deprecated by get_sponsored_editions but worth maintaining as we
may periodically have to programmatically access data from civi
since it is the ground-truth of this data.
Gets a list of books from the civi API which internet archive
@archive_username has sponsored
:param user user: infogami user
:rtype: list
:return: list of archive.org items sponsored by user
"""
archive_id = get_internet_archive_id(user.key if 'key' in user else user._key)
# MyBooks page breaking on local environments without archive_id check
if archive_id:
contact_id = get_contact_id_by_username(archive_id) if archive_id else None
return get_sponsorships_by_contact_id(contact_id) if contact_id else []
return {}
|
def get_sponsored_editions_civi(user):
"""
Deprecated by get_sponsored_editions but worth maintaining as we
may periodically have to programmatically access data from civi
since it is the ground-truth of this data.
Gets a list of books from the civi API which internet archive
@archive_username has sponsored
:param user user: infogami user
:rtype: list
:return: list of archive.org items sponsored by user
"""
archive_id = get_internet_archive_id(user.key if 'key' in user else user._key)
# MyBooks page breaking on local environments without archive_id check
if archive_id:
contact_id = get_contact_id_by_username(archive_id) if archive_id else None
return get_sponsorships_by_contact_id(contact_id) if contact_id else []
return []
|
56,551 |
def check_keyboard_type(keyboard: object) -> bool:
"""Checks if the keyboard provided is of the correct type - A list of lists."""
if not isinstance(keyboard, list):
return False
for row in keyboard:
if not isinstance(row, list):
return False
return True
|
def check_keyboard_type(keyboard: object) -> bool:
"""Checks if the keyboard provided is of the correct type - A list of lists.
Implicitly tested in the init-tests of `{Inline, Reply}KeyboardMarkup`
"""
if not isinstance(keyboard, list):
return False
for row in keyboard:
if not isinstance(row, list):
return False
return True
|
54,385 |
def test_pyproxy2(selenium):
result = selenium.run_js(
"""
pyodide.runPython(`
class Foo:
pass
f = Foo()
`);
return pyodide.pyimport('f').toString();
"""
)
print(result)
assert result.startswith("<Foo")
|
def test_pyproxy2(selenium):
result = selenium.run_js(
"""
pyodide.runPython(`
class Foo:
pass
f = Foo()
`);
return pyodide.pyimport('f').toString();
"""
)
assert result.startswith("<Foo")
|
2,015 |
def test_minmax_scaler_clip():
# test to add a new paramter 'clip' to MinMaxScaler
X = iris.data
x = np.random.randint(0, 50, (20, 4))
scaler = MinMaxScaler(clip=True)
# default params, feature_range=(0,1)
X_scaled = scaler.fit(X)
x_transformed = X_scaled.transform(x)
assert x_transformed.min() >= 0
assert x_transformed.max() <= 1
|
def test_minmax_scaler_clip():
# test parameter 'clip' in MinMaxScaler
X = iris.data
x = np.random.randint(0, 50, (20, 4))
scaler = MinMaxScaler(clip=True)
# default params, feature_range=(0,1)
X_scaled = scaler.fit(X)
x_transformed = X_scaled.transform(x)
assert x_transformed.min() >= 0
assert x_transformed.max() <= 1
|
43,619 |
def hf_state(n_electrons, m_spin_orbitals):
r"""Generates the occupation-number vector representing the Hartree-Fock (HF)
state of :math:`N` electrons in a basis of :math:`M` spin orbitals.
The many-particle wave function in the HF approximation is a `Slater determinant
<https://en.wikipedia.org/wiki/Slater_determinant>`_. In Fock space, a Slater determinant
is represented by the occupation-number vector:
.. math:
\vert {\bf n} \rangle = \vert n_1, n_2, \dots, n_M \rangle,
n_i = \left\lbrace \begin{array}{ll} 1 & i \leq N \\ 0 & i > N \end{array} \right..
**Example**
>>> init_state = hf_state(2, 6)
>>> print(init_state)
[1 1 0 0 0 0]
Args:
n_electrons (int): number of active electrons
m_spin_orbitals (int): number of active **spin-orbitals**
Returns:
array: NumPy array containing the vector :math:`\vert {\bf n} \rangle`
"""
if not n_electrons > 0:
raise ValueError(
"The number of active electrons has to be > 0; got 'n_electrons' = {}"
.format(n_electrons)
)
if n_electrons > m_spin_orbitals:
raise ValueError(
"The number of active orbitals has to be >= the number of active electrons;"
" got 'm_spin_orbitals'={} < 'n_electrons'={}".format(m_spin_orbitals, n_electrons)
)
hf_state_on = [1 if i < n_electrons else 0 for i in range(m_spin_orbitals)]
return np.array(hf_state_on)
|
def hf_state(n_electrons, m_spin_orbitals):
r"""Generates the occupation-number vector representing the Hartree-Fock (HF)
state of :math:`N` electrons in a basis of :math:`M` spin orbitals.
The many-particle wave function in the HF approximation is a `Slater determinant
<https://en.wikipedia.org/wiki/Slater_determinant>`_. In Fock space, a Slater determinant
is represented by the occupation-number vector:
.. math:
\vert {\bf n} \rangle = \vert n_1, n_2, \dots, n_M \rangle,
n_i = \left\lbrace \begin{array}{ll} 1 & i \leq N \\ 0 & i > N \end{array} \right..
**Example**
>>> init_state = hf_state(2, 6)
>>> print(init_state)
[1 1 0 0 0 0]
Args:
n_electrons (int): number of active electrons
m_spin_orbitals (int): number of active **spin-orbitals**
Returns:
array: NumPy array containing the vector :math:`\vert {\bf n} \rangle`
"""
if n_electrons <= 0:
raise ValueError(
"The number of active electrons has to be > 0; got 'n_electrons' = {}"
.format(n_electrons)
)
if n_electrons > m_spin_orbitals:
raise ValueError(
"The number of active orbitals has to be >= the number of active electrons;"
" got 'm_spin_orbitals'={} < 'n_electrons'={}".format(m_spin_orbitals, n_electrons)
)
hf_state_on = [1 if i < n_electrons else 0 for i in range(m_spin_orbitals)]
return np.array(hf_state_on)
|
32,267 |
def update_remote_system_command(client: Client, args: Dict[str, Any], params: Dict[str, Any]) -> str:
"""
This command pushes local changes to the remote system.
Args:
client: XSOAR Client to use.
args:
args['data']: the data to send to the remote system
args['entries']: the entries to send to the remote system
args['incident_changed']: boolean telling us if the local incident indeed changed or not
args['remote_incident_id']: the remote incident id
params:
entry_tags: the tags to pass to the entries (to separate between comments and work_notes)
Returns: The remote incident id - ticket_id
"""
parsed_args = UpdateRemoteSystemArgs(args)
if parsed_args.delta:
demisto.debug(f'Got the following delta keys {str(list(parsed_args.delta.keys()))}')
# ticket_type = client.ticket_type
ticket_id = parsed_args.remote_incident_id
if parsed_args.incident_changed:
demisto.debug(f'Incident changed: {parsed_args.incident_changed}')
# Close ticket if needed
if parsed_args.inc_status == IncidentStatus.DONE and params.get('close_ticket'):
# Set status TOPdesk ticket to Closed
demisto.debug('Close TOPdesk ticket')
# Close with API call or set field and let mirroring handle it.
# client.update_incident
# TODO: Something with updated delta keys or not?
# 'processingStatus', 'priority', 'urgency', 'impact'
update_args = {
'id': ticket_id
}
for key in parsed_args.delta:
update_args[key] = parsed_args.delta[key]
demisto.debug(f'SZU update_args=[{update_args}]')
client.update_incident(update_args)
entries = parsed_args.entries
if entries:
demisto.debug(f'New entries {entries}')
for entry in entries:
demisto.debug(f'Sending entry {entry.get("id")}, type: {entry.get("type")}')
# Mirroring files as entries
if entry.get('type') == 3:
path_res = demisto.getFilePath(entry.get('id'))
full_file_name = path_res.get('name')
file_name, file_extension = os.path.splitext(full_file_name)
if not file_extension:
file_extension = ''
client.attachment_upload(incident_id=ticket_id, incident_number=None, file_entry=entry.get('id'),
file_name=file_name + '_mirrored_from_xsoar' + file_extension,
invisible_for_caller=False,
file_description=f"Upload from xsoar: {file_name}.{file_extension}")
else:
# Mirroring comment and work notes as entries
xargs = {
'id': '',
'action': '',
'action_invisible_for_caller': False,
}
tags = entry.get('tags', [])
if params.get('work_notes_tag') in tags:
xargs['action_invisible_for_caller'] = True
elif params.get('comments_tag') in tags:
xargs['action_invisible_for_caller'] = False
# Sometimes user is an empty str, not None, therefore nothing is displayed
user = entry.get('user', 'dbot')
if (user):
duser = demisto.findUser(username=user)
name = duser['name']
else:
name = 'Xsoar dbot'
text = f"<i><u>Update from {name}:</u></i><br><br>{str(entry.get('contents', ''))}" \
+ "<br><br><i>Mirrored from Cortex XSOAR</i>"
# client.add_comment(ticket_id, ticket_type, key, text)
xargs['id'] = ticket_id
xargs['action'] = text
client.update_incident(xargs)
return ticket_id
|
def update_remote_system_command(client: Client, args: Dict[str, Any], params: Dict[str, Any]) -> str:
"""
This command pushes local changes to the remote system.
Args:
client: XSOAR Client to use.
args:
args['data']: the data to send to the remote system
args['entries']: the entries to send to the remote system
args['incident_changed']: boolean telling us if the local incident indeed changed or not
args['remote_incident_id']: the remote incident id
params:
entry_tags: the tags to pass to the entries (to separate between comments and work_notes)
Returns: The remote incident id - ticket_id
"""
parsed_args = UpdateRemoteSystemArgs(args)
if parsed_args.delta:
demisto.debug(f'Got the following delta keys {str(list(parsed_args.delta.keys()))}')
# ticket_type = client.ticket_type
ticket_id = parsed_args.remote_incident_id
if parsed_args.incident_changed:
demisto.debug(f'Incident changed: {parsed_args.incident_changed}')
# Close ticket if needed
if parsed_args.inc_status == IncidentStatus.DONE and params.get('close_ticket'):
# Set status TOPdesk ticket to Closed
demisto.debug('Close TOPdesk ticket')
# Close with API call or set field and let mirroring handle it.
# client.update_incident
# TODO: Something with updated delta keys or not?
# 'processingStatus', 'priority', 'urgency', 'impact'
update_args = {
'id': ticket_id
}
for key in parsed_args.delta:
update_args[key] = parsed_args.delta[key]
demisto.debug(f'SZU update_args=[{update_args}]')
client.update_incident(update_args)
entries = parsed_args.entries
if entries:
demisto.debug(f'New entries {entries}')
for entry in entries:
demisto.debug(f'Sending entry {entry.get("id")}, type: {entry.get("type")}')
# Mirroring files as entries
if entry.get('type') == EntryType.FILE:
path_res = demisto.getFilePath(entry.get('id'))
full_file_name = path_res.get('name')
file_name, file_extension = os.path.splitext(full_file_name)
if not file_extension:
file_extension = ''
client.attachment_upload(incident_id=ticket_id, incident_number=None, file_entry=entry.get('id'),
file_name=file_name + '_mirrored_from_xsoar' + file_extension,
invisible_for_caller=False,
file_description=f"Upload from xsoar: {file_name}.{file_extension}")
else:
# Mirroring comment and work notes as entries
xargs = {
'id': '',
'action': '',
'action_invisible_for_caller': False,
}
tags = entry.get('tags', [])
if params.get('work_notes_tag') in tags:
xargs['action_invisible_for_caller'] = True
elif params.get('comments_tag') in tags:
xargs['action_invisible_for_caller'] = False
# Sometimes user is an empty str, not None, therefore nothing is displayed
user = entry.get('user', 'dbot')
if (user):
duser = demisto.findUser(username=user)
name = duser['name']
else:
name = 'Xsoar dbot'
text = f"<i><u>Update from {name}:</u></i><br><br>{str(entry.get('contents', ''))}" \
+ "<br><br><i>Mirrored from Cortex XSOAR</i>"
# client.add_comment(ticket_id, ticket_type, key, text)
xargs['id'] = ticket_id
xargs['action'] = text
client.update_incident(xargs)
return ticket_id
|
7,365 |
def _centered(arr, newshape, axes):
""" Return the center `newshape` portion of `arr`, leaving axes not
in `axes` untouched. """
newshape = np.asarray(newshape)
currshape = np.array(arr.shape)
slices = [slice(None, None)] * arr.ndim
for ax in axes:
startind = (currshape[ax] - newshape[ax]) // 2
endind = startind + newshape[ax]
slices[ax] = slice(startind, endind)
return arr[tuple(slices)]
|
def _centered(arr, newshape, axes):
"""Return the center `newshape` portion of `arr`, leaving axes not
in `axes` untouched. """
newshape = np.asarray(newshape)
currshape = np.array(arr.shape)
slices = [slice(None, None)] * arr.ndim
for ax in axes:
startind = (currshape[ax] - newshape[ax]) // 2
endind = startind + newshape[ax]
slices[ax] = slice(startind, endind)
return arr[tuple(slices)]
|
41,885 |
def _get_optimization_history_plot_matplotlib(study: Study) -> Figure:
"""Plot optimization history of all trials in a study with matplotlib.
Args:
study:
A :class:`~optuna.study.Study` object whose trials are plotted for their objective
values.
Returns:
A :class:`matplotlib.figure.Figure` object.
"""
# Setup
plt.style.use("ggplot") # Use ggplot style sheet for similar outputs to plotly.
fig, ax = plt.subplots()
ax.set_title("Optimization History Plot")
ax.set_xlabel("#Trials")
ax.set_ylabel("Objective Value")
cmap = plt.get_cmap("tab10") # Use tab10 colormap for similar outputs to plotly.
# Prepare data for plotting
trials = [t for t in study.trials if t.state == TrialState.COMPLETE]
if len(trials) == 0:
_logger.warning("Study instance does not contain trials.")
return fig
best_values = [float("inf")] if study.direction == StudyDirection.MINIMIZE else [-float("inf")]
comp = min if study.direction == StudyDirection.MINIMIZE else max
for trial in trials:
trial_value = trial.value
assert trial_value is not None # For mypy
best_values.append(comp(best_values[-1], trial_value))
best_values.pop(0)
# Draw graphs
ax.scatter(
x=[t.number for t in trials],
y=[t.value for t in trials],
color=cmap(0),
alpha=1,
label="Objective Value",
)
ax.plot(
[t.number for t in trials],
best_values,
marker="o",
color=cmap(3),
alpha=0.5,
label="Best Value",
)
ax.legend()
return fig
|
def _get_optimization_history_plot_matplotlib(study: Study) -> Figure:
"""Plot optimization history of all trials in a study with matplotlib.
Args:
study:
A :class:`~optuna.study.Study` object whose trials are plotted for their objective
values.
Returns:
A :class:`matplotlib.figure.Figure` object.
"""
# Setup
plt.style.use("ggplot") # Use ggplot style sheet for similar outputs to plotly.
fig, ax = plt.subplots()
ax.set_title("Optimization History Plot")
ax.set_xlabel("#Trials")
ax.set_ylabel("Objective Value")
cmap = plt.get_cmap("tab10") # Use tab10 colormap for similar outputs to plotly.
# Prepare data for plotting.
trials = [t for t in study.trials if t.state == TrialState.COMPLETE]
if len(trials) == 0:
_logger.warning("Study instance does not contain trials.")
return fig
best_values = [float("inf")] if study.direction == StudyDirection.MINIMIZE else [-float("inf")]
comp = min if study.direction == StudyDirection.MINIMIZE else max
for trial in trials:
trial_value = trial.value
assert trial_value is not None # For mypy
best_values.append(comp(best_values[-1], trial_value))
best_values.pop(0)
# Draw graphs
ax.scatter(
x=[t.number for t in trials],
y=[t.value for t in trials],
color=cmap(0),
alpha=1,
label="Objective Value",
)
ax.plot(
[t.number for t in trials],
best_values,
marker="o",
color=cmap(3),
alpha=0.5,
label="Best Value",
)
ax.legend()
return fig
|
39,494 |
def df_getitem_bool_series_idx_main_codelines(self, idx):
"""Generate main code lines for df.getitem"""
# optimization for default indexes in df and idx when index alignment is trivial
if (isinstance(self.index, types.NoneType) and isinstance(idx.index, types.NoneType)):
func_lines = [f' length = {df_length_expr(self)}',
f' self_index = {df_index_expr(self, as_range=True)}',
f' if length > len(idx):',
f' msg = "Unalignable boolean Series provided as indexer " + \\',
f' "(index of the boolean Series and of the indexed object do not match)."',
f' raise IndexingError(msg)',
f' # do not trim idx._data to length as getitem_by_mask handles such case',
f' res_index = getitem_by_mask(self_index, idx._data)',
f' # df index is default, same as positions so it can be used in take']
results = []
for i, col in enumerate(self.columns):
res_data = f'res_data_{i}'
func_lines += [
f' data_{i} = self._data[{i}]',
f' {res_data} = sdc_take(data_{i}, res_index)'
]
results.append((col, res_data))
data = ', '.join(f'"{col}": {data}' for col, data in results)
func_lines += [
f' return pandas.DataFrame({{{data}}}, index=res_index)'
]
else:
func_lines = [f' length = {df_length_expr(self)}',
f' self_index = self.index',
f' idx_reindexed = sdc_reindex_series(idx._data, idx.index, idx._name, self_index)',
f' res_index = getitem_by_mask(self_index, idx_reindexed._data)',
f' selected_pos = getitem_by_mask(range(0, length), idx_reindexed._data)']
results = []
for i, col in enumerate(self.columns):
res_data = f'res_data_{i}'
func_lines += [
f' data_{i} = self._data[{i}]',
f' {res_data} = sdc_take(data_{i}, selected_pos)'
]
results.append((col, res_data))
data = ', '.join(f'"{col}": {data}' for col, data in results)
func_lines += [
f' return pandas.DataFrame({{{data}}}, index=res_index)'
]
return func_lines
|
def df_getitem_bool_series_idx_main_codelines(self, idx):
"""Generate main code lines for df.getitem"""
# optimization for default indexes in df and idx when index alignment is trivial
if (isinstance(self.index, types.NoneType) and isinstance(idx.index, types.NoneType)):
func_lines = [f' length = {df_length_expr(self)}',
f' self_index = {df_index_expr(self, as_range=True)}',
f' if length > len(idx):',
f' msg = "Unalignable boolean Series provided as indexer " + \\',
f' "(index of the boolean Series and of the indexed object do not match)."',
f' raise IndexingError(msg)',
f' # do not trim idx._data to length as getitem_by_mask handles such case',
f' res_index = getitem_by_mask(self_index, idx._data)',
f' # df index is default, same as positions so it can be used in take']
results = []
for i, col in enumerate(self.columns):
res_data = f'res_data_{i}'
func_lines += [
f' data_{i} = self._data[{i}]',
f' {res_data} = sdc_take(data_{i}, res_index)'
]
results.append((col, res_data))
data = ', '.join(f'"{col}": {data}' for col, data in results)
func_lines += [
f' return pandas.DataFrame({{{data}}}, index=res_index)'
]
else:
func_lines = [f' length = {df_length_expr(self)}',
f' self_index = self.index',
f' idx_reindexed = sdc_reindex_series(idx._data, idx.index, idx._name, self_index)',
f' res_index = getitem_by_mask(self_index, idx_reindexed._data)',
f' selected_pos = getitem_by_mask(range(length), idx_reindexed._data)']
results = []
for i, col in enumerate(self.columns):
res_data = f'res_data_{i}'
func_lines += [
f' data_{i} = self._data[{i}]',
f' {res_data} = sdc_take(data_{i}, selected_pos)'
]
results.append((col, res_data))
data = ', '.join(f'"{col}": {data}' for col, data in results)
func_lines += [
f' return pandas.DataFrame({{{data}}}, index=res_index)'
]
return func_lines
|
12,050 |
def buffer_print_match(buffer, match):
shift = 0
fulltext = match["data"]["lines"]["text"]
try:
dt, nick, msg = fulltext.split("\t", 2)
except ValueError as ex:
print(ex, "\t", fulltext)
return
# parse time str to unix timestamp
try:
timestamp = int(
time.mktime(time.strptime(dt, CONFIG["LOGGER_FILE_TIME_FORMAT"]))
)
except ValueError:
# if we couldn't parse dt the time_format was probably changed
timestamp = 0
colorize_nick = False
# ACTION
if nick.strip() == "*":
nick = msg.split()[0]
# NOTICE
elif nick == CONFIG["WEECHAT_LOOK_PREFIX_NETWORK"] and msg.startswith("Notice("):
nick = msg.split("(", 1)[1].split(")", 1)[0]
# JOIN | PART
elif (
nick == CONFIG["WEECHAT_LOOK_PREFIX_JOIN"]
or nick == CONFIG["WEECHAT_LOOK_PREFIX_JOIN"]
):
nick = msg.split()[0]
else:
# TODO: currently we only colorize in privmsgs
colorize_nick = True
# separate channel mode from nickname
try:
if nick[0] in "~@!%+":
nick = nick.lstrip("@!%+")
except IndexError:
pass
color_highlight = weechat.color("red")
color_default = weechat.color("chat")
color_nick = weechat.info_get("irc_nick_color", nick) or 0
try:
color_nick_number = int(color_nick.replace("\x19F", "", 1)) or 0
except AttributeError:
# likely color_nick is already an int
color_nick_number = color_nick
if colorize_nick and color_nick:
colored_nick = f"{color_nick}{nick}{color_default}"
fulltext = fulltext.replace(nick, colored_nick, 1)
shift += len(colored_nick) - len(nick)
# match highlighting on message, matches are given as byte positions
bytetext = bytearray(bytes(fulltext, "utf-8"))
marker_start = bytes(color_highlight, "utf-8")
marker_end = bytes(color_default, "utf-8")
offset_start = len(marker_start)
offset_end = len(marker_end)
for submatch in match["data"]["submatches"]:
# TODO: highlighting nicknames has issues, so let's skip this area for now
if submatch["end"] < len(fulltext) - len(msg):
continue
start = shift + submatch["start"]
bytetext = bytetext[:start] + marker_start + bytetext[start:]
shift += offset_start
end = shift + submatch["end"]
bytetext = bytetext[:end] + marker_end + bytetext[end:]
shift += offset_end
fulltext = bytetext.decode()
# remove datetime from fulltext if we could parse it
if timestamp:
fulltext = "".join(fulltext.split("\t", 1)[-1:])
weechat.prnt_date_tags(
buffer,
timestamp,
f"no_highlight,nick_{nick},prefix_nick_{color_nick_number}",
fulltext,
)
|
def buffer_print_match(buffer, match):
shift = 0
fulltext = match["data"]["lines"]["text"]
try:
dt, nick, msg = fulltext.split("\t", 2)
except ValueError as ex:
print(ex, "\t", fulltext)
return
# parse time str to unix timestamp
try:
timestamp = int(
time.mktime(time.strptime(dt, CONFIG["LOGGER_FILE_TIME_FORMAT"]))
)
except ValueError:
# if we couldn't parse dt the time_format was probably changed
timestamp = 0
colorize_nick = False
# ACTION
if nick.strip() == "*":
nick = msg.split()[0]
# NOTICE
elif nick == CONFIG["WEECHAT_LOOK_PREFIX_NETWORK"] and msg.startswith("Notice("):
nick = msg.split("(", 1)[1].split(")", 1)[0]
# JOIN | PART
elif (
nick == CONFIG["WEECHAT_LOOK_PREFIX_JOIN"]
or nick == CONFIG["WEECHAT_LOOK_PREFIX_JOIN"]
):
nick = msg.split()[0]
else:
# TODO: currently we only colorize in privmsgs
colorize_nick = True
# separate channel mode from nickname
try:
if nick[0] in "~@!%+":
nick = nick.lstrip("@!%+")
except IndexError:
pass
color_highlight = weechat.color("red")
color_default = weechat.color("chat")
color_nick = weechat.info_get("nick_color", nick) or 0
try:
color_nick_number = int(color_nick.replace("\x19F", "", 1)) or 0
except AttributeError:
# likely color_nick is already an int
color_nick_number = color_nick
if colorize_nick and color_nick:
colored_nick = f"{color_nick}{nick}{color_default}"
fulltext = fulltext.replace(nick, colored_nick, 1)
shift += len(colored_nick) - len(nick)
# match highlighting on message, matches are given as byte positions
bytetext = bytearray(bytes(fulltext, "utf-8"))
marker_start = bytes(color_highlight, "utf-8")
marker_end = bytes(color_default, "utf-8")
offset_start = len(marker_start)
offset_end = len(marker_end)
for submatch in match["data"]["submatches"]:
# TODO: highlighting nicknames has issues, so let's skip this area for now
if submatch["end"] < len(fulltext) - len(msg):
continue
start = shift + submatch["start"]
bytetext = bytetext[:start] + marker_start + bytetext[start:]
shift += offset_start
end = shift + submatch["end"]
bytetext = bytetext[:end] + marker_end + bytetext[end:]
shift += offset_end
fulltext = bytetext.decode()
# remove datetime from fulltext if we could parse it
if timestamp:
fulltext = "".join(fulltext.split("\t", 1)[-1:])
weechat.prnt_date_tags(
buffer,
timestamp,
f"no_highlight,nick_{nick},prefix_nick_{color_nick_number}",
fulltext,
)
|
34,288 |
def test_train_status():
with ExitStack() as stack:
formbot_data = dict(
domain="examples/formbot/domain.yml",
config="examples/formbot/config.yml",
stories="examples/formbot/data/stories.md",
nlu="examples/formbot/data/nlu.md",
)
payload = {
key: stack.enter_context(open(path)).read()
for key, path in formbot_data.items()
}
payload["force"] = True
ctx = multiprocessing.get_context("spawn")
# run a rasa server in one process
p0 = ctx.Process(target=subprocess.run, args=(["rasa", "run", "--enable-api"],))
p0.start()
server_ready = False
# wait until server is up before sending train request and status test loop
while not server_ready:
try:
server_ready = (
requests.get("http://localhost:5005/status").status_code == 200
)
except requests.exceptions.ConnectionError:
pass
time.sleep(1)
# use another process to hit the first server with a training request
results = Manager().dict()
p1 = ctx.Process(target=_send_train_request, args=(results, payload))
p1.start()
training_started = False
training_finished = False
# use our current process to query the status endpoint while the training is running
while not training_finished:
time.sleep(0.5)
# hit status endpoint with short timeout to ensure training doesn't block
status_resp = requests.get("http://localhost:5005/status", timeout=1)
assert status_resp.status_code == 200
if not training_started:
# make sure that we don't fail because we got status before training updated number of jobs
training_started = status_resp.json()["num_active_training_jobs"] == 1
else:
if results.get("train_response_code") is None:
assert status_resp.json()["num_active_training_jobs"] == 1
else:
# once the response code is in, training is done, status should return 0 again
assert results.get("train_response_code") == 200
training_finished = True
status_resp = requests.get("http://localhost:5005/status")
assert status_resp.json()["num_active_training_jobs"] == 0
p0.kill()
p1.join()
|
def test_train_status():
with ExitStack() as stack:
formbot_data = dict(
domain="examples/formbot/domain.yml",
config="examples/formbot/config.yml",
stories="examples/formbot/data/stories.md",
nlu="examples/formbot/data/nlu.md",
)
payload = {
key: stack.enter_context(open(path)).read()
for key, path in formbot_data.items()
}
payload["force"] = True
ctx = multiprocessing.get_context("spawn")
# run a rasa server in one process
p0 = ctx.Process(target=subprocess.run, args=(["rasa", "run", "--enable-api"],))
p0.start()
server_ready = False
# wait until server is up before sending train request and status test loop
while not server_ready:
try:
server_ready = (
requests.get("http://localhost:5005/status").status_code == 200
)
except requests.exceptions.ConnectionError:
pass
time.sleep(1)
# use another process to hit the first server with a training request
training_results = Manager().dict()
p1 = ctx.Process(target=_send_train_request, args=(results, payload))
p1.start()
training_started = False
training_finished = False
# use our current process to query the status endpoint while the training is running
while not training_finished:
time.sleep(0.5)
# hit status endpoint with short timeout to ensure training doesn't block
status_resp = requests.get("http://localhost:5005/status", timeout=1)
assert status_resp.status_code == 200
if not training_started:
# make sure that we don't fail because we got status before training updated number of jobs
training_started = status_resp.json()["num_active_training_jobs"] == 1
else:
if results.get("train_response_code") is None:
assert status_resp.json()["num_active_training_jobs"] == 1
else:
# once the response code is in, training is done, status should return 0 again
assert results.get("train_response_code") == 200
training_finished = True
status_resp = requests.get("http://localhost:5005/status")
assert status_resp.json()["num_active_training_jobs"] == 0
p0.kill()
p1.join()
|
5,630 |
def butter(N, Wn, btype='low', analog=False, output='ba', fs=None):
"""
Butterworth digital and analog filter design.
Design an Nth-order digital or analog Butterworth filter and return
the filter coefficients.
Parameters
----------
N : int
The order of the filter.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies.
For a Butterworth filter, this is the point at which the gain
drops to 1/sqrt(2) that of the passband (the "-3 dB point").
For digital filters, `Wn` are in the same units as `fs`. By default,
`fs` is 2 half-cycles/sample, so these are normalized from 0 to 1,
where 1 is the Nyquist frequency. (`Wn` is thus in
half-cycles / sample.)
For analog filters, `Wn` is an angular frequency (e.g. rad/s).
btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
The type of filter. Default is 'lowpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba', for backwards
compatibility, but 'sos' should be used for general-purpose filtering.
fs : float, optional
The sampling frequency of the digital system.
.. versionadded:: 1.2.0
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
See Also
--------
buttord, buttap
Notes
-----
The Butterworth filter has maximally flat frequency response in the
passband.
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Design an analog filter and plot its frequency response, showing the
critical points:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.butter(4, 100, 'low', analog=True)
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Butterworth filter frequency response')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.axvline(100, color='green') # cutoff frequency
>>> plt.show()
Generate a signal made up of 10 Hz and 20 Hz, sampled at 1 kHz
>>> t = np.linspace(0, 1, 1000, False) # 1 second
>>> sig = np.sin(2*np.pi*10*t) + np.sin(2*np.pi*20*t)
>>> fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
>>> ax1.plot(t, sig)
>>> ax1.set_title('10 Hz and 20 Hz sinusoids')
>>> ax1.axis([0, 1, -2, 2])
Design a digital high-pass filter at 15 Hz to remove the 10 Hz tone, and
apply it to the signal. (It's recommended to use second-order sections
format when filtering, to avoid numerical error with transfer function
(``ba``) format):
>>> sos = signal.butter(10, 15, 'hp', fs=1000, output='sos')
>>> filtered = signal.sosfilt(sos, sig)
>>> ax2.plot(t, filtered)
>>> ax2.set_title('After 15 Hz high-pass filter')
>>> ax2.axis([0, 1, -2, 2])
>>> ax2.set_xlabel('Time [seconds]')
>>> plt.tight_layout()
>>> plt.show()
"""
return iirfilter(N, Wn, btype=btype, analog=analog,
output=output, ftype='butter', fs=fs)
|
def butter(N, Wn, btype='low', analog=False, output='ba', fs=None):
"""
Butterworth digital and analog filter design.
Design an Nth-order digital or analog Butterworth filter and return
the filter coefficients.
Parameters
----------
N : int
The order of the filter.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies.
For a Butterworth filter, this is the point at which the gain
drops to 1/sqrt(2) that of the passband (the "-3 dB point").
For digital filters, `Wn` are in the same units as `fs`. By default,
`fs` is 2 half-cycles/sample, so these are normalized from 0 to 1,
where 1 is the Nyquist frequency. (`Wn` is thus in
half-cycles / sample.)
For analog filters, `Wn` is an angular frequency (e.g. rad/s).
btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
The type of filter. Default is 'lowpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba' for backwards
compatibility, but 'sos' should be used for general-purpose filtering.
fs : float, optional
The sampling frequency of the digital system.
.. versionadded:: 1.2.0
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
See Also
--------
buttord, buttap
Notes
-----
The Butterworth filter has maximally flat frequency response in the
passband.
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Design an analog filter and plot its frequency response, showing the
critical points:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.butter(4, 100, 'low', analog=True)
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Butterworth filter frequency response')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.axvline(100, color='green') # cutoff frequency
>>> plt.show()
Generate a signal made up of 10 Hz and 20 Hz, sampled at 1 kHz
>>> t = np.linspace(0, 1, 1000, False) # 1 second
>>> sig = np.sin(2*np.pi*10*t) + np.sin(2*np.pi*20*t)
>>> fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
>>> ax1.plot(t, sig)
>>> ax1.set_title('10 Hz and 20 Hz sinusoids')
>>> ax1.axis([0, 1, -2, 2])
Design a digital high-pass filter at 15 Hz to remove the 10 Hz tone, and
apply it to the signal. (It's recommended to use second-order sections
format when filtering, to avoid numerical error with transfer function
(``ba``) format):
>>> sos = signal.butter(10, 15, 'hp', fs=1000, output='sos')
>>> filtered = signal.sosfilt(sos, sig)
>>> ax2.plot(t, filtered)
>>> ax2.set_title('After 15 Hz high-pass filter')
>>> ax2.axis([0, 1, -2, 2])
>>> ax2.set_xlabel('Time [seconds]')
>>> plt.tight_layout()
>>> plt.show()
"""
return iirfilter(N, Wn, btype=btype, analog=analog,
output=output, ftype='butter', fs=fs)
|
22,742 |
def prepare_and_parse_args(plugins, args, detect_defaults=False):
"""Returns parsed command line arguments.
:param .PluginsRegistry plugins: available plugins
:param list args: command line arguments with the program name removed
:returns: parsed command line arguments
:rtype: argparse.Namespace
"""
helpful = HelpfulArgumentParser(args, plugins, detect_defaults)
_add_all_groups(helpful)
# --help is automatically provided by argparse
helpful.add(
None, "-v", "--verbose", dest="verbose_count", action="count",
default=flag_default("verbose_count"), help="This flag can be used "
"multiple times to incrementally increase the verbosity of output, "
"e.g. -vvv.")
helpful.add(
None, "-t", "--text", dest="text_mode", action="store_true",
default=flag_default("text_mode"), help=argparse.SUPPRESS)
helpful.add(
None, "--max-log-backups", type=nonnegative_int,
default=flag_default("max_log_backups"),
help="Specifies the maximum number of backup logs that should "
"be kept by Certbot's built in log rotation. Setting this "
"flag to 0 disables log rotation entirely, causing "
"Certbot to always append to the same log file.")
helpful.add(
[None, "automation", "run", "certonly", "enhance"],
"-n", "--non-interactive", "--noninteractive",
dest="noninteractive_mode", action="store_true",
default=flag_default("noninteractive_mode"),
help="Run without ever asking for user input. This may require "
"additional command line flags; the client will try to explain "
"which ones are required if it finds one missing")
helpful.add(
[None, "register", "run", "certonly", "enhance"],
constants.FORCE_INTERACTIVE_FLAG, action="store_true",
default=flag_default("force_interactive"),
help="Force Certbot to be interactive even if it detects it's not "
"being run in a terminal. This flag cannot be used with the "
"renew subcommand.")
helpful.add(
[None, "run", "certonly", "certificates", "enhance"],
"-d", "--domains", "--domain", dest="domains",
metavar="DOMAIN", action=_DomainsAction,
default=flag_default("domains"),
help="Domain names to apply. For multiple domains you can use "
"multiple -d flags or enter a comma separated list of domains "
"as a parameter. The first domain provided will be the "
"subject CN of the certificate, and all domains will be "
"Subject Alternative Names on the certificate. "
"The first domain will also be used in "
"some software user interfaces and as the file paths for the "
"certificate and related material unless otherwise "
"specified or you already have a certificate with the same "
"name. In the case of a name collision it will append a number "
"like 0001 to the file path name. (default: Ask)")
helpful.add(
[None, "run", "certonly", "register"],
"--eab-kid", dest="eab_kid",
metavar="EAB_KID",
help="Key Identifier for External Account Binding"
)
helpful.add(
[None, "run", "certonly", "register"],
"--eab-hmac-key", dest="eab_hmac_key",
metavar="EAB_HMAC_KEY",
help="HMAC key for External Account Binding"
)
helpful.add(
[None, "run", "certonly", "manage", "delete", "certificates",
"renew", "enhance"], "--cert-name", dest="certname",
metavar="CERTNAME", default=flag_default("certname"),
help="Certificate name to apply. This name is used by Certbot for housekeeping "
"and in file paths; it doesn't affect the content of the certificate itself. "
"To see certificate names, run 'certbot certificates'. "
"When creating a new certificate, specifies the new certificate's name. "
"(default: the first provided domain or the name of an existing "
"certificate on your system for the same domains)")
helpful.add(
[None, "testing", "renew", "certonly"],
"--dry-run", action="store_true", dest="dry_run",
default=flag_default("dry_run"),
help="Perform a test run of the client, obtaining test (invalid) certificates"
" but not saving them to disk. This can currently only be used"
" with the 'certonly' and 'renew' subcommands. \nNote: Although --dry-run"
" tries to avoid making any persistent changes on a system, it "
" is not completely side-effect free: if used with webserver authenticator plugins"
" like apache and nginx, it makes and then reverts temporary config changes"
" in order to obtain test certificates, and reloads webservers to deploy and then"
" roll back those changes. It also calls --pre-hook and --post-hook commands"
" if they are defined because they may be necessary to accurately simulate"
" renewal. --deploy-hook commands are not called.")
helpful.add(
["register", "automation"], "--register-unsafely-without-email", action="store_true",
default=flag_default("register_unsafely_without_email"),
help="Specifying this flag enables registering an account with no "
"email address. This is strongly discouraged, because in the "
"event of key loss or account compromise you will irrevocably "
"lose access to your account. You will also be unable to receive "
"notice about impending expiration or revocation of your "
"certificates. Updates to the Subscriber Agreement will still "
"affect you, and will be effective 14 days after posting an "
"update to the web site.")
helpful.add(
["register", "update_account", "unregister", "automation"], "-m", "--email",
default=flag_default("email"),
help=config_help("email"))
helpful.add(["register", "update_account", "automation"], "--eff-email", action="store_true",
default=flag_default("eff_email"), dest="eff_email",
help="Share your e-mail address with EFF")
helpful.add(["register", "update_account", "automation"], "--no-eff-email",
action="store_false", default=flag_default("eff_email"), dest="eff_email",
help="Don't share your e-mail address with EFF")
helpful.add(
["automation", "certonly", "run"],
"--keep-until-expiring", "--keep", "--reinstall",
dest="reinstall", action="store_true", default=flag_default("reinstall"),
help="If the requested certificate matches an existing certificate, always keep the "
"existing one until it is due for renewal (for the "
"'run' subcommand this means reinstall the existing certificate). (default: Ask)")
helpful.add(
"automation", "--expand", action="store_true", default=flag_default("expand"),
help="If an existing certificate is a strict subset of the requested names, "
"always expand and replace it with the additional names. (default: Ask)")
helpful.add(
"automation", "--version", action="version",
version="%(prog)s {0}".format(certbot.__version__),
help="show program's version number and exit")
helpful.add(
["automation", "renew"],
"--force-renewal", "--renew-by-default", dest="renew_by_default",
action="store_true", default=flag_default("renew_by_default"),
help="If a certificate "
"already exists for the requested domains, renew it now, "
"regardless of whether it is near expiry. (Often "
"--keep-until-expiring is more appropriate). Also implies "
"--expand.")
helpful.add(
"automation", "--renew-with-new-domains", dest="renew_with_new_domains",
action="store_true", default=flag_default("renew_with_new_domains"),
help="If a "
"certificate already exists for the requested certificate name "
"but does not match the requested domains, renew it now, "
"regardless of whether it is near expiry.")
helpful.add(
"automation", "--reuse-key", dest="reuse_key",
action="store_true", default=flag_default("reuse_key"),
help="When renewing, use the same private key as the existing "
"certificate.")
helpful.add(
["automation", "renew", "certonly"],
"--allow-subset-of-names", action="store_true",
default=flag_default("allow_subset_of_names"),
help="When performing domain validation, do not consider it a failure "
"if authorizations can not be obtained for a strict subset of "
"the requested domains. This may be useful for allowing renewals for "
"multiple domains to succeed even if some domains no longer point "
"at this system. This option cannot be used with --csr.")
helpful.add(
"automation", "--agree-tos", dest="tos", action="store_true",
default=flag_default("tos"),
help="Agree to the ACME Subscriber Agreement (default: Ask)")
helpful.add(
["unregister", "automation"], "--account", metavar="ACCOUNT_ID",
default=flag_default("account"),
help="Account ID to use")
helpful.add(
"automation", "--duplicate", dest="duplicate", action="store_true",
default=flag_default("duplicate"),
help="Allow making a certificate lineage that duplicates an existing one "
"(both can be renewed in parallel)")
helpful.add(
"automation", "--os-packages-only", action="store_true",
default=flag_default("os_packages_only"),
help="(certbot-auto only) install OS package dependencies and then stop")
helpful.add(
"automation", "--no-self-upgrade", action="store_true",
default=flag_default("no_self_upgrade"),
help="(certbot-auto only) prevent the certbot-auto script from"
" upgrading itself to newer released versions (default: Upgrade"
" automatically)")
helpful.add(
"automation", "--no-bootstrap", action="store_true",
default=flag_default("no_bootstrap"),
help="(certbot-auto only) prevent the certbot-auto script from"
" installing OS-level dependencies (default: Prompt to install "
" OS-wide dependencies, but exit if the user says 'No')")
helpful.add(
"automation", "--no-permissions-check", action="store_true",
default=flag_default("no_permissions_check"),
help="(certbot-auto only) skip the check on the file system"
" permissions of the certbot-auto script")
helpful.add(
["automation", "renew", "certonly", "run"],
"-q", "--quiet", dest="quiet", action="store_true",
default=flag_default("quiet"),
help="Silence all output except errors. Useful for automation via cron."
" Implies --non-interactive.")
# overwrites server, handled in HelpfulArgumentParser.parse_args()
helpful.add(["testing", "revoke", "run"], "--test-cert", "--staging",
dest="staging", action="store_true", default=flag_default("staging"),
help="Use the staging server to obtain or revoke test (invalid) certificates; equivalent"
" to --server " + constants.STAGING_URI)
helpful.add(
"testing", "--debug", action="store_true", default=flag_default("debug"),
help="Show tracebacks in case of errors, and allow certbot-auto "
"execution on experimental platforms")
helpful.add(
[None, "certonly", "run"], "--debug-challenges", action="store_true",
default=flag_default("debug_challenges"),
help="After setting up challenges, wait for user input before "
"submitting to CA")
helpful.add(
"testing", "--no-verify-ssl", action="store_true",
help=config_help("no_verify_ssl"),
default=flag_default("no_verify_ssl"))
helpful.add(
["testing", "standalone", "manual"], "--http-01-port", type=int,
dest="http01_port",
default=flag_default("http01_port"), help=config_help("http01_port"))
helpful.add(
["testing", "standalone"], "--http-01-address",
dest="http01_address",
default=flag_default("http01_address"), help=config_help("http01_address"))
helpful.add(
["testing", "nginx"], "--https-port", type=int,
default=flag_default("https_port"),
help=config_help("https_port"))
helpful.add(
"testing", "--break-my-certs", action="store_true",
default=flag_default("break_my_certs"),
help="Be willing to replace or renew valid certificates with invalid "
"(testing/staging) certificates")
helpful.add(
"security", "--rsa-key-size", type=int, metavar="N",
default=flag_default("rsa_key_size"), help=config_help("rsa_key_size"))
helpful.add(
"security", "--must-staple", action="store_true",
dest="must_staple", default=flag_default("must_staple"),
help=config_help("must_staple"))
helpful.add(
["security", "enhance"],
"--redirect", action="store_true", dest="redirect",
default=flag_default("redirect"),
help="Automatically redirect all HTTP traffic to HTTPS for the newly "
"authenticated vhost. (default: redirect enabled for install and run, "
"disabled for enhance)")
helpful.add(
"security", "--no-redirect", action="store_false", dest="redirect",
default=flag_default("redirect"),
help="Do not automatically redirect all HTTP traffic to HTTPS for the newly "
"authenticated vhost. (default: redirect enabled for install and run, "
"disabled for enhance)")
helpful.add(
["security", "enhance"],
"--hsts", action="store_true", dest="hsts", default=flag_default("hsts"),
help="Add the Strict-Transport-Security header to every HTTP response."
" Forcing browser to always use SSL for the domain."
" Defends against SSL Stripping.")
helpful.add(
"security", "--no-hsts", action="store_false", dest="hsts",
default=flag_default("hsts"), help=argparse.SUPPRESS)
helpful.add(
["security", "enhance"],
"--uir", action="store_true", dest="uir", default=flag_default("uir"),
help='Add the "Content-Security-Policy: upgrade-insecure-requests"'
' header to every HTTP response. Forcing the browser to use'
' https:// for every http:// resource.')
helpful.add(
"security", "--no-uir", action="store_false", dest="uir", default=flag_default("uir"),
help=argparse.SUPPRESS)
helpful.add(
"security", "--staple-ocsp", action="store_true", dest="staple",
default=flag_default("staple"),
help="Enables OCSP Stapling. A valid OCSP response is stapled to"
" the certificate that the server offers during TLS.")
helpful.add(
"security", "--no-staple-ocsp", action="store_false", dest="staple",
default=flag_default("staple"), help=argparse.SUPPRESS)
helpful.add(
"security", "--strict-permissions", action="store_true",
default=flag_default("strict_permissions"),
help="Require that all configuration files are owned by the current "
"user; only needed if your config is somewhere unsafe like /tmp/")
helpful.add(
[None, "certonly", "renew"],
"--preferred-chain", dest="preferred_chain",
default=flag_default("preferred_chain"), help=config_help("preferred_chain")
)
helpful.add(
["manual", "standalone", "certonly", "renew"],
"--preferred-challenges", dest="pref_challs",
action=_PrefChallAction, default=flag_default("pref_challs"),
help='A sorted, comma delimited list of the preferred challenge to '
'use during authorization with the most preferred challenge '
'listed first (Eg, "dns" or "http,dns"). '
'Not all plugins support all challenges. See '
'https://certbot.eff.org/docs/using.html#plugins for details. '
'ACME Challenges are versioned, but if you pick "http" rather '
'than "http-01", Certbot will select the latest version '
'automatically.')
helpful.add(
"renew", "--pre-hook",
help="Command to be run in a shell before obtaining any certificates."
" Intended primarily for renewal, where it can be used to temporarily"
" shut down a webserver that might conflict with the standalone"
" plugin. This will only be called if a certificate is actually to be"
" obtained/renewed. When renewing several certificates that have"
" identical pre-hooks, only the first will be executed.")
helpful.add(
"renew", "--post-hook",
help="Command to be run in a shell after attempting to obtain/renew"
" certificates. Can be used to deploy renewed certificates, or to"
" restart any servers that were stopped by --pre-hook. This is only"
" run if an attempt was made to obtain/renew a certificate. If"
" multiple renewed certificates have identical post-hooks, only"
" one will be run.")
helpful.add("renew", "--renew-hook",
action=_RenewHookAction, help=argparse.SUPPRESS)
helpful.add(
"renew", "--no-random-sleep-on-renew", action="store_false",
default=flag_default("random_sleep_on_renew"), dest="random_sleep_on_renew",
help=argparse.SUPPRESS)
helpful.add(
"renew", "--deploy-hook", action=_DeployHookAction,
help='Command to be run in a shell once for each successfully'
' issued certificate. For this command, the shell variable'
' $RENEWED_LINEAGE will point to the config live subdirectory'
' (for example, "/etc/letsencrypt/live/example.com") containing'
' the new certificates and keys; the shell variable'
' $RENEWED_DOMAINS will contain a space-delimited list of'
' renewed certificate domains (for example, "example.com'
' www.example.com"')
helpful.add(
"renew", "--disable-hook-validation",
action="store_false", dest="validate_hooks",
default=flag_default("validate_hooks"),
help="Ordinarily the commands specified for"
" --pre-hook/--post-hook/--deploy-hook will be checked for"
" validity, to see if the programs being run are in the $PATH,"
" so that mistakes can be caught early, even when the hooks"
" aren't being run just yet. The validation is rather"
" simplistic and fails if you use more advanced shell"
" constructs, so you can use this switch to disable it."
" (default: False)")
helpful.add(
"renew", "--no-directory-hooks", action="store_false",
default=flag_default("directory_hooks"), dest="directory_hooks",
help="Disable running executables found in Certbot's hook directories"
" during renewal. (default: False)")
helpful.add(
"renew", "--disable-renew-updates", action="store_true",
default=flag_default("disable_renew_updates"), dest="disable_renew_updates",
help="Disable automatic updates to your server configuration that"
" would otherwise be done by the selected installer plugin, and triggered"
" when the user executes \"certbot renew\", regardless of if the certificate"
" is renewed. This setting does not apply to important TLS configuration"
" updates.")
helpful.add(
"renew", "--no-autorenew", action="store_false",
default=flag_default("autorenew"), dest="autorenew",
help="Disable auto renewal of certificates.")
# Populate the command line parameters for new style enhancements
enhancements.populate_cli(helpful.add)
_create_subparsers(helpful)
_paths_parser(helpful)
# _plugins_parsing should be the last thing to act upon the main
# parser (--help should display plugin-specific options last)
_plugins_parsing(helpful, plugins)
if not detect_defaults:
global helpful_parser # pylint: disable=global-statement
helpful_parser = helpful
return helpful.parse_args()
|
def prepare_and_parse_args(plugins, args, detect_defaults=False):
"""Returns parsed command line arguments.
:param .PluginsRegistry plugins: available plugins
:param list args: command line arguments with the program name removed
:returns: parsed command line arguments
:rtype: argparse.Namespace
"""
helpful = HelpfulArgumentParser(args, plugins, detect_defaults)
_add_all_groups(helpful)
# --help is automatically provided by argparse
helpful.add(
None, "-v", "--verbose", dest="verbose_count", action="count",
default=flag_default("verbose_count"), help="This flag can be used "
"multiple times to incrementally increase the verbosity of output, "
"e.g. -vvv.")
helpful.add(
None, "-t", "--text", dest="text_mode", action="store_true",
default=flag_default("text_mode"), help=argparse.SUPPRESS)
helpful.add(
None, "--max-log-backups", type=nonnegative_int,
default=flag_default("max_log_backups"),
help="Specifies the maximum number of backup logs that should "
"be kept by Certbot's built in log rotation. Setting this "
"flag to 0 disables log rotation entirely, causing "
"Certbot to always append to the same log file.")
helpful.add(
[None, "automation", "run", "certonly", "enhance"],
"-n", "--non-interactive", "--noninteractive",
dest="noninteractive_mode", action="store_true",
default=flag_default("noninteractive_mode"),
help="Run without ever asking for user input. This may require "
"additional command line flags; the client will try to explain "
"which ones are required if it finds one missing")
helpful.add(
[None, "register", "run", "certonly", "enhance"],
constants.FORCE_INTERACTIVE_FLAG, action="store_true",
default=flag_default("force_interactive"),
help="Force Certbot to be interactive even if it detects it's not "
"being run in a terminal. This flag cannot be used with the "
"renew subcommand.")
helpful.add(
[None, "run", "certonly", "certificates", "enhance"],
"-d", "--domains", "--domain", dest="domains",
metavar="DOMAIN", action=_DomainsAction,
default=flag_default("domains"),
help="Domain names to apply. For multiple domains you can use "
"multiple -d flags or enter a comma separated list of domains "
"as a parameter. The first domain provided will be the "
"subject CN of the certificate, and all domains will be "
"Subject Alternative Names on the certificate. "
"The first domain will also be used in "
"some software user interfaces and as the file paths for the "
"certificate and related material unless otherwise "
"specified or you already have a certificate with the same "
"name. In the case of a name collision it will append a number "
"like 0001 to the file path name. (default: Ask)")
helpful.add(
[None, "run", "certonly", "register"],
"--eab-kid", dest="eab_kid",
metavar="EAB_KID",
help="Key Identifier for External Account Binding"
)
helpful.add(
[None, "run", "certonly", "register"],
"--eab-hmac-key", dest="eab_hmac_key",
metavar="EAB_HMAC_KEY",
help="HMAC key for External Account Binding"
)
helpful.add(
[None, "run", "certonly", "manage", "delete", "certificates",
"renew", "enhance"], "--cert-name", dest="certname",
metavar="CERTNAME", default=flag_default("certname"),
help="Certificate name to apply. This name is used by Certbot for housekeeping "
"and in file paths; it doesn't affect the content of the certificate itself. "
"To see certificate names, run 'certbot certificates'. "
"When creating a new certificate, specifies the new certificate's name. "
"(default: the first provided domain or the name of an existing "
"certificate on your system for the same domains)")
helpful.add(
[None, "testing", "renew", "certonly"],
"--dry-run", action="store_true", dest="dry_run",
default=flag_default("dry_run"),
help="Perform a test run of the client, obtaining test (invalid) certificates"
" but not saving them to disk. This can currently only be used"
" with the 'certonly' and 'renew' subcommands. \nNote: Although --dry-run"
" tries to avoid making any persistent changes on a system, it "
" is not completely side-effect free: if used with webserver authenticator plugins"
" like apache and nginx, it makes and then reverts temporary config changes"
" in order to obtain test certificates, and reloads webservers to deploy and then"
" roll back those changes. It also calls --pre-hook and --post-hook commands"
" if they are defined because they may be necessary to accurately simulate"
" renewal. --deploy-hook commands are not called.")
helpful.add(
["register", "automation"], "--register-unsafely-without-email", action="store_true",
default=flag_default("register_unsafely_without_email"),
help="Specifying this flag enables registering an account with no "
"email address. This is strongly discouraged, because in the "
"event of key loss or account compromise you will irrevocably "
"lose access to your account. You will also be unable to receive "
"notice about impending expiration or revocation of your "
"certificates. Updates to the Subscriber Agreement will still "
"affect you, and will be effective 14 days after posting an "
"update to the web site.")
helpful.add(
["register", "update_account", "unregister", "automation"], "-m", "--email",
default=flag_default("email"),
help=config_help("email"))
helpful.add(["register", "update_account", "automation"], "--eff-email", action="store_true",
default=flag_default("eff_email"), dest="eff_email",
help="Share your e-mail address with EFF")
helpful.add(["register", "update_account", "automation"], "--no-eff-email",
action="store_false", default=flag_default("eff_email"), dest="eff_email",
help="Don't share your e-mail address with EFF")
helpful.add(
["automation", "certonly", "run"],
"--keep-until-expiring", "--keep", "--reinstall",
dest="reinstall", action="store_true", default=flag_default("reinstall"),
help="If the requested certificate matches an existing certificate, always keep the "
"existing one until it is due for renewal (for the "
"'run' subcommand this means reinstall the existing certificate). (default: Ask)")
helpful.add(
"automation", "--expand", action="store_true", default=flag_default("expand"),
help="If an existing certificate is a strict subset of the requested names, "
"always expand and replace it with the additional names. (default: Ask)")
helpful.add(
"automation", "--version", action="version",
version="%(prog)s {0}".format(certbot.__version__),
help="show program's version number and exit")
helpful.add(
["automation", "renew"],
"--force-renewal", "--renew-by-default", dest="renew_by_default",
action="store_true", default=flag_default("renew_by_default"),
help="If a certificate "
"already exists for the requested domains, renew it now, "
"regardless of whether it is near expiry. (Often "
"--keep-until-expiring is more appropriate). Also implies "
"--expand.")
helpful.add(
"automation", "--renew-with-new-domains", dest="renew_with_new_domains",
action="store_true", default=flag_default("renew_with_new_domains"),
help="If a "
"certificate already exists for the requested certificate name "
"but does not match the requested domains, renew it now, "
"regardless of whether it is near expiry.")
helpful.add(
"automation", "--reuse-key", dest="reuse_key",
action="store_true", default=flag_default("reuse_key"),
help="When renewing, use the same private key as the existing "
"certificate.")
helpful.add(
["automation", "renew", "certonly"],
"--allow-subset-of-names", action="store_true",
default=flag_default("allow_subset_of_names"),
help="When performing domain validation, do not consider it a failure "
"if authorizations can not be obtained for a strict subset of "
"the requested domains. This may be useful for allowing renewals for "
"multiple domains to succeed even if some domains no longer point "
"at this system. This option cannot be used with --csr.")
helpful.add(
"automation", "--agree-tos", dest="tos", action="store_true",
default=flag_default("tos"),
help="Agree to the ACME Subscriber Agreement (default: Ask)")
helpful.add(
["unregister", "automation"], "--account", metavar="ACCOUNT_ID",
default=flag_default("account"),
help="Account ID to use")
helpful.add(
"automation", "--duplicate", dest="duplicate", action="store_true",
default=flag_default("duplicate"),
help="Allow making a certificate lineage that duplicates an existing one "
"(both can be renewed in parallel)")
helpful.add(
"automation", "--os-packages-only", action="store_true",
default=flag_default("os_packages_only"),
help="(certbot-auto only) install OS package dependencies and then stop")
helpful.add(
"automation", "--no-self-upgrade", action="store_true",
default=flag_default("no_self_upgrade"),
help="(certbot-auto only) prevent the certbot-auto script from"
" upgrading itself to newer released versions (default: Upgrade"
" automatically)")
helpful.add(
"automation", "--no-bootstrap", action="store_true",
default=flag_default("no_bootstrap"),
help="(certbot-auto only) prevent the certbot-auto script from"
" installing OS-level dependencies (default: Prompt to install "
" OS-wide dependencies, but exit if the user says 'No')")
helpful.add(
"automation", "--no-permissions-check", action="store_true",
default=flag_default("no_permissions_check"),
help="(certbot-auto only) skip the check on the file system"
" permissions of the certbot-auto script")
helpful.add(
["automation", "renew", "certonly", "run"],
"-q", "--quiet", dest="quiet", action="store_true",
default=flag_default("quiet"),
help="Silence all output except errors. Useful for automation via cron."
" Implies --non-interactive.")
# overwrites server, handled in HelpfulArgumentParser.parse_args()
helpful.add(["testing", "revoke", "run"], "--test-cert", "--staging",
dest="staging", action="store_true", default=flag_default("staging"),
help="Use the staging server to obtain or revoke test (invalid) certificates; equivalent"
" to --server " + constants.STAGING_URI)
helpful.add(
"testing", "--debug", action="store_true", default=flag_default("debug"),
help="Show tracebacks in case of errors, and allow certbot-auto "
"execution on experimental platforms")
helpful.add(
[None, "certonly", "run"], "--debug-challenges", action="store_true",
default=flag_default("debug_challenges"),
help="After setting up challenges, wait for user input before "
"submitting to CA")
helpful.add(
"testing", "--no-verify-ssl", action="store_true",
help=config_help("no_verify_ssl"),
default=flag_default("no_verify_ssl"))
helpful.add(
["testing", "standalone", "manual"], "--http-01-port", type=int,
dest="http01_port",
default=flag_default("http01_port"), help=config_help("http01_port"))
helpful.add(
["testing", "standalone"], "--http-01-address",
dest="http01_address",
default=flag_default("http01_address"), help=config_help("http01_address"))
helpful.add(
["testing", "nginx"], "--https-port", type=int,
default=flag_default("https_port"),
help=config_help("https_port"))
helpful.add(
"testing", "--break-my-certs", action="store_true",
default=flag_default("break_my_certs"),
help="Be willing to replace or renew valid certificates with invalid "
"(testing/staging) certificates")
helpful.add(
"security", "--rsa-key-size", type=int, metavar="N",
default=flag_default("rsa_key_size"), help=config_help("rsa_key_size"))
helpful.add(
"security", "--must-staple", action="store_true",
dest="must_staple", default=flag_default("must_staple"),
help=config_help("must_staple"))
helpful.add(
["security", "enhance"],
"--redirect", action="store_true", dest="redirect",
default=flag_default("redirect"),
help="Automatically redirect all HTTP traffic to HTTPS for the newly "
"authenticated vhost. (default: redirect enabled for install and run, "
"disabled for enhance)")
helpful.add(
"security", "--no-redirect", action="store_false", dest="redirect",
default=flag_default("redirect"),
help="Do not automatically redirect all HTTP traffic to HTTPS for the newly "
"authenticated vhost. (default: redirect enabled for install and run, "
"disabled for enhance)")
helpful.add(
["security", "enhance"],
"--hsts", action="store_true", dest="hsts", default=flag_default("hsts"),
help="Add the Strict-Transport-Security header to every HTTP response."
" Forcing browser to always use SSL for the domain."
" Defends against SSL Stripping.")
helpful.add(
"security", "--no-hsts", action="store_false", dest="hsts",
default=flag_default("hsts"), help=argparse.SUPPRESS)
helpful.add(
["security", "enhance"],
"--uir", action="store_true", dest="uir", default=flag_default("uir"),
help='Add the "Content-Security-Policy: upgrade-insecure-requests"'
' header to every HTTP response. Forcing the browser to use'
' https:// for every http:// resource.')
helpful.add(
"security", "--no-uir", action="store_false", dest="uir", default=flag_default("uir"),
help=argparse.SUPPRESS)
helpful.add(
"security", "--staple-ocsp", action="store_true", dest="staple",
default=flag_default("staple"),
help="Enables OCSP Stapling. A valid OCSP response is stapled to"
" the certificate that the server offers during TLS.")
helpful.add(
"security", "--no-staple-ocsp", action="store_false", dest="staple",
default=flag_default("staple"), help=argparse.SUPPRESS)
helpful.add(
"security", "--strict-permissions", action="store_true",
default=flag_default("strict_permissions"),
help="Require that all configuration files are owned by the current "
"user; only needed if your config is somewhere unsafe like /tmp/")
helpful.add(
[None, "certonly", "renew", "run"],
"--preferred-chain", dest="preferred_chain",
default=flag_default("preferred_chain"), help=config_help("preferred_chain")
)
helpful.add(
["manual", "standalone", "certonly", "renew"],
"--preferred-challenges", dest="pref_challs",
action=_PrefChallAction, default=flag_default("pref_challs"),
help='A sorted, comma delimited list of the preferred challenge to '
'use during authorization with the most preferred challenge '
'listed first (Eg, "dns" or "http,dns"). '
'Not all plugins support all challenges. See '
'https://certbot.eff.org/docs/using.html#plugins for details. '
'ACME Challenges are versioned, but if you pick "http" rather '
'than "http-01", Certbot will select the latest version '
'automatically.')
helpful.add(
"renew", "--pre-hook",
help="Command to be run in a shell before obtaining any certificates."
" Intended primarily for renewal, where it can be used to temporarily"
" shut down a webserver that might conflict with the standalone"
" plugin. This will only be called if a certificate is actually to be"
" obtained/renewed. When renewing several certificates that have"
" identical pre-hooks, only the first will be executed.")
helpful.add(
"renew", "--post-hook",
help="Command to be run in a shell after attempting to obtain/renew"
" certificates. Can be used to deploy renewed certificates, or to"
" restart any servers that were stopped by --pre-hook. This is only"
" run if an attempt was made to obtain/renew a certificate. If"
" multiple renewed certificates have identical post-hooks, only"
" one will be run.")
helpful.add("renew", "--renew-hook",
action=_RenewHookAction, help=argparse.SUPPRESS)
helpful.add(
"renew", "--no-random-sleep-on-renew", action="store_false",
default=flag_default("random_sleep_on_renew"), dest="random_sleep_on_renew",
help=argparse.SUPPRESS)
helpful.add(
"renew", "--deploy-hook", action=_DeployHookAction,
help='Command to be run in a shell once for each successfully'
' issued certificate. For this command, the shell variable'
' $RENEWED_LINEAGE will point to the config live subdirectory'
' (for example, "/etc/letsencrypt/live/example.com") containing'
' the new certificates and keys; the shell variable'
' $RENEWED_DOMAINS will contain a space-delimited list of'
' renewed certificate domains (for example, "example.com'
' www.example.com"')
helpful.add(
"renew", "--disable-hook-validation",
action="store_false", dest="validate_hooks",
default=flag_default("validate_hooks"),
help="Ordinarily the commands specified for"
" --pre-hook/--post-hook/--deploy-hook will be checked for"
" validity, to see if the programs being run are in the $PATH,"
" so that mistakes can be caught early, even when the hooks"
" aren't being run just yet. The validation is rather"
" simplistic and fails if you use more advanced shell"
" constructs, so you can use this switch to disable it."
" (default: False)")
helpful.add(
"renew", "--no-directory-hooks", action="store_false",
default=flag_default("directory_hooks"), dest="directory_hooks",
help="Disable running executables found in Certbot's hook directories"
" during renewal. (default: False)")
helpful.add(
"renew", "--disable-renew-updates", action="store_true",
default=flag_default("disable_renew_updates"), dest="disable_renew_updates",
help="Disable automatic updates to your server configuration that"
" would otherwise be done by the selected installer plugin, and triggered"
" when the user executes \"certbot renew\", regardless of if the certificate"
" is renewed. This setting does not apply to important TLS configuration"
" updates.")
helpful.add(
"renew", "--no-autorenew", action="store_false",
default=flag_default("autorenew"), dest="autorenew",
help="Disable auto renewal of certificates.")
# Populate the command line parameters for new style enhancements
enhancements.populate_cli(helpful.add)
_create_subparsers(helpful)
_paths_parser(helpful)
# _plugins_parsing should be the last thing to act upon the main
# parser (--help should display plugin-specific options last)
_plugins_parsing(helpful, plugins)
if not detect_defaults:
global helpful_parser # pylint: disable=global-statement
helpful_parser = helpful
return helpful.parse_args()
|
21,165 |
def _get_drop_mask(ops: Ops, nO: int, rate: Optional[float]) -> Optional[Floats1d]:
if rate is not None:
mask = ops.get_dropout_mask((nO,), rate)
# type: ignore
return mask
return None
|
def _get_drop_mask(ops: Ops, nO: int, rate: Optional[float]) -> Optional[Floats1d]:
if rate is not None:
mask = ops.get_dropout_mask((nO,), rate)
return mask # type: ignore
return None
|
14,202 |
def get_parser():
prefix_dir = os.path.dirname(os.path.dirname(cocotb.__file__))
version = cocotb.__version__
python_bin = sys.executable
parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument(
"--prefix",
help="echo the package-prefix of cocotb",
action=PrintAction,
text=prefix_dir,
)
parser.add_argument(
"--share",
help="echo the package-share of cocotb",
action=PrintAction,
text=share_dir,
)
parser.add_argument(
"--makefiles",
help="echo the package-makefiles of cocotb",
action=PrintAction,
text=makefiles_dir,
)
parser.add_argument(
"--python-bin",
help="echo the path to the Python binary cocotb is installed for",
action=PrintAction,
text=python_bin,
)
parser.add_argument(
"--help-vars",
help="show help about supported variables",
action=PrintAction,
text=help_vars_text(),
)
parser.add_argument(
"-v",
"--version",
help="echo the version of cocotb",
action=PrintAction,
text=version,
)
parser.add_argument(
"--libpython",
help="prints the absolute path to the libpython associated with the current Python installation",
action=PrintAction,
text=find_libpython.find_libpython(),
)
parser.add_argument(
"--lib-dir",
help="prints the absolute path to the interface libraties location",
action=PrintAction,
text=libs_dir
)
parser.add_argument(
"--lib-name",
help='prints the name of interface library for given interface (VPI/VHPI/FLI) and simulator',
nargs=2,
metavar=('INTERFACE', 'SIMULATOR'),
action=PrintFuncAction,
function=lib_name
)
parser.add_argument(
"--lib-name-path",
help='prints the absolute path of interface library for given interface (VPI/VHPI/FLI) and simulator',
nargs=2,
metavar=('INTERFACE', 'SIMULATOR'),
action=PrintFuncAction,
function=lib_name_path
)
return parser
|
def get_parser():
prefix_dir = os.path.dirname(os.path.dirname(cocotb.__file__))
version = cocotb.__version__
python_bin = sys.executable
parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument(
"--prefix",
help="echo the package-prefix of cocotb",
action=PrintAction,
text=prefix_dir,
)
parser.add_argument(
"--share",
help="echo the package-share of cocotb",
action=PrintAction,
text=share_dir,
)
parser.add_argument(
"--makefiles",
help="echo the package-makefiles of cocotb",
action=PrintAction,
text=makefiles_dir,
)
parser.add_argument(
"--python-bin",
help="echo the path to the Python binary cocotb is installed for",
action=PrintAction,
text=python_bin,
)
parser.add_argument(
"--help-vars",
help="show help about supported variables",
action=PrintAction,
text=help_vars_text(),
)
parser.add_argument(
"-v",
"--version",
help="echo the version of cocotb",
action=PrintAction,
text=version,
)
parser.add_argument(
"--libpython",
help="prints the absolute path to the libpython associated with the current Python installation",
action=PrintAction,
text=find_libpython.find_libpython(),
)
parser.add_argument(
"--lib-dir",
help="prints the absolute path to the interface libraries location",
action=PrintAction,
text=libs_dir
)
parser.add_argument(
"--lib-name",
help='prints the name of interface library for given interface (VPI/VHPI/FLI) and simulator',
nargs=2,
metavar=('INTERFACE', 'SIMULATOR'),
action=PrintFuncAction,
function=lib_name
)
parser.add_argument(
"--lib-name-path",
help='prints the absolute path of interface library for given interface (VPI/VHPI/FLI) and simulator',
nargs=2,
metavar=('INTERFACE', 'SIMULATOR'),
action=PrintFuncAction,
function=lib_name_path
)
return parser
|
31,001 |
def workday_first_run_command(client, mapper_in):
report_data = client.get_full_report()
indicators = report_to_indicators(report_data.get('Report_Entry'), mapper_in)
for b in batch(indicators, batch_size=2000):
demisto.createIndicators(b)
demisto.results("Indicators were created successfully")
|
def workday_first_run_command(client, mapper_in):
report_data = client.get_full_report()
indicators = report_to_indicators(report_data.get('Report_Entry'), mapper_in)
for b in batch(indicators, batch_size=2000):
demisto.createIndicators(b)
return_results("Indicators were created successfully")
|
45,133 |
def create_if_missing(
flag_name: str,
is_enabled: bool = False,
client_data: Optional[dict] = None,
bucketer: Optional[AbstractBucketer] = None,
conditions: Optional[Iterable[Condition]] = None,
client: FeatureFlagClient = None,
) -> Optional[FeatureFlag]:
"""
Create a feature flag if a flag matching the given name does not
already exist.
Args:
flag_name: the name of the feature flag
is_enabled: the initial enabled/disabled state of the flag if
this function creates it
client_data: arbitrary data that we should store with the flag
bucketer: an optional bucketer from the flipper.bucketing module, e.g.
PercentageBucketer, to use when determining if the flag
is enabled
conditions: an optional iterable of Conditions against which we will
check input data to determine if a flag is enabled
client: The FeatureFlagClient instance to use. Defaults to a client
configured to look at an in-memory feature store.
Returns:
FeatureFlag or None: Returns a created or existing FeatureFlag, or None
if feature flagging is disabled.
"""
if not settings.PREFECT_FEATURE_FLAGGING_ENABLED.value():
return
if not client:
client = get_features_client()
# If the flag exists in the feature flag store, we'll consider the
# enabled state, bucketer, and conditions currently saved in the
# feature flag store as canonical.
if client.exists(flag_name):
return client.get(flag_name)
flag = client.create(flag_name, is_enabled=is_enabled, client_data=client_data)
if bucketer:
flag.set_bucketer(bucketer)
if conditions:
flag.set_conditions(conditions)
return flag
|
def create_if_missing(
flag_name: str,
is_enabled: bool = False,
client_data: Optional[dict] = None,
bucketer: Optional[AbstractBucketer] = None,
conditions: Optional[Iterable[Condition]] = None,
client: FeatureFlagClient = None,
) -> Optional[FeatureFlag]:
"""
Create a feature flag if a flag matching the given name does not
already exist.
Args:
flag_name: the name of the feature flag
is_enabled: the initial enabled/disabled state of the flag if
this function creates it
client_data: arbitrary data that we should store with the flag
bucketer: an optional bucketer from the flipper.bucketing module, e.g.
`PercentageBucketer`, to use when determining if the flag
is enabled
conditions: an optional iterable of Conditions against which we will
check input data to determine if a flag is enabled
client: The FeatureFlagClient instance to use. Defaults to a client
configured to look at an in-memory feature store.
Returns:
FeatureFlag or None: Returns a created or existing FeatureFlag, or None
if feature flagging is disabled.
"""
if not settings.PREFECT_FEATURE_FLAGGING_ENABLED.value():
return
if not client:
client = get_features_client()
# If the flag exists in the feature flag store, we'll consider the
# enabled state, bucketer, and conditions currently saved in the
# feature flag store as canonical.
if client.exists(flag_name):
return client.get(flag_name)
flag = client.create(flag_name, is_enabled=is_enabled, client_data=client_data)
if bucketer:
flag.set_bucketer(bucketer)
if conditions:
flag.set_conditions(conditions)
return flag
|
32,834 |
def _set_request_tags(django, span, request):
span.set_tag("django.request.class", func_name(request))
user = getattr(request, "user", None)
if user is not None:
try:
if hasattr(user, "is_authenticated"):
span.set_tag("django.user.is_authenticated", user_is_authenticated(user))
uid = getattr(user, "pk", None)
if uid:
span.set_tag("django.user.id", uid)
if config.django.include_user_name:
username = getattr(user, "username", None)
if username:
span.set_tag("django.user.name", username)
except django.core.exceptions.ImproperlyConfigured:
log.debug("Error authenticating user %r", user, exc_info=True)
|
def _set_request_tags(django, span, request):
span.set_tag("django.request.class", func_name(request))
user = getattr(request, "user", None)
if user is not None:
try:
if hasattr(user, "is_authenticated"):
span.set_tag("django.user.is_authenticated", user_is_authenticated(user))
uid = getattr(user, "pk", None)
if uid:
span.set_tag("django.user.id", uid)
if config.django.include_user_name:
username = getattr(user, "username", None)
if username:
span.set_tag("django.user.name", username)
except django.core.exceptions.ImproperlyConfigured:
log.debug("Error retrieving authentication information for user %r", user, exc_info=True)
|
57,839 |
def main():
params = demisto.params()
aws_default_region = params.get('defaultRegion')
aws_role_arn = params.get('roleArn')
aws_role_session_name = params.get('roleSessionName')
aws_role_session_duration = params.get('sessionDuration')
aws_role_policy = None
aws_access_key_id = params.get('access_key')
aws_secret_access_key = params.get('secret_key')
verify_certificate = not params.get('insecure', True)
timeout = demisto.params().get('timeout')
retries = demisto.params().get('retries') or 5
validate_params(aws_default_region, aws_role_arn, aws_role_session_name, aws_access_key_id,
aws_secret_access_key)
aws_client = AWSClient(aws_default_region, aws_role_arn, aws_role_session_name, aws_role_session_duration,
aws_role_policy, aws_access_key_id, aws_secret_access_key, verify_certificate, timeout,
retries)
command = demisto.command()
args = demisto.args()
try:
LOG('Command being called is {command}'.format(command=demisto.command()))
if command == 'test-module':
test_function(aws_client)
elif command == 'aws-iam-create-user':
create_user(args, aws_client)
elif command == 'aws-iam-create-login-profile':
create_login_profile(args, aws_client)
elif command == 'aws-iam-get-user':
get_user(args, aws_client)
elif command == 'aws-iam-list-users':
list_users(args, aws_client)
elif command == 'aws-iam-update-user':
update_user(args, aws_client)
elif command == 'aws-iam-delete-user':
delete_user(args, aws_client)
elif command == 'aws-iam-update-login-profile':
update_login_profile(args, aws_client)
elif command == 'aws-iam-create-group':
create_group(args, aws_client)
elif command == 'aws-iam-list-groups':
list_groups(args, aws_client)
elif command == 'aws-iam-list-groups-for-user':
list_groups_for_user(args, aws_client)
elif command == 'aws-iam-create-access-key':
create_access_key(args, aws_client)
elif command == 'aws-iam-update-access-key':
update_access_key(args, aws_client)
elif command == 'aws-iam-list-access-keys-for-user':
list_access_key_for_user(args, aws_client)
elif command == 'aws-iam-list-policies':
list_policies(args, aws_client)
elif command == 'aws-iam-list-roles':
list_roles(args, aws_client)
elif command == 'aws-iam-attach-policy':
attach_policy(args, aws_client)
elif command == 'aws-iam-detach-policy':
detach_policy(args, aws_client)
elif command == 'aws-iam-delete-login-profile':
delete_login_profile(args, aws_client)
elif command == 'aws-iam-add-user-to-group':
add_user_to_group(args, aws_client)
elif command == 'aws-iam-delete-group':
delete_group(args, aws_client)
elif command == 'aws-iam-remove-user-from-group':
remove_user_from_group(args, aws_client)
elif command == 'aws-iam-delete-access-key':
delete_access_key(args, aws_client)
elif command == 'aws-iam-create-instance-profile':
create_instance_profile(args, aws_client)
elif command == 'aws-iam-delete-instance-profile':
delete_instance_profile(args, aws_client)
elif command == 'aws-iam-list-instance-profiles':
list_instance_profiles(args, aws_client)
elif command == 'aws-iam-add-role-to-instance-profile':
add_role_to_instance_profile(args, aws_client)
elif command == 'aws-iam-remove-role-from-instance-profile':
remove_role_from_instance_profile(args, aws_client)
elif command == 'aws-iam-list-instance-profiles-for-role':
list_instance_profiles_for_role(args, aws_client)
elif command == 'aws-iam-get-instance-profile':
get_instance_profile(args, aws_client)
elif command == 'aws-iam-get-role':
get_role(args, aws_client)
elif command == 'aws-iam-delete-role':
delete_role(args, aws_client)
elif command == 'aws-iam-create-role':
create_role(args, aws_client)
elif command == 'aws-iam-create-policy':
create_policy(args, aws_client)
elif command == 'aws-iam-delete-policy':
delete_policy(args, aws_client)
elif command == 'aws-iam-create-policy-version':
create_policy_version(args, aws_client)
elif command == 'aws-iam-delete-policy-version':
delete_policy_version(args, aws_client)
elif command == 'aws-iam-list-policy-versions':
list_policy_versions(args, aws_client)
elif command == 'aws-iam-get-policy-version':
get_policy_version(args, aws_client)
elif command == 'aws-iam-set-default-policy-version':
set_default_policy_version(args, aws_client)
elif command == 'aws-iam-create-account-alias':
create_account_alias(args, aws_client)
elif command == 'aws-iam-delete-account-alias':
delete_account_alias(args, aws_client)
elif command == 'aws-iam-get-account-password-policy':
get_account_password_policy(args, aws_client)
elif command == 'aws-iam-update-account-password-policy':
update_account_password_policy(args, aws_client)
except Exception as e:
LOG(str(e))
return_error('Error has occurred in the AWS IAM Integration: {code}\n {message}'.format(
code=type(e), message=str(e)))
|
def main():
params = demisto.params()
aws_default_region = params.get('defaultRegion')
aws_role_arn = params.get('roleArn')
aws_role_session_name = params.get('roleSessionName')
aws_role_session_duration = params.get('sessionDuration')
aws_role_policy = None
aws_access_key_id = params.get('access_key')
aws_secret_access_key = params.get('secret_key')
verify_certificate = not params.get('insecure', True)
timeout = params.get('timeout')
retries = params.get('retries') or 5
validate_params(aws_default_region, aws_role_arn, aws_role_session_name, aws_access_key_id,
aws_secret_access_key)
aws_client = AWSClient(aws_default_region, aws_role_arn, aws_role_session_name, aws_role_session_duration,
aws_role_policy, aws_access_key_id, aws_secret_access_key, verify_certificate, timeout,
retries)
command = demisto.command()
args = demisto.args()
try:
LOG('Command being called is {command}'.format(command=demisto.command()))
if command == 'test-module':
test_function(aws_client)
elif command == 'aws-iam-create-user':
create_user(args, aws_client)
elif command == 'aws-iam-create-login-profile':
create_login_profile(args, aws_client)
elif command == 'aws-iam-get-user':
get_user(args, aws_client)
elif command == 'aws-iam-list-users':
list_users(args, aws_client)
elif command == 'aws-iam-update-user':
update_user(args, aws_client)
elif command == 'aws-iam-delete-user':
delete_user(args, aws_client)
elif command == 'aws-iam-update-login-profile':
update_login_profile(args, aws_client)
elif command == 'aws-iam-create-group':
create_group(args, aws_client)
elif command == 'aws-iam-list-groups':
list_groups(args, aws_client)
elif command == 'aws-iam-list-groups-for-user':
list_groups_for_user(args, aws_client)
elif command == 'aws-iam-create-access-key':
create_access_key(args, aws_client)
elif command == 'aws-iam-update-access-key':
update_access_key(args, aws_client)
elif command == 'aws-iam-list-access-keys-for-user':
list_access_key_for_user(args, aws_client)
elif command == 'aws-iam-list-policies':
list_policies(args, aws_client)
elif command == 'aws-iam-list-roles':
list_roles(args, aws_client)
elif command == 'aws-iam-attach-policy':
attach_policy(args, aws_client)
elif command == 'aws-iam-detach-policy':
detach_policy(args, aws_client)
elif command == 'aws-iam-delete-login-profile':
delete_login_profile(args, aws_client)
elif command == 'aws-iam-add-user-to-group':
add_user_to_group(args, aws_client)
elif command == 'aws-iam-delete-group':
delete_group(args, aws_client)
elif command == 'aws-iam-remove-user-from-group':
remove_user_from_group(args, aws_client)
elif command == 'aws-iam-delete-access-key':
delete_access_key(args, aws_client)
elif command == 'aws-iam-create-instance-profile':
create_instance_profile(args, aws_client)
elif command == 'aws-iam-delete-instance-profile':
delete_instance_profile(args, aws_client)
elif command == 'aws-iam-list-instance-profiles':
list_instance_profiles(args, aws_client)
elif command == 'aws-iam-add-role-to-instance-profile':
add_role_to_instance_profile(args, aws_client)
elif command == 'aws-iam-remove-role-from-instance-profile':
remove_role_from_instance_profile(args, aws_client)
elif command == 'aws-iam-list-instance-profiles-for-role':
list_instance_profiles_for_role(args, aws_client)
elif command == 'aws-iam-get-instance-profile':
get_instance_profile(args, aws_client)
elif command == 'aws-iam-get-role':
get_role(args, aws_client)
elif command == 'aws-iam-delete-role':
delete_role(args, aws_client)
elif command == 'aws-iam-create-role':
create_role(args, aws_client)
elif command == 'aws-iam-create-policy':
create_policy(args, aws_client)
elif command == 'aws-iam-delete-policy':
delete_policy(args, aws_client)
elif command == 'aws-iam-create-policy-version':
create_policy_version(args, aws_client)
elif command == 'aws-iam-delete-policy-version':
delete_policy_version(args, aws_client)
elif command == 'aws-iam-list-policy-versions':
list_policy_versions(args, aws_client)
elif command == 'aws-iam-get-policy-version':
get_policy_version(args, aws_client)
elif command == 'aws-iam-set-default-policy-version':
set_default_policy_version(args, aws_client)
elif command == 'aws-iam-create-account-alias':
create_account_alias(args, aws_client)
elif command == 'aws-iam-delete-account-alias':
delete_account_alias(args, aws_client)
elif command == 'aws-iam-get-account-password-policy':
get_account_password_policy(args, aws_client)
elif command == 'aws-iam-update-account-password-policy':
update_account_password_policy(args, aws_client)
except Exception as e:
LOG(str(e))
return_error('Error has occurred in the AWS IAM Integration: {code}\n {message}'.format(
code=type(e), message=str(e)))
|
5,407 |
def reboot(name, conn=None):
"""
Reboot a single VM
"""
if not conn:
conn = get_conn() # pylint: disable=E0602
node = get_node(conn, name)
if node is None:
log.error("Unable to find the VM %s", name)
log.info("Rebooting VM: %s", name)
ret = conn.reboot_node(node)
if ret:
log.info("Rebooted VM: %s", name)
# Fire reboot action
__utils__["cloud.fire_event"](
"event",
"{} has been rebooted".format(name),
"salt-cloudsalt/cloud/{}/rebooting".format(name),
args={"name": name},
sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"],
)
return True
log.error("Failed to reboot VM: %s", name)
return False
|
def reboot(name, conn=None):
"""
Reboot a single VM
"""
if not conn:
conn = get_conn() # pylint: disable=E0602
node = get_node(conn, name)
if node is None:
log.error("Unable to find the VM %s", name)
log.info("Rebooting VM: %s", name)
ret = conn.reboot_node(node)
if ret:
log.info("Rebooted VM: %s", name)
# Fire reboot action
__utils__["cloud.fire_event"](
"event",
"{} has been rebooted".format(name),
"salt/cloud/{}/rebooting".format(name),
args={"name": name},
sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"],
)
return True
log.error("Failed to reboot VM: %s", name)
return False
|
12,271 |
def _gate_processor(command):
'''
Process tokens for a gate call statement separating them into args and regs.
Processes tokens from a "gate call" (e.g. rx(pi) q[0]) and returns the
tokens for the arguments and registers separtely.
'''
gate_args = []
gate_regs = []
tokens = command[1:]
reg_start = 0
# extract arguments
if "(" in tokens and ")" in tokens:
bopen = tokens.index("(")
bclose = tokens.index(")")
gate_args = tokens[bopen+1:bclose]
reg_start = bclose+1
# extract registers
gate_regs = tokens[reg_start:]
return gate_args, gate_regs
|
def _gate_processor(command):
'''
Process tokens for a gate call statement separating them into args and regs.
Processes tokens from a "gate call" (e.g. rx(pi) q[0]) and returns the
tokens for the arguments and registers separately.
'''
gate_args = []
gate_regs = []
tokens = command[1:]
reg_start = 0
# extract arguments
if "(" in tokens and ")" in tokens:
bopen = tokens.index("(")
bclose = tokens.index(")")
gate_args = tokens[bopen+1:bclose]
reg_start = bclose+1
# extract registers
gate_regs = tokens[reg_start:]
return gate_args, gate_regs
|
7,126 |
def perimeter(image, neighbourhood=4):
"""Calculate total perimeter of all objects in binary image.
Parameters
----------
image : (N, M) ndarray
2D binary image.
neighbourhood : 4 or 8, optional
Neighborhood connectivity for border pixel determination. It's used to
compute the contour. A higher neighbourhood widens the border on which
the perimeter is computed.
Returns
-------
perimeter : float
Total perimeter of all objects in binary image.
References
----------
.. [1] K. Benkrid, D. Crookes. Design and FPGA Implementation of
a Perimeter Estimator. The Queen's University of Belfast.
http://www.cs.qub.ac.uk/~d.crookes/webpubs/papers/perimeter.doc
Examples
--------
>>> from skimage import data, util
>>> from skimage.measure import label
# coins image (binary)
>>> img_coins = util.img_as_ubyte(data.coins()) > 110
# total perimeter of all objects in the image
>>> perimeter(img_coins, neighbourhood=4)
7796.8679964360044
>>> perimeter(img_coins, neighbourhood=8)
8806.2680733252855
"""
if image.ndim > 2:
raise NotImplementedError('perimeter does not support 3D images')
if neighbourhood == 4:
strel = STREL_4
else:
strel = STREL_8
image = image.astype(np.uint8)
eroded_image = ndi.binary_erosion(image, strel, border_value=0)
border_image = image - eroded_image
perimeter_weights = np.zeros(50, dtype=np.double)
perimeter_weights[[5, 7, 15, 17, 25, 27]] = 1
perimeter_weights[[21, 33]] = sqrt(2)
perimeter_weights[[13, 23]] = (1 + sqrt(2)) / 2
perimeter_image = ndi.convolve(border_image, np.array([[10, 2, 10],
[ 2, 1, 2],
[10, 2, 10]]),
mode='constant', cval=0)
# You can also write
# return perimeter_weights[perimeter_image].sum()
# but that was measured as taking much longer than bincount + np.dot (5x
# as much time)
perimeter_histogram = np.bincount(perimeter_image.ravel(), minlength=50)
total_perimeter = perimeter_histogram @ perimeter_weights
return total_perimeter
|
def perimeter(image, neighbourhood=4):
"""Calculate total perimeter of all objects in binary image.
Parameters
----------
image : (N, M) ndarray
2D binary image.
neighbourhood : 4 or 8, optional
Neighborhood connectivity for border pixel determination. It's used to
compute the contour. A higher neighbourhood widens the border on which
the perimeter is computed.
Returns
-------
perimeter : float
Total perimeter of all objects in binary image.
References
----------
.. [1] K. Benkrid, D. Crookes. Design and FPGA Implementation of
a Perimeter Estimator. The Queen's University of Belfast.
http://www.cs.qub.ac.uk/~d.crookes/webpubs/papers/perimeter.doc
Examples
--------
>>> from skimage import data, util
>>> from skimage.measure import label
# coins image (binary)
>>> img_coins = util.img_as_ubyte(data.coins()) > 110
# total perimeter of all objects in the image
>>> perimeter(img_coins, neighbourhood=4)
7796.8679964360044
>>> perimeter(img_coins, neighbourhood=8)
8806.2680733252855
"""
if image.ndim > 2:
raise NotImplementedError('`perimeter` supports 2D images only')
if neighbourhood == 4:
strel = STREL_4
else:
strel = STREL_8
image = image.astype(np.uint8)
eroded_image = ndi.binary_erosion(image, strel, border_value=0)
border_image = image - eroded_image
perimeter_weights = np.zeros(50, dtype=np.double)
perimeter_weights[[5, 7, 15, 17, 25, 27]] = 1
perimeter_weights[[21, 33]] = sqrt(2)
perimeter_weights[[13, 23]] = (1 + sqrt(2)) / 2
perimeter_image = ndi.convolve(border_image, np.array([[10, 2, 10],
[ 2, 1, 2],
[10, 2, 10]]),
mode='constant', cval=0)
# You can also write
# return perimeter_weights[perimeter_image].sum()
# but that was measured as taking much longer than bincount + np.dot (5x
# as much time)
perimeter_histogram = np.bincount(perimeter_image.ravel(), minlength=50)
total_perimeter = perimeter_histogram @ perimeter_weights
return total_perimeter
|
39,749 |
def modify_span_sqs(span, args, kwargs):
if span.id:
trace_parent = span.transaction.trace_parent.copy_from(span_id=span.id)
else:
# this is a dropped span, use transaction id instead
transaction = execution_context.get_transaction()
trace_parent = transaction.trace_parent.copy_from(span_id=transaction.id)
attributes = {constants.TRACEPARENT_HEADER_NAME: {"DataType": "String", "StringValue": trace_parent.to_string()}}
if trace_parent.tracestate:
attributes[constants.TRACESTATE_HEADER_NAME] = {"DataType": "String", "StringValue": trace_parent.tracestate}
if len(args) > 1:
attributes_count = len(attributes)
if "MessageAttributes" in args[1]:
messages = [args[1]]
# both send_batch and delete_batch use the same "Entries" list. We only want to add the
# traceparent to send_batch. We use the existence of ReceiptHandle to differentiate between the two
elif "Entries" in args[1] and args[1]["Entries"] and "ReceiptHandle" not in args[1]["Entries"][0]:
messages = args[1]["Entries"]
else:
messages = []
for message in messages:
message["MessageAttributes"] = message.get("MessageAttributes") or {}
if len(message["MessageAttributes"]) + attributes_count <= SQS_MAX_ATTRIBUTES:
message["MessageAttributes"].update(attributes)
else:
logger.info("Not adding disttracing headers to message due to attribute limit reached")
|
def modify_span_sqs(span, args, kwargs):
if span.id:
trace_parent = span.transaction.trace_parent.copy_from(span_id=span.id)
else:
# this is a dropped span, use transaction id instead
transaction = execution_context.get_transaction()
trace_parent = transaction.trace_parent.copy_from(span_id=transaction.id)
attributes = {constants.TRACEPARENT_HEADER_NAME: {"DataType": "String", "StringValue": trace_parent.to_string()}}
if trace_parent.tracestate:
attributes[constants.TRACESTATE_HEADER_NAME] = {"DataType": "String", "StringValue": trace_parent.tracestate}
if len(args) > 1:
attributes_count = len(attributes)
if "MessageAttributes" in args[1]:
messages = [args[1]]
# both send_batch and delete_batch use the same "Entries" list. We only want to add the
# traceparent to send_batch. We use the existence of ReceiptHandle to differentiate between the two
elif "Entries" in args[1] and args[1]["Entries"] and "ReceiptHandle" not in args[1]["Entries"][0]:
messages = args[1]["Entries"]
else:
messages = []
for message in messages:
message["MessageAttributes"] = message.get("MessageAttributes", {})
if len(message["MessageAttributes"]) + attributes_count <= SQS_MAX_ATTRIBUTES:
message["MessageAttributes"].update(attributes)
else:
logger.info("Not adding disttracing headers to message due to attribute limit reached")
|
36,379 |
def rmtree(path, ignore_errors=False, onerror=None):
"""Recursively delete a directory tree.
If ignore_errors is set, errors are ignored; otherwise, if onerror
is set, it is called to handle the error with arguments (func,
path, exc_info) where func is platform and implementation dependent;
path is the argument to that function that caused it to fail; and
exc_info is a tuple returned by sys.exc_info(). If ignore_errors
is false and onerror is None, an exception is raised.
"""
if ignore_errors:
def onerror(*args):
pass
elif onerror is None:
def onerror(*args):
raise
try:
if not os.path.exists(path):
raise FileNotFoundError("Cannot call rmtree on a non-existent path")
except:
onerror(os.path.exists, path, sys.exc_info())
return
if _use_fd_functions:
# While the unsafe rmtree works fine on bytes, the fd based does not.
if isinstance(path, bytes):
path = os.fsdecode(path)
# Note: To guard against symlink races, we use the standard
# lstat()/open()/fstat() trick.
try:
orig_st = os.lstat(path)
except Exception:
onerror(os.lstat, path, sys.exc_info())
return
try:
fd = os.open(path, os.O_RDONLY)
except Exception:
onerror(os.lstat, path, sys.exc_info())
return
try:
if os.path.samestat(orig_st, os.fstat(fd)):
_rmtree_safe_fd(fd, path, onerror)
try:
os.rmdir(path)
except OSError:
onerror(os.rmdir, path, sys.exc_info())
else:
try:
# symlinks to directories are forbidden, see bug #1669
raise OSError("Cannot call rmtree on a symbolic link")
except OSError:
onerror(os.path.islink, path, sys.exc_info())
finally:
os.close(fd)
else:
try:
if os.path.islink(path):
# symlinks to directories are forbidden, see bug #1669
raise OSError("Cannot call rmtree on a symbolic link")
except OSError:
onerror(os.path.islink, path, sys.exc_info())
# can't continue even if onerror hook returns
return
return _rmtree_unsafe(path, onerror)
|
def rmtree(path, ignore_errors=False, onerror=None):
"""Recursively delete a directory tree.
If ignore_errors is set, errors are ignored; otherwise, if onerror
is set, it is called to handle the error with arguments (func,
path, exc_info) where func is platform and implementation dependent;
path is the argument to that function that caused it to fail; and
exc_info is a tuple returned by sys.exc_info(). If ignore_errors
is false and onerror is None, an exception is raised.
"""
if ignore_errors:
def onerror(*args):
pass
elif onerror is None:
def onerror(*args):
raise
try:
if not os.path.exists(path):
raise FileNotFoundError(errno.ENOENT, "Cannot call rmtree on a non-existent path", path)
except:
onerror(os.path.exists, path, sys.exc_info())
return
if _use_fd_functions:
# While the unsafe rmtree works fine on bytes, the fd based does not.
if isinstance(path, bytes):
path = os.fsdecode(path)
# Note: To guard against symlink races, we use the standard
# lstat()/open()/fstat() trick.
try:
orig_st = os.lstat(path)
except Exception:
onerror(os.lstat, path, sys.exc_info())
return
try:
fd = os.open(path, os.O_RDONLY)
except Exception:
onerror(os.lstat, path, sys.exc_info())
return
try:
if os.path.samestat(orig_st, os.fstat(fd)):
_rmtree_safe_fd(fd, path, onerror)
try:
os.rmdir(path)
except OSError:
onerror(os.rmdir, path, sys.exc_info())
else:
try:
# symlinks to directories are forbidden, see bug #1669
raise OSError("Cannot call rmtree on a symbolic link")
except OSError:
onerror(os.path.islink, path, sys.exc_info())
finally:
os.close(fd)
else:
try:
if os.path.islink(path):
# symlinks to directories are forbidden, see bug #1669
raise OSError("Cannot call rmtree on a symbolic link")
except OSError:
onerror(os.path.islink, path, sys.exc_info())
# can't continue even if onerror hook returns
return
return _rmtree_unsafe(path, onerror)
|
20,021 |
def segment_curvature(segmented_img, objects):
""" Calculate segment curvature as defined by the ratio between geodesic and euclidean distance.
Measurement of two-dimensional tortuosity.
Inputs:
segmented_img = Segmented image to plot lengths on
objects = List of contours
Returns:
labeled_img = Segmented debugging image with curvature labeled
:param segmented_img: numpy.ndarray
:param objects: list
:return labeled_img: numpy.ndarray
"""
label_coord_x = []
label_coord_y = []
labeled_img = segmented_img.copy()
# Store debug
debug = params.debug
params.debug = None
_ = segment_euclidean_length(segmented_img, objects)
_ = segment_path_length(segmented_img, objects)
eu_lengths = outputs.observations['segment_eu_length']['value']
path_lengths = outputs.observations['segment_path_length']['value']
curvature_measure = [float(x/y) for x, y in zip(path_lengths, eu_lengths)]
rand_color = color_palette(len(objects))
for i, cnt in enumerate(objects):
# Store coordinates for labels
label_coord_x.append(objects[i][0][0][0])
label_coord_y.append(objects[i][0][0][1])
# Draw segments one by one to group segment tips together
finding_tips_img = np.zeros(segmented_img.shape[:2], np.uint8)
cv2.drawContours(finding_tips_img, objects, i, (255, 255, 255), 1, lineType=8)
segment_tips = find_tips(finding_tips_img)
tip_objects, tip_hierarchies = find_objects(segment_tips, segment_tips)
points = []
for t in tip_objects:
# Gather pairs of coordinates
x, y = t.ravel()
coord = (x, y)
points.append(coord)
# Draw euclidean distance lines
cv2.line(labeled_img, points[0], points[1], rand_color[i], 1)
segment_ids = []
# Reset debug mode
params.debug = debug
for i, cnt in enumerate(objects):
# Calculate geodesic distance
text = "{:.3f}".format(curvature_measure[i])
w = label_coord_x[i]
h = label_coord_y[i]
cv2.putText(img=labeled_img, text=text, org=(w, h), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=params.text_size, color=(150, 150, 150), thickness=params.text_thickness)
# segment_label = "ID" + str(i)
segment_ids.append(i)
outputs.add_observation(variable='segment_curvature', trait='segment curvature',
method='plantcv.plantcv.morphology.segment_curvature', scale='none', datatype=list,
value=curvature_measure, label=segment_ids)
# Auto-increment device
params.device += 1
if params.debug == 'print':
print_image(labeled_img, os.path.join(params.debug_outdir, str(params.device) + '_segment_curvature.png'))
elif params.debug == 'plot':
plot_image(labeled_img)
return labeled_img
|
def segment_curvature(segmented_img, objects):
""" Calculate segment curvature as defined by the ratio between geodesic and euclidean distance.
Measurement of two-dimensional tortuosity.
Inputs:
segmented_img = Segmented image to plot lengths on
objects = List of contours
Returns:
labeled_img = Segmented debugging image with curvature labeled
:param segmented_img: numpy.ndarray
:param objects: list
:return labeled_img: numpy.ndarray
"""
label_coord_x = []
label_coord_y = []
labeled_img = segmented_img.copy()
# Store debug
debug = params.debug
params.debug = None
_ = segment_euclidean_length(segmented_img, objects)
_ = segment_path_length(segmented_img, objects)
eu_lengths = outputs.observations['segment_eu_length']['value']
path_lengths = outputs.observations['segment_path_length']['value']
curvature_measure = [float(x / y) for x, y in zip(path_lengths, eu_lengths)]
rand_color = color_palette(len(objects))
for i, cnt in enumerate(objects):
# Store coordinates for labels
label_coord_x.append(objects[i][0][0][0])
label_coord_y.append(objects[i][0][0][1])
# Draw segments one by one to group segment tips together
finding_tips_img = np.zeros(segmented_img.shape[:2], np.uint8)
cv2.drawContours(finding_tips_img, objects, i, (255, 255, 255), 1, lineType=8)
segment_tips = find_tips(finding_tips_img)
tip_objects, tip_hierarchies = find_objects(segment_tips, segment_tips)
points = []
for t in tip_objects:
# Gather pairs of coordinates
x, y = t.ravel()
coord = (x, y)
points.append(coord)
# Draw euclidean distance lines
cv2.line(labeled_img, points[0], points[1], rand_color[i], 1)
segment_ids = []
# Reset debug mode
params.debug = debug
for i, cnt in enumerate(objects):
# Calculate geodesic distance
text = "{:.3f}".format(curvature_measure[i])
w = label_coord_x[i]
h = label_coord_y[i]
cv2.putText(img=labeled_img, text=text, org=(w, h), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=params.text_size, color=(150, 150, 150), thickness=params.text_thickness)
# segment_label = "ID" + str(i)
segment_ids.append(i)
outputs.add_observation(variable='segment_curvature', trait='segment curvature',
method='plantcv.plantcv.morphology.segment_curvature', scale='none', datatype=list,
value=curvature_measure, label=segment_ids)
# Auto-increment device
params.device += 1
if params.debug == 'print':
print_image(labeled_img, os.path.join(params.debug_outdir, str(params.device) + '_segment_curvature.png'))
elif params.debug == 'plot':
plot_image(labeled_img)
return labeled_img
|
32,301 |
def main():
# Args is always stronger. Get last run even stronger
demisto_params = demisto.params() | demisto.args() | demisto.getLastRun()
demisto_params['client_id'] = demisto_params['auth_credendtials']['password']
events_to_add_per_request = demisto_params.get('events_to_add_per_request', 1000)
try:
events_to_add_per_request = int(events_to_add_per_request)
except ValueError:
events_to_add_per_request = 1000
request = Request(**demisto_params)
url = urljoin(demisto_params.get("url"), 'services/oauth2/token')
request.url = f'{url}?grant_type=password&' \
f'client_id={demisto_params.get("client_id")}&' \
f'client_secret={demisto_params.get("client_secret")}&' \
f'username={demisto_params.get("username")}&' \
f'password={demisto_params.get("password")}'
client = Client(request)
after = get_timestamp_format(demisto_params.get('after'))
get_events = GetEvents(client, demisto_params.get('verify'),
demisto_params.get('query'), after, demisto_params.get('last_id'))
command = demisto.command()
try:
urllib3.disable_warnings()
if command == 'test-module':
get_events.aggregated_results(limit=1)
return_results('ok')
elif command == 'salesforce-get-events' or command == 'fetch-events':
events = get_events.aggregated_results(limit=int(demisto_params.get('limit')))
if command == 'fetch-events':
while len(events) > 0:
send_events_to_xsiam(events[:events_to_add_per_request], 'salesforce-audit', 'salesforce-audit')
events = events[events_to_add_per_request:]
elif command == 'salesforce-get-events':
command_results = CommandResults(
readable_output=tableToMarkdown('salesforce Logs', events, headerTransform=pascalToSpace),
outputs_prefix='salesforce.Logs',
outputs_key_field='timestamp',
outputs=events,
raw_response=events,
)
return_results(command_results)
except Exception as e:
return_error(str(e))
|
def main():
# Args is always stronger. Get last run even stronger
demisto_params = demisto.params() | demisto.args() | demisto.getLastRun()
demisto_params['client_id'] = demisto_params['auth_credendtials']['password']
events_to_add_per_request = demisto_params.get('events_to_add_per_request', 1000)
try:
events_to_add_per_request = int(events_to_add_per_request)
except ValueError:
events_to_add_per_request = 1000
request = Request(**demisto_params)
url = urljoin(demisto_params.get("url"), 'services/oauth2/token')
request.url = f'{url}?grant_type=password&' \
f'client_id={demisto_params.get("client_id")}&' \
f'client_secret={demisto_params.get("client_secret")}&' \
f'username={demisto_params.get("username")}&' \
f'password={demisto_params.get("password")}'
client = Client(request)
after = get_timestamp_format(demisto_params.get('after'))
get_events = GetEvents(client, demisto_params.get('verify'),
demisto_params.get('query'), after, demisto_params.get('last_id'))
command = demisto.command()
try:
urllib3.disable_warnings()
if command == 'test-module':
get_events.aggregated_results(limit=1)
return_results('ok')
elif command in ('salesforce-get-events', 'fetch-events'):
events = get_events.aggregated_results(limit=int(demisto_params.get('limit')))
if command == 'fetch-events':
while len(events) > 0:
send_events_to_xsiam(events[:events_to_add_per_request], 'salesforce-audit', 'salesforce-audit')
events = events[events_to_add_per_request:]
elif command == 'salesforce-get-events':
command_results = CommandResults(
readable_output=tableToMarkdown('salesforce Logs', events, headerTransform=pascalToSpace),
outputs_prefix='salesforce.Logs',
outputs_key_field='timestamp',
outputs=events,
raw_response=events,
)
return_results(command_results)
except Exception as e:
return_error(str(e))
|
30,924 |
def get_group_state_command():
groupID = args.get('id')
page = 'groups/'
URL = baseURL + page + groupID
response = requests.get(URL)
if not response.ok:
error = "Error in request {} - {}".format(response.status_code, response.text)
raise Exception(text)
data = response.json()
return data
|
def get_group_state_command():
groupID = args.get('id')
page = 'groups/'
URL = baseURL + page + groupID
response = requests.get(URL)
if not response.ok:
error = "Error in request {} - {}".format(response.status_code, response.text)
raise Exception(error)
data = response.json()
return data
|
29,814 |
def validate_tron(service_path, verbose=False):
soa_dir, service = path_to_soa_dir_service(service_path)
returncode = True
for cluster in list_tron_clusters(service, soa_dir):
if not validate_tron_namespace(service, cluster, soa_dir):
returncode = False
elif verbose:
service_config = load_tron_service_config_no_cache(service, cluster)
for config in service_config:
schedule = config.get_schedule()
num_runs = 5
if schedule.startswith("cron"):
print(info_message(f"Next 5 cron runs for {config.get_name()}"))
next_cron_runs = get_next_x_cron_runs(
num_runs, schedule.replace("cron", ""), datetime.today()
)
for run in next_cron_runs:
print(f"{run}")
return returncode
|
def validate_tron(service_path: str, verbose: bool = False) -> bool:
soa_dir, service = path_to_soa_dir_service(service_path)
returncode = True
for cluster in list_tron_clusters(service, soa_dir):
if not validate_tron_namespace(service, cluster, soa_dir):
returncode = False
elif verbose:
service_config = load_tron_service_config_no_cache(service, cluster)
for config in service_config:
schedule = config.get_schedule()
num_runs = 5
if schedule.startswith("cron"):
print(info_message(f"Next 5 cron runs for {config.get_name()}"))
next_cron_runs = get_next_x_cron_runs(
num_runs, schedule.replace("cron", ""), datetime.today()
)
for run in next_cron_runs:
print(f"{run}")
return returncode
|
3,020 |
def interpolate_1d_fill(
values,
method="pad",
axis=0,
limit=None,
limit_area=None,
fill_value=None,
dtype=None,
):
"""
This is a 1D-versoin of `interpolate_2d`, which is used for methods `pad`
and `backfill` when interpolating. This 1D-version is necessary to be
able to handle kwarg `limit_area` via the function
` _derive_indices_of_nans_to_preserve`. It is used the same way as the
1D-interpolation functions which are based on scipy-interpolation, i.e.
via np.apply_along_axis.
"""
if method == "pad":
limit_direction = "forward"
elif method == "backfill":
limit_direction = "backward"
else:
raise ValueError("`method` must be either 'pad' or 'backfill'.")
orig_values = values
yvalues = values
invalid = isna(yvalues)
valid = ~invalid
if values.ndim > 1:
raise AssertionError("This only works with 1D data.")
if fill_value is None:
mask = None
else: # todo create faster fill func without masking
mask = mask_missing(values, fill_value)
preserve_nans = _derive_indices_of_nans_to_preserve(
yvalues=yvalues,
valid=valid,
invalid=invalid,
limit=limit,
limit_area=limit_area,
limit_direction=limit_direction,
)
method = clean_fill_method(method)
if method == "pad":
values = pad_1d(values, limit=limit, mask=mask, dtype=dtype)
else:
values = backfill_1d(values, limit=limit, mask=mask, dtype=dtype)
if orig_values.dtype.kind == "M":
# convert float back to datetime64
values = values.astype(orig_values.dtype)
values[preserve_nans] = fill_value
return values
|
def interpolate_1d_fill(
values,
method="pad",
axis: Axis = 0,
limit=None,
limit_area=None,
fill_value=None,
dtype=None,
):
"""
This is a 1D-versoin of `interpolate_2d`, which is used for methods `pad`
and `backfill` when interpolating. This 1D-version is necessary to be
able to handle kwarg `limit_area` via the function
` _derive_indices_of_nans_to_preserve`. It is used the same way as the
1D-interpolation functions which are based on scipy-interpolation, i.e.
via np.apply_along_axis.
"""
if method == "pad":
limit_direction = "forward"
elif method == "backfill":
limit_direction = "backward"
else:
raise ValueError("`method` must be either 'pad' or 'backfill'.")
orig_values = values
yvalues = values
invalid = isna(yvalues)
valid = ~invalid
if values.ndim > 1:
raise AssertionError("This only works with 1D data.")
if fill_value is None:
mask = None
else: # todo create faster fill func without masking
mask = mask_missing(values, fill_value)
preserve_nans = _derive_indices_of_nans_to_preserve(
yvalues=yvalues,
valid=valid,
invalid=invalid,
limit=limit,
limit_area=limit_area,
limit_direction=limit_direction,
)
method = clean_fill_method(method)
if method == "pad":
values = pad_1d(values, limit=limit, mask=mask, dtype=dtype)
else:
values = backfill_1d(values, limit=limit, mask=mask, dtype=dtype)
if orig_values.dtype.kind == "M":
# convert float back to datetime64
values = values.astype(orig_values.dtype)
values[preserve_nans] = fill_value
return values
|
2,159 |
def multiclass_brier_score_loss(y_true, y_prob, sample_weight=None,
labels=None):
r"""Compute the Brier score loss.
The smaller the Brier score loss, the better, hence the naming with "loss".
The Brier score measures the mean squared difference between the predicted
probability and the actual outcome.
For :math:`N` samples with :math:`C` different classes, the multi-class
Brier score is defined as:
.. math::
\frac{1}{N}\sum_{i=1}^{N}\sum_{c=1}^{C}(y_{ic} - \bar{y}_{ic})^{2}
where :math:`y_{ic}` is 1 if observation `i` belongs to class `c`,
otherwise 0 and :math:`\bar{y}_{ic}` is the predicted probability of
observation `i` for class `c`. The probabilities for `c` classes for
observation `i` should sum to 1.
The Brier score always takes on a value between [0, 2]. For the
binary case however, there is a more common definition of Brier score
implemented in :func:`brier_score_loss` that is exactly half of the value
returned by this function, thereby having a range between [0, 1].
It can be decomposed as the sum of refinement loss and calibration loss.
The Brier score is appropriate for binary and categorical outcomes that
can be structured as true or false, but is inappropriate for ordinal
variables which can take on three or more values (this is because the
Brier score assumes that all possible outcomes are equivalently
"distant" from one another).
Read more in the :ref:`User Guide <brier_score_loss>`.
Parameters
----------
y_true : array of shape (n_samples,)
True targets.
y_prob : array-like of float, shape=(n_samples, n_classes) or (n_samples,)
Predicted probabilities, as returned by a classifier's
predict_proba method. If ``y_pred.shape = (n_samples,)``
the probabilities provided are assumed to be that of the
positive class. The labels in ``y_pred`` are assumed to be
ordered alphabetically, as done by
:class:`preprocessing.LabelBinarizer`.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
labels : array-like, default=None
If not provided, labels will be inferred from y_true. If ``labels``
is ``None`` and ``y_prob`` has shape (n_samples,) the labels are
assumed to be binary and are inferred from ``y_true``.
Returns
-------
score : float
Brier score loss.
Examples
--------
>>> from sklearn.metrics import multiclass_brier_score_loss
>>> multiclass_brier_score_loss([0, 1, 1, 0],
... [0.1, 0.9, 0.8, 0.3])
0.074...
>>> multiclass_brier_score_loss(['eggs', 'ham', 'spam'], [[.8, .1, .1],
... [.2, .7, .1],
... [.2, .2, .6]])
0.146...
References
----------
.. [1] `Wikipedia entry for the Brier score
<https://en.wikipedia.org/wiki/Brier_score>`_.
"""
y_true = column_or_1d(y_true)
y_prob = check_array(y_prob, ensure_2d=False)
check_consistent_length(y_prob, y_true, sample_weight)
if y_prob.max() > 1:
raise ValueError("y_prob contains values greater than 1.")
if y_prob.min() < 0:
raise ValueError("y_prob contains values less than 0.")
lb = LabelBinarizer()
if labels is not None:
lb = lb.fit(labels)
else:
lb = lb.fit(y_true)
if len(lb.classes_) == 1:
if labels is None:
raise ValueError(f'y_true contains only one label: '
f'{lb.classes_[0]}. Please provide the true '
f'labels explicitly through the labels argument.')
else:
raise ValueError(f'The labels array needs to contain at least two '
f'labels, got {lb.classes_}.')
transformed_labels = lb.transform(y_true)
if transformed_labels.shape[1] == 1:
transformed_labels = np.append(1-transformed_labels,
transformed_labels, axis=1)
# If y_prob is of single dimension, assume y_true to be binary
if y_prob.ndim == 1:
y_prob = y_prob[:, np.newaxis]
if y_prob.shape[1] == 1:
y_prob = np.append(1 - y_prob, y_prob, axis=1)
# Check if dimensions are consistent.
transformed_labels = check_array(transformed_labels)
if len(lb.classes_) != y_prob.shape[1]:
if labels is None:
raise ValueError(f"y_true and y_prob contain different number of "
f"classes {transformed_labels.shape[1]}, "
f"{y_prob.shape[1]}. Please provide the true "
f"labels explicitly through the labels argument. "
f"Classes found in y_true: {lb.classes_}")
else:
raise ValueError(f'The number of classes in labels is different '
f'from that in y_prob. Classes found in '
f'labels: {lb.classes_}')
return np.average(np.sum((transformed_labels - y_prob) ** 2, axis=1),
weights=sample_weight)
|
def multiclass_brier_score_loss(y_true, y_prob, sample_weight=None,
labels=None):
r"""Compute the Brier score loss.
The smaller the Brier score loss, the better, hence the naming with "loss".
The Brier score measures the mean squared difference between the predicted
probability and the actual outcome.
For :math:`N` samples with :math:`C` different classes, the multi-class
Brier score is defined as:
.. math::
\frac{1}{N}\sum_{i=1}^{N}\sum_{c=1}^{C}(y_{ic} - \bar{y}_{ic})^{2}
where :math:`y_{ic}` is 1 if observation `i` belongs to class `c`,
otherwise 0 and :math:`\bar{y}_{ic}` is the predicted probability of
observation `i` for class `c`. The probabilities for `c` classes for
observation `i` should sum to 1.
The Brier score always takes on a value between [0, 2]. For the
binary case however, there is a more common definition of Brier score
implemented in :func:`brier_score_loss` that is exactly half of the value
returned by this function, thereby having a range between [0, 1].
It can be decomposed as the sum of refinement loss and calibration loss.
The Brier score is appropriate for binary and categorical outcomes that
can be structured as true or false, but is inappropriate for ordinal
variables which can take on three or more values (this is because the
Brier score assumes that all possible outcomes are equivalently
"distant" from one another).
Read more in the :ref:`User Guide <brier_score_loss>`.
Parameters
----------
y_true : array of shape (n_samples,)
True targets.
y_prob : array-like of float, shape=(n_samples, n_classes) or (n_samples,)
Predicted probabilities, as returned by a classifier's
predict_proba method. If ``y_pred.shape = (n_samples,)``
the probabilities provided are assumed to be that of the
positive class. The labels in ``y_prob`` are assumed to be
ordered alphabetically, as done by
:class:`preprocessing.LabelBinarizer`.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
labels : array-like, default=None
If not provided, labels will be inferred from y_true. If ``labels``
is ``None`` and ``y_prob`` has shape (n_samples,) the labels are
assumed to be binary and are inferred from ``y_true``.
Returns
-------
score : float
Brier score loss.
Examples
--------
>>> from sklearn.metrics import multiclass_brier_score_loss
>>> multiclass_brier_score_loss([0, 1, 1, 0],
... [0.1, 0.9, 0.8, 0.3])
0.074...
>>> multiclass_brier_score_loss(['eggs', 'ham', 'spam'], [[.8, .1, .1],
... [.2, .7, .1],
... [.2, .2, .6]])
0.146...
References
----------
.. [1] `Wikipedia entry for the Brier score
<https://en.wikipedia.org/wiki/Brier_score>`_.
"""
y_true = column_or_1d(y_true)
y_prob = check_array(y_prob, ensure_2d=False)
check_consistent_length(y_prob, y_true, sample_weight)
if y_prob.max() > 1:
raise ValueError("y_prob contains values greater than 1.")
if y_prob.min() < 0:
raise ValueError("y_prob contains values less than 0.")
lb = LabelBinarizer()
if labels is not None:
lb = lb.fit(labels)
else:
lb = lb.fit(y_true)
if len(lb.classes_) == 1:
if labels is None:
raise ValueError(f'y_true contains only one label: '
f'{lb.classes_[0]}. Please provide the true '
f'labels explicitly through the labels argument.')
else:
raise ValueError(f'The labels array needs to contain at least two '
f'labels, got {lb.classes_}.')
transformed_labels = lb.transform(y_true)
if transformed_labels.shape[1] == 1:
transformed_labels = np.append(1-transformed_labels,
transformed_labels, axis=1)
# If y_prob is of single dimension, assume y_true to be binary
if y_prob.ndim == 1:
y_prob = y_prob[:, np.newaxis]
if y_prob.shape[1] == 1:
y_prob = np.append(1 - y_prob, y_prob, axis=1)
# Check if dimensions are consistent.
transformed_labels = check_array(transformed_labels)
if len(lb.classes_) != y_prob.shape[1]:
if labels is None:
raise ValueError(f"y_true and y_prob contain different number of "
f"classes {transformed_labels.shape[1]}, "
f"{y_prob.shape[1]}. Please provide the true "
f"labels explicitly through the labels argument. "
f"Classes found in y_true: {lb.classes_}")
else:
raise ValueError(f'The number of classes in labels is different '
f'from that in y_prob. Classes found in '
f'labels: {lb.classes_}')
return np.average(np.sum((transformed_labels - y_prob) ** 2, axis=1),
weights=sample_weight)
|
25,119 |
def skip_test_env_var(name):
""" Checks for environment variables indicating whether tests requiring services should be run
"""
value = os.environ.get(name, '0')
return pytest.mark.skipif(value == '0', reason='Tests not enabled via environment variable')
|
def skip_test_env_var(name):
""" Checks for environment variables indicating whether tests requiring services should be run
"""
value = os.environ.get(name, '') or '0'
return pytest.mark.skipif(value == '0', reason='Tests not enabled via environment variable')
|
40,771 |
def hash_checkpoint(checkpoint_path: Union[str, Path], output_dir: Union[str, Path],) -> Tuple[Path, str]:
"""
Hash the checkpoint file in the format of ``<filename>-<hash>.<ext>``
to be used with ``check_hash`` of :func:`torch.hub.load_state_dict_from_url`.
Args:
checkpoint_path: Path to the checkpoint file.
output_dir: Output directory to store the hashed checkpoint file.
(will be created if not exist)
Returns:
Path to the hashed checkpoint file, The first 8 digits of SHA256 hash.
.. versionadded:: 0.5.0
"""
if isinstance(checkpoint_path, str):
checkpoint_path = Path(checkpoint_path)
if isinstance(output_dir, str):
output_dir = Path(output_dir)
output_dir.mkdir(parents=True, exist_ok=True)
sha_hash = hashlib.sha256(checkpoint_path.read_bytes()).hexdigest()
old_filename = checkpoint_path.stem
new_filename = "-".join((old_filename, sha_hash[:8])) + ".pt"
hash_checkpoint_path = output_dir / new_filename
shutil.move(str(checkpoint_path), hash_checkpoint_path)
return hash_checkpoint_path, sha_hash
|
def hash_checkpoint(checkpoint_path: Union[str, Path], output_dir: Union[str, Path],) -> Tuple[Path, str]:
"""
Hash the checkpoint file in the format of ``<filename>-<hash>.<ext>``
to be used with ``check_hash`` of :func:`torch.hub.load_state_dict_from_url`.
Args:
checkpoint_path: Path to the checkpoint file.
output_dir: Output directory to store the hashed checkpoint file.
(will be created if not exist)
Returns:
Path to the hashed checkpoint file, the first 8 digits of SHA256 hash.
.. versionadded:: 0.5.0
"""
if isinstance(checkpoint_path, str):
checkpoint_path = Path(checkpoint_path)
if isinstance(output_dir, str):
output_dir = Path(output_dir)
output_dir.mkdir(parents=True, exist_ok=True)
sha_hash = hashlib.sha256(checkpoint_path.read_bytes()).hexdigest()
old_filename = checkpoint_path.stem
new_filename = "-".join((old_filename, sha_hash[:8])) + ".pt"
hash_checkpoint_path = output_dir / new_filename
shutil.move(str(checkpoint_path), hash_checkpoint_path)
return hash_checkpoint_path, sha_hash
|
4,660 |
def _create_plot_component(obj):
# Setup the spectrum plot
frequencies = linspace(0.0, float(SAMPLING_RATE) / 2, num=NUM_SAMPLES / 2)
obj.spectrum_data = ArrayPlotData(frequency=frequencies)
empty_amplitude = zeros(NUM_SAMPLES // 2)
obj.spectrum_data.set_data("amplitude", empty_amplitude)
obj.spectrum_plot = Plot(obj.spectrum_data)
obj.spectrum_plot.plot(
("frequency", "amplitude"), name="Spectrum", color="red"
)
obj.spectrum_plot.padding = 50
obj.spectrum_plot.title = "Spectrum"
spec_range = list(obj.spectrum_plot.plots.values())[0][
0
].value_mapper.range
spec_range.low = 0.0
spec_range.high = 5.0
obj.spectrum_plot.index_axis.title = "Frequency (Hz)"
obj.spectrum_plot.value_axis.title = "Amplitude"
# Time Series plot
times = linspace(0.0, float(NUM_SAMPLES) / SAMPLING_RATE, num=NUM_SAMPLES)
obj.time_data = ArrayPlotData(time=times)
empty_amplitude = zeros(NUM_SAMPLES)
obj.time_data.set_data("amplitude", empty_amplitude)
obj.time_plot = Plot(obj.time_data)
obj.time_plot.plot(("time", "amplitude"), name="Time", color="blue")
obj.time_plot.padding = 50
obj.time_plot.title = "Time"
obj.time_plot.index_axis.title = "Time (seconds)"
obj.time_plot.value_axis.title = "Amplitude"
time_range = list(obj.time_plot.plots.values())[0][0].value_mapper.range
time_range.low = -0.2
time_range.high = 0.2
# Spectrogram plot
spectrogram_data = zeros((NUM_SAMPLES // 2, SPECTROGRAM_LENGTH))
obj.spectrogram_plotdata = ArrayPlotData()
obj.spectrogram_plotdata.set_data("imagedata", spectrogram_data)
spectrogram_plot = Plot(obj.spectrogram_plotdata)
max_time = float(SPECTROGRAM_LENGTH * NUM_SAMPLES) / SAMPLING_RATE
max_freq = float(SAMPLING_RATE / 2)
spectrogram_plot.img_plot(
"imagedata",
name="Spectrogram",
xbounds=(0, max_time),
ybounds=(0, max_freq),
colormap=hot,
)
range_obj = spectrogram_plot.plots["Spectrogram"][0].value_mapper.range
range_obj.high = 5
range_obj.low = 0.0
spectrogram_plot.title = "Spectrogram"
obj.spectrogram_plot = spectrogram_plot
container = HPlotContainer()
container.add(obj.spectrum_plot)
container.add(obj.time_plot)
container.add(spectrogram_plot)
return container
|
def _create_plot_component(obj):
# Setup the spectrum plot
frequencies = linspace(0.0, float(SAMPLING_RATE) / 2, num=NUM_SAMPLES / 2)
obj.spectrum_data = ArrayPlotData(frequency=frequencies)
empty_amplitude = zeros(NUM_SAMPLES // 2)
obj.spectrum_data.set_data("amplitude", empty_amplitude)
obj.spectrum_plot = Plot(obj.spectrum_data)
obj.spectrum_plot.plot(
("frequency", "amplitude"), name="Spectrum", color="red"
)
obj.spectrum_plot.padding = 50
obj.spectrum_plot.title = "Spectrum"
spec_range = list(obj.spectrum_plot.plots.values())[0][0].value_mapper.range
spec_range.low = 0.0
spec_range.high = 5.0
obj.spectrum_plot.index_axis.title = "Frequency (Hz)"
obj.spectrum_plot.value_axis.title = "Amplitude"
# Time Series plot
times = linspace(0.0, float(NUM_SAMPLES) / SAMPLING_RATE, num=NUM_SAMPLES)
obj.time_data = ArrayPlotData(time=times)
empty_amplitude = zeros(NUM_SAMPLES)
obj.time_data.set_data("amplitude", empty_amplitude)
obj.time_plot = Plot(obj.time_data)
obj.time_plot.plot(("time", "amplitude"), name="Time", color="blue")
obj.time_plot.padding = 50
obj.time_plot.title = "Time"
obj.time_plot.index_axis.title = "Time (seconds)"
obj.time_plot.value_axis.title = "Amplitude"
time_range = list(obj.time_plot.plots.values())[0][0].value_mapper.range
time_range.low = -0.2
time_range.high = 0.2
# Spectrogram plot
spectrogram_data = zeros((NUM_SAMPLES // 2, SPECTROGRAM_LENGTH))
obj.spectrogram_plotdata = ArrayPlotData()
obj.spectrogram_plotdata.set_data("imagedata", spectrogram_data)
spectrogram_plot = Plot(obj.spectrogram_plotdata)
max_time = float(SPECTROGRAM_LENGTH * NUM_SAMPLES) / SAMPLING_RATE
max_freq = float(SAMPLING_RATE / 2)
spectrogram_plot.img_plot(
"imagedata",
name="Spectrogram",
xbounds=(0, max_time),
ybounds=(0, max_freq),
colormap=hot,
)
range_obj = spectrogram_plot.plots["Spectrogram"][0].value_mapper.range
range_obj.high = 5
range_obj.low = 0.0
spectrogram_plot.title = "Spectrogram"
obj.spectrogram_plot = spectrogram_plot
container = HPlotContainer()
container.add(obj.spectrum_plot)
container.add(obj.time_plot)
container.add(spectrogram_plot)
return container
|
55,023 |
def pauli_mult(pauli_1, pauli_2, wire_map=None):
"""Multiply two Pauli words together.
Two Pauli operations can be multiplied together by taking the additive
OR of their binary symplectic representations.
Args:
pauli_1 (qml.Operation): A Pauli word.
pauli_2 (qml.Operation): A Pauli word to multiply with the first one.
wire_map (dict[Union[str, int], int]): dictionary containing all wire labels used in the Pauli
word as keys, and unique integer labels as their values. If no wire map is
provided, the map will be constructed from the set of wires acted on
by the input Pauli words.
Returns:
(qml.Operation): The product of pauli_1 and pauli_2 as a Pauli word
(ignoring the global phase).
**Example**
This function enables multiplication of Pauli group elements at the level of
Pauli words, rather than matrices. For example,
.. code-block:: python
import pennylane as qml
from pennylane.grouping.pauli_group import pauli_mult
pauli_1 = qml.PauliX(0) @ qml.PauliZ(1)
pauli_2 = qml.PauliY(0) @ qml.PauliZ(1)
product = pauli_mult(pauli_1, pauli_2)
print(product)
will yield ``qml.PauliZ(0)``.
"""
# If no wire map is specified, generate one from the union of wires
# in both Paulis.
if wire_map is None:
wire_labels = set(pauli_1.wires.labels + pauli_2.wires.labels)
wire_map = {label: i for i, label in enumerate(wire_labels)}
# Check if pauli_1 and pauli_2 are the same; if so, the result is the Identity
if are_identical_pauli_words(pauli_1, pauli_2):
first_wire = list(wire_map.keys())[0]
return Identity(first_wire)
# Compute binary symplectic representations
pauli_1_binary = pauli_to_binary(pauli_1, wire_map=wire_map)
pauli_2_binary = pauli_to_binary(pauli_2, wire_map=wire_map)
bin_symp_1 = np.array([int(x) for x in pauli_1_binary])
bin_symp_2 = np.array([int(x) for x in pauli_2_binary])
# Shorthand for bitwise XOR of numpy arrays
pauli_product = bin_symp_1 ^ bin_symp_2
return binary_to_pauli(pauli_product, wire_map=wire_map)
|
def pauli_mult(pauli_1, pauli_2, wire_map=None):
"""Multiply two Pauli words together.
Two Pauli operations can be multiplied together by taking the additive
OR of their binary symplectic representations.
Args:
pauli_1 (.Operation): A Pauli word.
pauli_2 (.Operation): A Pauli word to multiply with the first one.
wire_map (dict[Union[str, int], int]): dictionary containing all wire labels used in the Pauli
word as keys, and unique integer labels as their values. If no wire map is
provided, the map will be constructed from the set of wires acted on
by the input Pauli words.
Returns:
(qml.Operation): The product of pauli_1 and pauli_2 as a Pauli word
(ignoring the global phase).
**Example**
This function enables multiplication of Pauli group elements at the level of
Pauli words, rather than matrices. For example,
.. code-block:: python
import pennylane as qml
from pennylane.grouping.pauli_group import pauli_mult
pauli_1 = qml.PauliX(0) @ qml.PauliZ(1)
pauli_2 = qml.PauliY(0) @ qml.PauliZ(1)
product = pauli_mult(pauli_1, pauli_2)
print(product)
will yield ``qml.PauliZ(0)``.
"""
# If no wire map is specified, generate one from the union of wires
# in both Paulis.
if wire_map is None:
wire_labels = set(pauli_1.wires.labels + pauli_2.wires.labels)
wire_map = {label: i for i, label in enumerate(wire_labels)}
# Check if pauli_1 and pauli_2 are the same; if so, the result is the Identity
if are_identical_pauli_words(pauli_1, pauli_2):
first_wire = list(wire_map.keys())[0]
return Identity(first_wire)
# Compute binary symplectic representations
pauli_1_binary = pauli_to_binary(pauli_1, wire_map=wire_map)
pauli_2_binary = pauli_to_binary(pauli_2, wire_map=wire_map)
bin_symp_1 = np.array([int(x) for x in pauli_1_binary])
bin_symp_2 = np.array([int(x) for x in pauli_2_binary])
# Shorthand for bitwise XOR of numpy arrays
pauli_product = bin_symp_1 ^ bin_symp_2
return binary_to_pauli(pauli_product, wire_map=wire_map)
|
36,076 |
def stash_calculation(calculation, transport):
"""Stash files from the working directory of a completed calculation to a permanent remote folder.
After a calculation has been completed, optionally stash files from the work directory to a storage location on the
same remote machine. This is useful if one wants to keep certain files from a completed calculation to be removed
from the scratch directory, because they are necessary for restarts, but that are too heavy to retrieve.
Instructions of which files to copy where are retrieved from the `stash.source_list` option.
:param calculation: the calculation job node.
:param transport: an already opened transport.
"""
from aiida.common.datastructures import StashMode
from aiida.orm import RemoteData
logger_extra = get_dblogger_extra(calculation)
stash_options = calculation.get_option('stash')
stash_mode = stash_options.get('mode', StashMode.COPY.value)
source_list = stash_options.get('source_list', [])
if not source_list:
return
if stash_mode != StashMode.COPY.value:
execlogger.warning(f'stashing mode {stash_mode} is not implemented yet.')
return
execlogger.debug(f'stashing files for calculation<{calculation.pk}>: {source_list}', extra=logger_extra)
uuid = calculation.uuid
target_basepath = os.path.join(stash_options['path'], uuid[:2], uuid[2:4], uuid[4:])
for source_filename in source_list:
source_filepath = os.path.join(calculation.get_remote_workdir(), source_filename)
target_filepath = os.path.join(target_basepath, source_filename)
# If the source file is in a (nested) directory, create those directories first in the target directory
target_dirname = os.path.dirname(target_filepath)
transport.makedirs(target_dirname, ignore_existing=True)
try:
transport.copy(source_filepath, target_filepath)
except (IOError, ValueError) as exception:
execlogger.warning(f'failed to stash {source_filepath} to {target_filepath}: {exception}')
else:
execlogger.debug(f'stashd {source_filepath} to {target_filepath}')
remote_folder = RemoteData(computer=calculation.computer, remote_path=target_basepath).store()
remote_folder.add_incoming(calculation, link_type=LinkType.CREATE, link_label='stash_folder')
|
def stash_calculation(calculation, transport):
"""Stash files from the working directory of a completed calculation to a permanent remote folder.
After a calculation has been completed, optionally stash files from the work directory to a storage location on the
same remote machine. This is useful if one wants to keep certain files from a completed calculation to be removed
from the scratch directory, because they are necessary for restarts, but that are too heavy to retrieve.
Instructions of which files to copy where are retrieved from the `stash.source_list` option.
:param calculation: the calculation job node.
:param transport: an already opened transport.
"""
from aiida.common.datastructures import StashMode
from aiida.orm import RemoteData
logger_extra = get_dblogger_extra(calculation)
stash_options = calculation.get_option('stash')
stash_mode = stash_options.get('mode', StashMode.COPY.value)
source_list = stash_options.get('source_list', [])
if not source_list:
return
if stash_mode != StashMode.COPY.value:
execlogger.warning(f'stashing mode {stash_mode} is not implemented yet.')
return
execlogger.debug(f'stashing files for calculation<{calculation.pk}>: {source_list}', extra=logger_extra)
uuid = calculation.uuid
target_basepath = os.path.join(stash_options['path'], uuid[:2], uuid[2:4], uuid[4:])
for source_filename in source_list:
source_filepath = os.path.join(calculation.get_remote_workdir(), source_filename)
target_filepath = os.path.join(target_basepath, source_filename)
# If the source file is in a (nested) directory, create those directories first in the target directory
target_dirname = os.path.dirname(target_filepath)
transport.makedirs(target_dirname, ignore_existing=True)
try:
transport.copy(source_filepath, target_filepath)
except (IOError, ValueError) as exception:
execlogger.warning(f'failed to stash {source_filepath} to {target_filepath}: {exception}')
else:
execlogger.debug(f'stashed {source_filepath} to {target_filepath}')
remote_folder = RemoteData(computer=calculation.computer, remote_path=target_basepath).store()
remote_folder.add_incoming(calculation, link_type=LinkType.CREATE, link_label='stash_folder')
|
30,670 |
def main():
"""
PARSE AND VALIDATE INTEGRATION PARAMS
"""
params = demisto.params()
user: str = params.get('username')
base_url: str = params.get('base_url', "").rstrip('/')
tenant_name: str = params.get('tenant_name')
username = f"{user}@{tenant_name}"
password: str = params.get('password')
token = params.get('token')
verify_certificate: bool = not params.get('insecure', False)
proxy: bool = params.get('proxy', False)
tenant_url = f"{base_url}/{tenant_name}/Staffing/"
commands = {
"test-module": test_module,
"workday-list-workers": list_workers_command
}
command = demisto.command()
LOG(f'Command being called is {command}')
try:
client = Client(tenant_url=tenant_url, verify_certificate=verify_certificate, proxy=proxy,
tenant_name=tenant_name, token=token, username=username, password=password)
if command in commands:
return_results(commands[command](client, demisto.args()))
except Exception as e:
return_error(f'Failed to execute {demisto.command()} command. Error: {str(e)}')
|
def main():
"""
PARSE AND VALIDATE INTEGRATION PARAMS
"""
params = demisto.params()
user: str = params.get('username')
base_url: str = params.get('base_url', "").rstrip('/')
tenant_name: str = params.get('tenant_name')
username = f"{user}@{tenant_name}"
password: str = params.get('password')
token = params.get('token')
verify_certificate: bool = not params.get('insecure', False)
proxy: bool = params.get('proxy', False)
tenant_url = f"{base_url}/{tenant_name}/Staffing/"
commands = {
"test-module": test_module,
"workday-list-workers": list_workers_command,
}
command = demisto.command()
LOG(f'Command being called is {command}')
try:
client = Client(tenant_url=tenant_url, verify_certificate=verify_certificate, proxy=proxy,
tenant_name=tenant_name, token=token, username=username, password=password)
if command in commands:
return_results(commands[command](client, demisto.args()))
except Exception as e:
return_error(f'Failed to execute {demisto.command()} command. Error: {str(e)}')
|
54,095 |
def aggregate_to_substations(n, buses_i=None):
# can be used to aggregate a selection of buses to electrically closest neighbors
# if no buses are given, nodes that are no substations or without offshore connection are aggregated
if buses_i is None:
logger.info("Aggregating buses that are no substations or have no valid offshore connection")
buses_i = list(set(n.buses.index)-set(n.generators.bus)-set(n.loads.bus))
busmap = n.buses.index.to_series()
index = [np.append(["Line" for c in range(len(n.lines))],
["Link" for c in range(len(n.links))]),
np.append(n.lines.index, n.links.index)]
#under_construction lines should be last choice, but weight should be < inf in case no other node is reachable, hence 1e-3
weight = pd.Series(np.append((n.lines.length/n.lines.s_nom.apply(lambda b: b if b>0 else 1e-3)).values,
(n.links.length/n.links.p_nom.apply(lambda b: b if b>0 else 1e-3)).values),
index=index)
adj = n.adjacency_matrix(branch_components=['Line', 'Link'], weights=weight)
dist = dijkstra(adj, directed=False, indices=n.buses.index.get_indexer(buses_i))
dist[:, n.buses.index.get_indexer(buses_i)] = np.inf #bus in buses_i should not be assigned to different bus in buses_i
#restrict to same country:
for bus in buses_i:
country_buses = n.buses[~n.buses.country.isin([n.buses.loc[bus].country])].index
dist[n.buses.loc[buses_i].index.get_indexer([bus]),n.buses.index.get_indexer(country_buses)] = np.inf
assign_to = dist.argmin(axis=1)
busmap.loc[buses_i] = n.buses.iloc[assign_to].index
clustering = get_clustering_from_busmap(n, busmap,
bus_strategies=dict(country=_make_consense("Bus", "country")),
aggregate_generators_weighted=True,
aggregate_generators_carriers=None,
aggregate_one_ports=["Load", "StorageUnit"],
line_length_factor=1.0,
generator_strategies={'p_nom_max': 'sum'},
scale_link_capital_costs=False)
return clustering.network, busmap
|
def aggregate_to_substations(n, buses_i=None):
# can be used to aggregate a selection of buses to electrically closest neighbors
# if no buses are given, nodes that are no substations or without offshore connection are aggregated
if buses_i is None:
logger.info("Aggregating buses that are no substations or have no valid offshore connection")
buses_i = list(set(n.buses.index)-set(n.generators.bus)-set(n.loads.bus))
weight = pd.concat({'Line': n.lines.length/n.lines.s_nom.clip(1e-3),
'Link': n.links.length/n.links.p_nom.clip(1e-3)})
adj = n.adjacency_matrix(branch_components=['Line', 'Link'], weights=weight)
bus_indexer = n.buses.index.get_indexer(buses_i)
dist = pd.DataFrame(dijkstra(adj, directed=False, indices=bus_indexer), buses_i, n.buses.index)
dist[buses_i] = np.inf # bus in buses_i should not be assigned to different bus in buses_i
for c in n.buses.country.unique():
incountry_b = n.buses.country == c
dist.loc[incountry_b, ~incountry_b] = np.inf
busmap = n.buses.index.to_series()
busmap.loc[buses_i] = dist.idxmin(1)
clustering = get_clustering_from_busmap(n, busmap,
bus_strategies=dict(country=_make_consense("Bus", "country")),
aggregate_generators_weighted=True,
aggregate_generators_carriers=None,
aggregate_one_ports=["Load", "StorageUnit"],
line_length_factor=1.0,
generator_strategies={'p_nom_max': 'sum'},
scale_link_capital_costs=False)
return clustering.network, busmap
|
30,829 |
def get_security_profiles_command():
"""
Get information about profiles.
"""
security_profile = demisto.args().get('security_profile')
if security_profile:
xpath = f'{XPATH_RULEBASE}profiles/{security_profile}'
else:
xpath = f'{XPATH_RULEBASE}profiles'
result = get_security_profile(xpath)
if security_profile:
security_profiles = result.get('response', {}).get('result', {})
else:
security_profiles = result.get('response', {}).get('result', {}).get('profiles', {})
if '@dirtyId' in security_profiles:
LOG(f'Found uncommitted item:\n{security_profiles}')
raise Exception('Please commit the instance prior to getting the security profiles.')
human_readable = ''
content: List[Dict[str, Any]] = []
context = {}
if 'spyware' in security_profiles:
profiles = security_profiles.get('spyware').get('entry', {})
if isinstance(profiles, list):
for profile in profiles:
rules = profile.get('rules', {}).get('entry', [])
spyware_rules = prettify_profiles_rules(rules)
content.append({
'Name': profile['@name'],
'Rules': spyware_rules
})
else:
rules = profiles.get('rules', {}).get('entry', [])
spyware_rules = prettify_profiles_rules(rules)
content = {
'Name': profiles['@name'],
'Rules': spyware_rules
}
human_readable = tableToMarkdown('Anti Spyware Profiles', content)
context.update({"Panorama.Spyware(val.Name == obj.Name)": content})
if 'virus' in security_profiles:
profiles = security_profiles.get('virus').get('entry', [])
if isinstance(profiles, list):
for profile in profiles:
rules = profile.get('decoder', {}).get('entry', [])
antivirus_rules = prettify_profiles_rules(rules)
content.append({
'Name': profile['@name'],
'Decoder': antivirus_rules
})
else:
rules = profiles.get('decoder', {}).get('entry', [])
antivirus_rules = prettify_profiles_rules(rules)
content = {
'Name': profiles['@name'],
'Rules': antivirus_rules
}
human_readable += tableToMarkdown('Antivirus Profiles', content)
context.update({"Panorama.Antivirus(val.Name == obj.Name)": content})
if 'file-blocking' in security_profiles:
profiles = security_profiles.get('file-blocking').get('entry', {})
if isinstance(profiles, list):
for profile in profiles:
rules = profile.get('rules', {}).get('entry', [])
file_blocking_rules = prettify_profiles_rules(rules)
content.append({
'Name': profile['@name'],
'Rules': file_blocking_rules
})
else:
rules = profiles.get('rules', {}).get('entry', [])
file_blocking_rules = prettify_profiles_rules(rules)
content = {
'Name': profiles['@name'],
'Rules': file_blocking_rules
}
human_readable += tableToMarkdown('File Blocking Profiles', content)
context.update({"Panorama.FileBlocking(val.Name == obj.Name)": content})
if 'vulnerability' in security_profiles:
profiles = security_profiles.get('vulnerability').get('entry', {})
if isinstance(profiles, list):
for profile in profiles:
rules = profile.get('rules', {}).get('entry', [])
vulnerability_rules = prettify_profiles_rules(rules)
content.append({
'Name': profile['@name'],
'Rules': vulnerability_rules
})
else:
rules = profiles.get('rules', {}).get('entry', [])
vulnerability_rules = prettify_profiles_rules(rules)
content = {
'Name': profiles['@name'],
'Rules': vulnerability_rules
}
human_readable += tableToMarkdown('vulnerability Protection Profiles', content)
context.update({"Panorama.Vulnerability(val.Name == obj.Name)": content})
if 'data-filtering' in security_profiles:
profiles = security_profiles.get('data-filtering').get('entry', {})
if isinstance(profiles, list):
for profile in profiles:
rules = profile.get('rules', {}).get('entry', [])
data_filtering_rules = prettify_data_filtering_rules(rules)
content.append({
'Name': profile['@name'],
'Rules': data_filtering_rules
})
else:
rules = profiles.get('rules', {}).get('entry', [])
data_filtering_rules = prettify_data_filtering_rules(rules)
content = {
'Name': profiles['@name'],
'Rules': data_filtering_rules
}
human_readable += tableToMarkdown('Data Filtering Profiles', content)
context.update({"Panorama.DataFiltering(val.Name == obj.Name)": content})
if 'url-filtering' in security_profiles:
profiles = security_profiles.get('url-filtering').get('entry', [])
if isinstance(profiles, list):
for profile in profiles:
url_filtering_rules = prettify_get_url_filter(profile)
content.append({
'Name': profile['@name'],
'Rules': url_filtering_rules
})
else:
url_filtering_rules = prettify_get_url_filter(profiles)
content = {
'Name': profiles['@name'],
'Rules': url_filtering_rules
}
human_readable += tableToMarkdown('URL Filtering Profiles', content)
context.update({"Panorama.URLFilter(val.Name == obj.Name)": content})
if 'wildfire-analysis' in security_profiles:
profiles = security_profiles.get('wildfire-analysis').get('entry', [])
if isinstance(profiles, list):
for profile in profiles:
rules = profile.get('rules', {}).get('entry', [])
wildfire_rules = prettify_wildfire_rules(rules)
content.append({
'Name': profile['@name'],
'Rules': wildfire_rules
})
else:
rules = profiles.get('rules', {}).get('entry', [])
wildfire_rules = prettify_wildfire_rules(rules)
content = {
'Name': profiles['@name'],
'Rules': wildfire_rules
}
human_readable += tableToMarkdown('WildFire Profiles', content)
context.update({"Panorama.WildFire(val.Name == obj.Name)": content})
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': result,
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': human_readable,
'EntryContext': context
})
|
def get_security_profiles_command():
"""
Get information about profiles.
"""
security_profile = demisto.args().get('security_profile')
if security_profile:
xpath = f'{XPATH_RULEBASE}profiles/{security_profile}'
else:
xpath = f'{XPATH_RULEBASE}profiles'
result = get_security_profile(xpath)
if security_profile:
security_profiles = result.get('response', {}).get('result', {})
else:
security_profiles = result.get('response', {}).get('result', {}).get('profiles', {})
if '@dirtyId' in security_profiles:
LOG(f'Found uncommitted item:\n{security_profiles}')
raise Exception('Please commit the instance prior to getting the security profiles.')
human_readable = ''
content: List[Dict[str, Any]] = []
context = {}
if 'spyware' in security_profiles:
profiles = security_profiles.get('spyware').get('entry', {})
if isinstance(profiles, list):
for profile in profiles:
rules = profile.get('rules', {}).get('entry', [])
spyware_rules = prettify_profiles_rules(rules)
content.append({
'Name': profile['@name'],
'Rules': spyware_rules
})
else:
rules = profiles.get('rules', {}).get('entry', [])
spyware_rules = prettify_profiles_rules(rules)
content = {
'Name': profiles['@name'],
'Rules': spyware_rules
}
human_readable = tableToMarkdown('Anti Spyware Profiles', content)
context.update({"Panorama.Spyware(val.Name == obj.Name)": content})
if 'virus' in security_profiles:
profiles = security_profiles.get('virus').get('entry', [])
if isinstance(profiles, list):
for profile in profiles:
rules = profile.get('decoder', {}).get('entry', [])
antivirus_rules = prettify_profiles_rules(rules)
content.append({
'Name': profile['@name'],
'Decoder': antivirus_rules
})
else:
rules = profiles.get('decoder', {}).get('entry', [])
antivirus_rules = prettify_profiles_rules(rules)
content = {
'Name': profiles['@name'],
'Rules': antivirus_rules
}
human_readable += tableToMarkdown('Antivirus Profiles', content)
context.update({"Panorama.Antivirus(val.Name == obj.Name)": content})
if 'file-blocking' in security_profiles:
profiles = security_profiles.get('file-blocking').get('entry', {})
if isinstance(profiles, list):
for profile in profiles:
rules = profile.get('rules', {}).get('entry', [])
file_blocking_rules = prettify_profiles_rules(rules)
content.append({
'Name': profile['@name'],
'Rules': file_blocking_rules
})
else:
rules = profiles.get('rules', {}).get('entry', [])
file_blocking_rules = prettify_profiles_rules(rules)
content = {
'Name': profiles['@name'],
'Rules': file_blocking_rules
}
human_readable += tableToMarkdown('File Blocking Profiles', content)
context.update({"Panorama.FileBlocking(val.Name == obj.Name)": content})
if 'vulnerability' in security_profiles:
profiles = security_profiles.get('vulnerability', {}).get('entry', {})
if isinstance(profiles, list):
for profile in profiles:
rules = profile.get('rules', {}).get('entry', [])
vulnerability_rules = prettify_profiles_rules(rules)
content.append({
'Name': profile['@name'],
'Rules': vulnerability_rules
})
else:
rules = profiles.get('rules', {}).get('entry', [])
vulnerability_rules = prettify_profiles_rules(rules)
content = {
'Name': profiles['@name'],
'Rules': vulnerability_rules
}
human_readable += tableToMarkdown('vulnerability Protection Profiles', content)
context.update({"Panorama.Vulnerability(val.Name == obj.Name)": content})
if 'data-filtering' in security_profiles:
profiles = security_profiles.get('data-filtering').get('entry', {})
if isinstance(profiles, list):
for profile in profiles:
rules = profile.get('rules', {}).get('entry', [])
data_filtering_rules = prettify_data_filtering_rules(rules)
content.append({
'Name': profile['@name'],
'Rules': data_filtering_rules
})
else:
rules = profiles.get('rules', {}).get('entry', [])
data_filtering_rules = prettify_data_filtering_rules(rules)
content = {
'Name': profiles['@name'],
'Rules': data_filtering_rules
}
human_readable += tableToMarkdown('Data Filtering Profiles', content)
context.update({"Panorama.DataFiltering(val.Name == obj.Name)": content})
if 'url-filtering' in security_profiles:
profiles = security_profiles.get('url-filtering').get('entry', [])
if isinstance(profiles, list):
for profile in profiles:
url_filtering_rules = prettify_get_url_filter(profile)
content.append({
'Name': profile['@name'],
'Rules': url_filtering_rules
})
else:
url_filtering_rules = prettify_get_url_filter(profiles)
content = {
'Name': profiles['@name'],
'Rules': url_filtering_rules
}
human_readable += tableToMarkdown('URL Filtering Profiles', content)
context.update({"Panorama.URLFilter(val.Name == obj.Name)": content})
if 'wildfire-analysis' in security_profiles:
profiles = security_profiles.get('wildfire-analysis').get('entry', [])
if isinstance(profiles, list):
for profile in profiles:
rules = profile.get('rules', {}).get('entry', [])
wildfire_rules = prettify_wildfire_rules(rules)
content.append({
'Name': profile['@name'],
'Rules': wildfire_rules
})
else:
rules = profiles.get('rules', {}).get('entry', [])
wildfire_rules = prettify_wildfire_rules(rules)
content = {
'Name': profiles['@name'],
'Rules': wildfire_rules
}
human_readable += tableToMarkdown('WildFire Profiles', content)
context.update({"Panorama.WildFire(val.Name == obj.Name)": content})
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': result,
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': human_readable,
'EntryContext': context
})
|
12,046 |
def expand_filespecs(file_specs, files_expected=True):
"""
Find all matching file paths from a list of file-specs.
Parameters
----------
file_specs : iterable of str
File paths which may contain ``~`` elements or wildcards.
files_expected : bool
Whether file is expected to exist (i.e. for load).
Returns
-------
list of str
if files_expected is ``True``:
A well-ordered list of matching absolute file paths.
If any of the file-specs match no existing files, an
exception is raised.
if files_expected is ``False``:
A list of expanded file paths.
"""
# Remove any hostname component - currently unused
filenames = [
os.path.abspath(
os.path.expanduser(fn[2:] if fn.startswith("//") else fn)
)
for fn in file_specs
]
if files_expected:
# Try to expand all filenames as globs
glob_expanded = OrderedDict(
[[fn, sorted(glob.glob(fn))] for fn in filenames]
)
# If any of the specs expanded to an empty list then raise an error
all_expanded = glob_expanded.values()
if not all(all_expanded):
msg = "One or more of the files specified did not exist:"
for pattern, expanded in glob_expanded.items():
if expanded:
msg += '\n - "{}" matched {} file(s)'.format(
pattern, len(expanded)
)
else:
msg += '\n * "{}" didn\'t match any files'.format(
pattern
)
raise IOError(msg)
result = [fname for fnames in all_expanded for fname in fnames]
else:
result = filenames
return result
|
def expand_filespecs(file_specs, files_expected=True):
"""
Find all matching file paths from a list of file-specs.
Parameters
----------
file_specs : iterable of str
File paths which may contain ``~`` elements or wildcards.
files_expected : bool, default=True
Whether file is expected to exist (i.e. for load).
Returns
-------
list of str
if files_expected is ``True``:
A well-ordered list of matching absolute file paths.
If any of the file-specs match no existing files, an
exception is raised.
if files_expected is ``False``:
A list of expanded file paths.
"""
# Remove any hostname component - currently unused
filenames = [
os.path.abspath(
os.path.expanduser(fn[2:] if fn.startswith("//") else fn)
)
for fn in file_specs
]
if files_expected:
# Try to expand all filenames as globs
glob_expanded = OrderedDict(
[[fn, sorted(glob.glob(fn))] for fn in filenames]
)
# If any of the specs expanded to an empty list then raise an error
all_expanded = glob_expanded.values()
if not all(all_expanded):
msg = "One or more of the files specified did not exist:"
for pattern, expanded in glob_expanded.items():
if expanded:
msg += '\n - "{}" matched {} file(s)'.format(
pattern, len(expanded)
)
else:
msg += '\n * "{}" didn\'t match any files'.format(
pattern
)
raise IOError(msg)
result = [fname for fnames in all_expanded for fname in fnames]
else:
result = filenames
return result
|
41,200 |
def test_clifford_tableau():
seen_tableau = []
for trans_x, trans_z in _all_rotation_pairs():
gate = cirq.SingleQubitCliffordGate.from_xz_map(trans_x, trans_z)
tableau_matrix = _extract_clifford_tableau_to_matrix(gate.clifford_tableau)
tableau_number = sum(2 ** i * t for i, t in enumerate(tableau_matrix.ravel()))
seen_tableau.append(tableau_number)
# Satify the symplectic property
assert sum(tableau_matrix[0, :2] * tableau_matrix[1, 1::-1]) % 2 == 1
# Should not have any duplication.
assert len(set(seen_tableau)) == 24
|
def test_clifford_tableau():
seen_tableau = []
for trans_x, trans_z in _all_rotation_pairs():
gate = cirq.SingleQubitCliffordGate.from_xz_map(trans_x, trans_z)
tableau_matrix = _extract_clifford_tableau_to_matrix(gate.clifford_tableau)
tableau_number = sum(2 ** i * t for i, t in enumerate(tableau_matrix.ravel()))
seen_tableau.append(tableau_number)
# Satisfy the symplectic property
assert sum(tableau_matrix[0, :2] * tableau_matrix[1, 1::-1]) % 2 == 1
# Should not have any duplication.
assert len(set(seen_tableau)) == 24
|
28,505 |
def _model_predict(
model: Any, # TODO Narrow this down
X: SUPPORTED_FEAT_TYPES,
task: int,
batch_size: Optional[int] = None
) -> np.ndarray:
""" Generates the predictions from a model.
This is seperated out into a seperate function to allow for multiprocessing
and perform parallel predictions.
# TODO issue 1169
# As CV models are collected into a VotingRegressor which does not
# not support multioutput regression, we need to manually transform and
# average their predictions.
Parameters
----------
model: Any
The model to perform predictions with
X: array-like (n_samples, ...)
The data to perform predictions on.
task: int
The int identifier indicating the kind of task that the model was
trained on.
batchsize: Optional[int] = None
If the model supports batch_size predictions then it's possible to pass
this in as a parameter.
Returns
-------
np.ndarray (n_samples, ...)
The predictions produced by the model
"""
# Copy the array and ensure is has the attr 'shape'
X_ = np.asarray(X) if isinstance(X, list) else X.copy()
assert X_.shape[0] >= 1, f"X must have more than 1 sample but has {X_.shape[0]}"
with warnings.catch_warnings():
# TODO issue 1169
# VotingRegressors aren't meant to be used for multioutput but we are
# using them anyways. Hence we need to manually get their outputs and
# average the right index as it averages on wrong dimension for us.
# We should probaly move away from this in the future.
#
# def VotingRegressor.predict()
# return np.average(self._predict(X), axis=1) <- wrong axis
#
if task == MULTIOUTPUT_REGRESSION and isinstance(model, VotingRegressor):
voting_regressor = model
prediction = np.average(voting_regressor.transform(X_), axis=2).T
else:
if task in CLASSIFICATION_TASKS:
predict_func = model.predict_proba
else:
predict_func = model.predict
if batch_size is not None and hasattr(model, 'batch_size'):
prediction = predict_func(X_, batch_size=batch_size)
else:
prediction = predict_func(X_)
# Check that probability values lie between 0 and 1.
if task in CLASSIFICATION_TASKS:
assert (prediction >= 0).all() and (prediction <= 1).all(), \
f"For {model}, prediction probability not within [0, 1]!"
assert prediction.shape[0] == X_.shape[0], \
f"Prediction shape {model} is {prediction.shape} while X_.shape is {X_.shape}"
return prediction
|
def _model_predict(
model: Any, # TODO Narrow this down
X: SUPPORTED_FEAT_TYPES,
task: int,
batch_size: Optional[int] = None
) -> np.ndarray:
""" Generates the predictions from a model.
This is seperated out into a seperate function to allow for multiprocessing
and perform parallel predictions.
# TODO issue 1169
# As CV models are collected into a VotingRegressor which does not
# not support multioutput regression, we need to manually transform and
# average their predictions.
Parameters
----------
model: Any
The model to perform predictions with
X: array-like (n_samples, ...)
The data to perform predictions on.
task: int
The int identifier indicating the kind of task that the model was
trained on.
batchsize: Optional[int] = None
If the model supports batch_size predictions then it's possible to pass
this in as an argument.
Returns
-------
np.ndarray (n_samples, ...)
The predictions produced by the model
"""
# Copy the array and ensure is has the attr 'shape'
X_ = np.asarray(X) if isinstance(X, list) else X.copy()
assert X_.shape[0] >= 1, f"X must have more than 1 sample but has {X_.shape[0]}"
with warnings.catch_warnings():
# TODO issue 1169
# VotingRegressors aren't meant to be used for multioutput but we are
# using them anyways. Hence we need to manually get their outputs and
# average the right index as it averages on wrong dimension for us.
# We should probaly move away from this in the future.
#
# def VotingRegressor.predict()
# return np.average(self._predict(X), axis=1) <- wrong axis
#
if task == MULTIOUTPUT_REGRESSION and isinstance(model, VotingRegressor):
voting_regressor = model
prediction = np.average(voting_regressor.transform(X_), axis=2).T
else:
if task in CLASSIFICATION_TASKS:
predict_func = model.predict_proba
else:
predict_func = model.predict
if batch_size is not None and hasattr(model, 'batch_size'):
prediction = predict_func(X_, batch_size=batch_size)
else:
prediction = predict_func(X_)
# Check that probability values lie between 0 and 1.
if task in CLASSIFICATION_TASKS:
assert (prediction >= 0).all() and (prediction <= 1).all(), \
f"For {model}, prediction probability not within [0, 1]!"
assert prediction.shape[0] == X_.shape[0], \
f"Prediction shape {model} is {prediction.shape} while X_.shape is {X_.shape}"
return prediction
|
8,888 |
def run(settings, pid_file, daemon=False):
"""Run the bot with these ``settings``
:param settings: bot's settings to run with
:type settings: :class:`sopel.config.Config`
:param str pid_file: path to the bot's PID file
:param bool daemon: tell if the bot should be ran as a daemon
"""
delay = 20
# Acts as a welcome message, showing the program and platform version at start
print_version()
# Also show the location of the config file used to load settings
print("\nLoaded config file: {}".format(settings.filename))
if not settings.core.ca_certs:
tools.stderr(
'Could not open CA certificates file. SSL will not work properly!')
# Define empty variable `p` for bot
p = None
while True:
if p and p.hasquit: # Check if `hasquit` was set for bot during disconnected phase
break
try:
p = bot.Sopel(settings, daemon=daemon)
p.setup()
p.set_signal_handlers()
except KeyboardInterrupt:
tools.stderr('Bot setup interrupted')
break
except Exception:
# In that case, there is nothing we can do.
# If the bot can't setup itself, then it won't run.
# This is a critical case scenario, where the user should have
# direct access to the exception traceback right in the console.
# Besides, we can't know if logging has been set up or not, so
# we can't rely on that here.
tools.stderr('Unexpected error in bot setup')
raise
try:
p.run(settings.core.host, int(settings.core.port))
except KeyboardInterrupt:
break
except Exception:
err_log = logging.getLogger('sopel.exceptions')
err_log.exception('Critical exception in core')
err_log.error('----------------------------------------')
# TODO: This should be handled by command_start
# All we should need here is a return value, but replacing the
# os._exit() call below (at the end) broke ^C.
# This one is much harder to test, so until that one's sorted it
# isn't worth the risk of trying to remove this one.
os.unlink(pid_file)
os._exit(1)
if not isinstance(delay, int):
break
if p.wantsrestart:
return -1
if p.hasquit:
break
LOGGER.warning('Disconnected. Reconnecting in %s seconds...', delay)
time.sleep(delay)
# TODO: This should be handled by command_start
# All we should need here is a return value, but making this
# a return makes Sopel hang on ^C after it says "Closed!"
os.unlink(pid_file)
os._exit(0)
|
def run(settings, pid_file, daemon=False):
"""Run the bot with these ``settings``
:param settings: settings with which to run the bot
:type settings: :class:`sopel.config.Config`
:param str pid_file: path to the bot's PID file
:param bool daemon: tell if the bot should be ran as a daemon
"""
delay = 20
# Acts as a welcome message, showing the program and platform version at start
print_version()
# Also show the location of the config file used to load settings
print("\nLoaded config file: {}".format(settings.filename))
if not settings.core.ca_certs:
tools.stderr(
'Could not open CA certificates file. SSL will not work properly!')
# Define empty variable `p` for bot
p = None
while True:
if p and p.hasquit: # Check if `hasquit` was set for bot during disconnected phase
break
try:
p = bot.Sopel(settings, daemon=daemon)
p.setup()
p.set_signal_handlers()
except KeyboardInterrupt:
tools.stderr('Bot setup interrupted')
break
except Exception:
# In that case, there is nothing we can do.
# If the bot can't setup itself, then it won't run.
# This is a critical case scenario, where the user should have
# direct access to the exception traceback right in the console.
# Besides, we can't know if logging has been set up or not, so
# we can't rely on that here.
tools.stderr('Unexpected error in bot setup')
raise
try:
p.run(settings.core.host, int(settings.core.port))
except KeyboardInterrupt:
break
except Exception:
err_log = logging.getLogger('sopel.exceptions')
err_log.exception('Critical exception in core')
err_log.error('----------------------------------------')
# TODO: This should be handled by command_start
# All we should need here is a return value, but replacing the
# os._exit() call below (at the end) broke ^C.
# This one is much harder to test, so until that one's sorted it
# isn't worth the risk of trying to remove this one.
os.unlink(pid_file)
os._exit(1)
if not isinstance(delay, int):
break
if p.wantsrestart:
return -1
if p.hasquit:
break
LOGGER.warning('Disconnected. Reconnecting in %s seconds...', delay)
time.sleep(delay)
# TODO: This should be handled by command_start
# All we should need here is a return value, but making this
# a return makes Sopel hang on ^C after it says "Closed!"
os.unlink(pid_file)
os._exit(0)
|
59,087 |
def validate_transfer_inputs(inputs, ctx): # pylint: disable=inconsistent-return-statements, unused-argument
"""Check that the instructions dict and the source nodes are consistent"""
source_nodes = inputs['source_nodes']
instructions = inputs['instructions']
computer = inputs['metadata']['computer']
instructions_dict = instructions.get_dict()
local_files = instructions_dict.get('local_files', list())
remote_files = instructions_dict.get('remote_files', list())
symlink_files = instructions_dict.get('symlink_files', list())
source_nodes_provided = set()
source_nodes_required = set()
error_message_list = []
for node_label, node_object in source_nodes.items():
if isinstance(node_object, orm.RemoteData):
if computer.name != node_object.computer.name:
error_message = ' > remote node `{}` points to computer `{}`, not the one being used (`{}`)'
error_message = error_message.format(node_label, node_object.computer.name, computer.name)
error_message_list.append(error_message)
for source_label, _, _ in local_files:
source_nodes_required.add(source_label)
source_node = source_nodes.get(source_label, None)
error_message = check_node_type('local_files', source_label, source_node, orm.FolderData)
if error_message:
error_message_list.append(error_message)
for source_label, _, _ in remote_files:
source_nodes_required.add(source_label)
source_node = source_nodes.get(source_label, None)
error_message = check_node_type('remote_files', source_label, source_node, orm.RemoteData)
if error_message:
error_message_list.append(error_message)
for source_label, _, _ in symlink_files:
source_nodes_required.add(source_label)
source_node = source_nodes.get(source_label, None)
error_message = check_node_type('symlink_files', source_label, source_node, orm.RemoteData)
if error_message:
error_message_list.append(error_message)
unrequired_nodes = source_nodes_provided.difference(source_nodes_required)
for node_label in unrequired_nodes:
error_message = ' > node `{}` provided as inputs is not being used'
error_message = error_message.format(node_label)
error_message_list.append(error_message)
if len(error_message_list) > 0:
error_message = '\n\n'
for error_add in error_message_list:
error_message = error_message + error_add + '\n'
return error_message
|
def validate_transfer_inputs(inputs, ctx): # pylint: disable=inconsistent-return-statements, unused-argument
"""Check that the instructions dict and the source nodes are consistent"""
source_nodes = inputs['source_nodes']
instructions = inputs['instructions']
computer = inputs['metadata']['computer']
instructions_dict = instructions.get_dict()
local_files = instructions_dict.get('local_files', [])
remote_files = instructions_dict.get('remote_files', [])
symlink_files = instructions_dict.get('symlink_files', [])
source_nodes_provided = set()
source_nodes_required = set()
error_message_list = []
for node_label, node_object in source_nodes.items():
if isinstance(node_object, orm.RemoteData):
if computer.name != node_object.computer.name:
error_message = ' > remote node `{}` points to computer `{}`, not the one being used (`{}`)'
error_message = error_message.format(node_label, node_object.computer.name, computer.name)
error_message_list.append(error_message)
for source_label, _, _ in local_files:
source_nodes_required.add(source_label)
source_node = source_nodes.get(source_label, None)
error_message = check_node_type('local_files', source_label, source_node, orm.FolderData)
if error_message:
error_message_list.append(error_message)
for source_label, _, _ in remote_files:
source_nodes_required.add(source_label)
source_node = source_nodes.get(source_label, None)
error_message = check_node_type('remote_files', source_label, source_node, orm.RemoteData)
if error_message:
error_message_list.append(error_message)
for source_label, _, _ in symlink_files:
source_nodes_required.add(source_label)
source_node = source_nodes.get(source_label, None)
error_message = check_node_type('symlink_files', source_label, source_node, orm.RemoteData)
if error_message:
error_message_list.append(error_message)
unrequired_nodes = source_nodes_provided.difference(source_nodes_required)
for node_label in unrequired_nodes:
error_message = ' > node `{}` provided as inputs is not being used'
error_message = error_message.format(node_label)
error_message_list.append(error_message)
if len(error_message_list) > 0:
error_message = '\n\n'
for error_add in error_message_list:
error_message = error_message + error_add + '\n'
return error_message
|
41,754 |
def prepare_data():
globe_indexer = allennlp.data.token_indexers.SingleIdTokenIndexer(
lowercase_tokens=True
)
tokenizer = allennlp.data.tokenizers.WordTokenizer(
word_splitter=allennlp.data.tokenizers.word_splitter.JustSpacesWordSplitter(),
)
reader = allennlp.data.dataset_readers.TextClassificationJsonReader(
token_indexers={'tokens': globe_indexer},
tokenizer=tokenizer,
)
train_dataset = reader.read(
'https://s3-us-west-2.amazonaws.com/allennlp/datasets/imdb/train.jsonl'
)
valid_dataset = reader.read(
'https://s3-us-west-2.amazonaws.com/allennlp/datasets/imdb/dev.jsonl'
)
vocab = allennlp.data.Vocabulary.from_instances(train_dataset)
return train_dataset, valid_dataset, vocab
|
def prepare_data():
glove_indexer = allennlp.data.token_indexers.SingleIdTokenIndexer(
lowercase_tokens=True
)
tokenizer = allennlp.data.tokenizers.WordTokenizer(
word_splitter=allennlp.data.tokenizers.word_splitter.JustSpacesWordSplitter(),
)
reader = allennlp.data.dataset_readers.TextClassificationJsonReader(
token_indexers={'tokens': globe_indexer},
tokenizer=tokenizer,
)
train_dataset = reader.read(
'https://s3-us-west-2.amazonaws.com/allennlp/datasets/imdb/train.jsonl'
)
valid_dataset = reader.read(
'https://s3-us-west-2.amazonaws.com/allennlp/datasets/imdb/dev.jsonl'
)
vocab = allennlp.data.Vocabulary.from_instances(train_dataset)
return train_dataset, valid_dataset, vocab
|
5,384 |
def test_absent():
"""
Test to ensure that the named user is absent.
"""
dbname = "my_test"
ret = {"name": dbname, "result": True, "comment": "", "changes": {}}
mock_db_exists = MagicMock(return_value=True)
mock_remove = MagicMock(return_value=True)
mock_remove_fail = MagicMock(return_value=False)
mock_err = MagicMock(return_value="salt")
with patch.dict(
mysql_database.__salt__,
{"mysql.db_exists": mock_db_exists, "mysql.db_remove": mock_remove},
):
with patch.dict(mysql_database.__opts__, {"test": True}):
comt = "Database {} is present and needs to be removed".format(dbname)
ret.update({"comment": comt, "result": None})
assert mysql_database.absent(dbname) == ret
with patch.dict(mysql_database.__opts__, {}):
comt = "Database {} has been removed".format(dbname)
ret.update({"comment": comt, "result": True})
ret.update({"changes": {dbname: "Absent"}})
assert mysql_database.absent(dbname) == ret
with patch.dict(
mysql_database.__salt__,
{"mysql.db_exists": mock_db_exists, "mysql.db_remove": mock_remove_fail},
):
with patch.dict(mysql_database.__opts__, {}):
with patch.object(mysql_database, "_get_mysql_error", mock_err):
ret["changes"] = {}
comt = "Unable to remove database {} ({})".format(dbname, "salt")
ret.update({"comment": comt, "result": False})
assert mysql_database.absent(dbname) == ret
|
def test_absent():
"""
Test to ensure that the named database is absent.
"""
dbname = "my_test"
ret = {"name": dbname, "result": True, "comment": "", "changes": {}}
mock_db_exists = MagicMock(return_value=True)
mock_remove = MagicMock(return_value=True)
mock_remove_fail = MagicMock(return_value=False)
mock_err = MagicMock(return_value="salt")
with patch.dict(
mysql_database.__salt__,
{"mysql.db_exists": mock_db_exists, "mysql.db_remove": mock_remove},
):
with patch.dict(mysql_database.__opts__, {"test": True}):
comt = "Database {} is present and needs to be removed".format(dbname)
ret.update({"comment": comt, "result": None})
assert mysql_database.absent(dbname) == ret
with patch.dict(mysql_database.__opts__, {}):
comt = "Database {} has been removed".format(dbname)
ret.update({"comment": comt, "result": True})
ret.update({"changes": {dbname: "Absent"}})
assert mysql_database.absent(dbname) == ret
with patch.dict(
mysql_database.__salt__,
{"mysql.db_exists": mock_db_exists, "mysql.db_remove": mock_remove_fail},
):
with patch.dict(mysql_database.__opts__, {}):
with patch.object(mysql_database, "_get_mysql_error", mock_err):
ret["changes"] = {}
comt = "Unable to remove database {} ({})".format(dbname, "salt")
ret.update({"comment": comt, "result": False})
assert mysql_database.absent(dbname) == ret
|
3,514 |
def map_host_to_project(request):
"""
Take the incoming host, and map it to the proper Project.
We check, in order:
* The ``HTTP_X_RTD_SLUG`` host header for explicit Project mapping
* The ``PUBLIC_DOMAIN`` where we can use the subdomain as the project name
* The hostname without port information, which maps to ``Domain`` objects
"""
host = request.get_host().lower().split(':')[0]
public_domain = settings.PUBLIC_DOMAIN.lower().split(':')[0]
host_parts = host.split('.')
public_domain_parts = public_domain.split('.')
# Explicit Project slug being passed in
if 'HTTP_X_RTD_SLUG' in request.META:
project = request.META['HTTP_X_RTD_SLUG'].lower()
request.rtdheader = True
elif public_domain in host:
# Serve from the PUBLIC_DOMAIN, ensuring it looks like `foo.PUBLIC_DOMAIN`
if public_domain_parts == host_parts[1:]:
project = host_parts[0]
request.subdomain = True
log.debug('Proxito Public Domain: %s', host)
else:
# TODO: This can catch some possibly valid domains (docs.readthedocs.io.com) for example
# But these feel like they might be phishing, etc. so let's block them for now.
project = None
log.warning('Weird variation on our hostname: %s', host)
raise Http404(f'404: Invalid domain matching {public_domain}')
# Serve CNAMEs
else:
domain_qs = Domain.objects.filter(domain=host).prefetch_related('project')
if domain_qs.exists():
project = domain_qs.first().project.slug
request.cname = True
log.debug('Proxito CNAME: %s', host)
else:
# Some person is CNAMEing to us without configuring a domain - 404.
project = None
log.debug('CNAME 404: %s', host)
raise Http404('CNAME 404')
log.debug('Proxito Project: %s', project)
return project
|
def map_host_to_project(request):
"""
Take the incoming host, and map it to the proper Project.
We check, in order:
* The ``HTTP_X_RTD_SLUG`` host header for explicit Project mapping
* The ``PUBLIC_DOMAIN`` where we can use the subdomain as the project name
* The hostname without port information, which maps to ``Domain`` objects
"""
host = request.get_host().lower().split(':')[0]
public_domain = settings.PUBLIC_DOMAIN.lower().split(':')[0]
host_parts = host.split('.')
public_domain_parts = public_domain.split('.')
# Explicit Project slug being passed in
if 'HTTP_X_RTD_SLUG' in request.META:
project = request.META['HTTP_X_RTD_SLUG'].lower()
request.rtdheader = True
elif public_domain in host:
# Serve from the PUBLIC_DOMAIN, ensuring it looks like `foo.PUBLIC_DOMAIN`
if public_domain_parts == host_parts[1:]:
project = host_parts[0]
request.subdomain = True
log.debug('Proxito Public Domain. host=%s', host)
else:
# TODO: This can catch some possibly valid domains (docs.readthedocs.io.com) for example
# But these feel like they might be phishing, etc. so let's block them for now.
project = None
log.warning('Weird variation on our hostname: %s', host)
raise Http404(f'404: Invalid domain matching {public_domain}')
# Serve CNAMEs
else:
domain_qs = Domain.objects.filter(domain=host).prefetch_related('project')
if domain_qs.exists():
project = domain_qs.first().project.slug
request.cname = True
log.debug('Proxito CNAME: %s', host)
else:
# Some person is CNAMEing to us without configuring a domain - 404.
project = None
log.debug('CNAME 404: %s', host)
raise Http404('CNAME 404')
log.debug('Proxito Project: %s', project)
return project
|
34,509 |
def _write_core_yaml(
training_data_path: Path, output_path: Path, source_path: Path
) -> None:
from rasa.core.training.story_reader.yaml_story_reader import KEY_ACTIVE_LOOP
reader = MarkdownStoryReader()
writer = YAMLStoryWriter()
loop = asyncio.get_event_loop()
steps = loop.run_until_complete(reader.read_from_file(training_data_path))
if YAMLStoryWriter.stories_contain_loops(steps):
print_warning(
f"Training data file '{source_path}' contains forms. "
f"'form' key will be converted to '{KEY_ACTIVE_LOOP}' key. "
f"Please note that in order for these stories to work you still "
f"need the 'FormPolicy' to be active. However the 'FormPolicy' is "
f"deprecated, please consider switching to the new 'RulePolicy', "
f"you can find the documentation here: {DOCS_URL_RULES}."
)
writer.dump(output_path, steps)
print_success(f"Converted Core file: '{source_path}' >> '{output_path}'.")
|
def _write_core_yaml(
training_data_path: Path, output_path: Path, source_path: Path
) -> None:
from rasa.core.training.story_reader.yaml_story_reader import KEY_ACTIVE_LOOP
reader = MarkdownStoryReader()
writer = YAMLStoryWriter()
loop = asyncio.get_event_loop()
steps = loop.run_until_complete(reader.read_from_file(training_data_path))
if YAMLStoryWriter.stories_contain_loops(steps):
print_warning(
f"Training data file '{source_path}' contains forms. "
f"'form' key will be converted to '{KEY_ACTIVE_LOOP}' key. "
f"Please note that in order for these stories to work you still "
f"need the 'FormPolicy' to be active. However the 'FormPolicy' is "
f"deprecated, please consider switching to the new 'RulePolicy', "
f"for which you can find the documentation here: {DOCS_URL_RULES}."
)
writer.dump(output_path, steps)
print_success(f"Converted Core file: '{source_path}' >> '{output_path}'.")
|
35,152 |
def gels(a, b):
"""Solves over/well/under-determined linear systems.
Computes least-square solution to equation ``ax = b` by QR factorization
using cusolverDn<t>geqrf().
Args:
a (cupy.ndarray): The matrix with dimension ``(M, N)``.
b (cupy.ndarray): The matrix with dimension ``(M)`` or ``(M, K)``.
Returns:
cupy.ndarray:
The matrix with dimension ``(N)`` or ``(N, K)``.
"""
if a.ndim != 2:
raise ValueError('a.ndim must be 2 (actual: {})'.format(a.ndim))
if b.ndim not in (1, 2):
raise ValueError('b.ndim must be 1 or 2 (actual: {})'.format(b.ndim))
if a.shape[0] != b.shape[0]:
raise ValueError('shape mismatch (a: {}, b: {}).'.
format(a.shape, b.shape))
dtype = numpy.promote_types(a.dtype.char, 'f')
if dtype == 'f':
t = 's'
elif dtype == 'd':
t = 'd'
elif dtype == 'F':
t = 'c'
elif dtype == 'D':
t = 'z'
else:
raise ValueError('unsupported dtype (actual:{})'.format(a.dtype))
geqrf_helper = getattr(cusolver, t + 'geqrf_bufferSize')
geqrf = getattr(cusolver, t + 'geqrf')
trsm = getattr(cublas, t + 'trsm')
if t in 'sd':
ormqr_helper = getattr(cusolver, t + 'ormqr_bufferSize')
ormqr = getattr(cusolver, t + 'ormqr')
else:
ormqr_helper = getattr(cusolver, t + 'unmqr_bufferSize')
ormqr = getattr(cusolver, t + 'unmqr')
no_trans = cublas.CUBLAS_OP_N
if dtype.char in 'fd':
trans = cublas.CUBLAS_OP_T
else:
trans = cublas.CUBLAS_OP_C
m, n = a.shape
mn_min = min(m, n)
nrhs = b.shape[1] if b.ndim == 2 else 1
dev_info = cupy.empty(1, dtype=numpy.int32)
tau = cupy.empty(mn_min, dtype=dtype)
cusolver_handle = device.get_cusolver_handle()
cublas_handle = device.get_cublas_handle()
a_data_ptr = a.data.ptr
b_data_ptr = b.data.ptr
a = cupy.asfortranarray(a, dtype=dtype)
b = cupy.asfortranarray(b, dtype=dtype)
if a.data.ptr == a_data_ptr:
a = a.copy()
if b.data.ptr == b_data_ptr:
b = b.copy()
if m >= n: # over/well-determined systems
# geqrf (QR decomposition, A = Q * R)
ws_size = geqrf_helper(cusolver_handle, m, n, a.data.ptr, m)
workspace = cupy.empty(ws_size, dtype=dtype)
geqrf(cusolver_handle, m, n, a.data.ptr, m, tau.data.ptr,
workspace.data.ptr, ws_size, dev_info.data.ptr)
cupy.linalg.util._check_cusolver_dev_info_if_synchronization_allowed(
geqrf, dev_info)
# ormqr (Computes Q^T * B)
ws_size = ormqr_helper(
cusolver_handle, cublas.CUBLAS_SIDE_LEFT, trans, m, nrhs, mn_min,
a.data.ptr, m, tau.data.ptr, b.data.ptr, m)
workspace = cupy.empty(ws_size, dtype=dtype)
ormqr(cusolver_handle, cublas.CUBLAS_SIDE_LEFT, trans, m, nrhs, mn_min,
a.data.ptr, m, tau.data.ptr, b.data.ptr, m,
workspace.data.ptr, ws_size, dev_info.data.ptr)
cupy.linalg.util._check_cusolver_dev_info_if_synchronization_allowed(
ormqr, dev_info)
# trsm (Solves R * X = (Q^T * B))
trsm(cublas_handle, cublas.CUBLAS_SIDE_LEFT,
cublas.CUBLAS_FILL_MODE_UPPER, no_trans,
cublas.CUBLAS_DIAG_NON_UNIT, mn_min, nrhs, 1, a.data.ptr, m,
b.data.ptr, m)
if b.ndim == 1:
return b[:n]
else:
return b[:n, :]
else: # under-determined systems
a = cupy.asfortranarray(a.conj().T)
if b.ndim == 1:
bb = cupy.empty((n,), dtype=dtype, order='F')
bb[:m] = b
else:
bb = cupy.empty((n, nrhs), dtype=dtype, order='F')
bb[:m, :] = b
b = bb
# geqrf (QR decomposition, A^T = Q * R)
ws_size = geqrf_helper(cusolver_handle, n, m, a.data.ptr, n)
workspace = cupy.empty(ws_size, dtype=dtype)
geqrf(cusolver_handle, n, m, a.data.ptr, n, tau.data.ptr,
workspace.data.ptr, ws_size, dev_info.data.ptr)
cupy.linalg.util._check_cusolver_dev_info_if_synchronization_allowed(
geqrf, dev_info)
# trsm (Solves R^T * Z = B)
trsm(cublas_handle, cublas.CUBLAS_SIDE_LEFT,
cublas.CUBLAS_FILL_MODE_UPPER, trans,
cublas.CUBLAS_DIAG_NON_UNIT, m, nrhs, 1, a.data.ptr, n,
b.data.ptr, n)
# ormqr (Computes Q * Z)
ws_size = ormqr_helper(
cusolver_handle, cublas.CUBLAS_SIDE_LEFT, no_trans, n, nrhs,
mn_min, a.data.ptr, n, tau.data.ptr, b.data.ptr, n)
workspace = cupy.empty(ws_size, dtype=dtype)
ormqr(cusolver_handle, cublas.CUBLAS_SIDE_LEFT, no_trans, n, nrhs,
mn_min, a.data.ptr, n, tau.data.ptr, b.data.ptr, n,
workspace.data.ptr, ws_size, dev_info.data.ptr)
cupy.linalg.util._check_cusolver_dev_info_if_synchronization_allowed(
ormqr, dev_info)
return b
|
def gels(a, b):
"""Solves over/well/under-determined linear systems.
Computes least-square solution to equation ``ax = b` by QR factorization
using cusolverDn<t>geqrf().
Args:
a (cupy.ndarray): The matrix with dimension ``(M, N)``.
b (cupy.ndarray): The matrix with dimension ``(M)`` or ``(M, K)``.
Returns:
cupy.ndarray:
The matrix with dimension ``(N)`` or ``(N, K)``.
"""
if a.ndim != 2:
raise ValueError('a.ndim must be 2 (actual: {})'.format(a.ndim))
if b.ndim not in (1, 2):
raise ValueError('b.ndim must be 1 or 2 (actual: {})'.format(b.ndim))
if a.shape[0] != b.shape[0]:
raise ValueError('shape mismatch (a: {}, b: {}).'.
format(a.shape, b.shape))
dtype = numpy.promote_types(a.dtype.char, 'f')
if dtype == 'f':
t = 's'
elif dtype == 'd':
t = 'd'
elif dtype == 'F':
t = 'c'
elif dtype == 'D':
t = 'z'
else:
raise ValueError('unsupported dtype (actual: {})'.format(a.dtype))
geqrf_helper = getattr(cusolver, t + 'geqrf_bufferSize')
geqrf = getattr(cusolver, t + 'geqrf')
trsm = getattr(cublas, t + 'trsm')
if t in 'sd':
ormqr_helper = getattr(cusolver, t + 'ormqr_bufferSize')
ormqr = getattr(cusolver, t + 'ormqr')
else:
ormqr_helper = getattr(cusolver, t + 'unmqr_bufferSize')
ormqr = getattr(cusolver, t + 'unmqr')
no_trans = cublas.CUBLAS_OP_N
if dtype.char in 'fd':
trans = cublas.CUBLAS_OP_T
else:
trans = cublas.CUBLAS_OP_C
m, n = a.shape
mn_min = min(m, n)
nrhs = b.shape[1] if b.ndim == 2 else 1
dev_info = cupy.empty(1, dtype=numpy.int32)
tau = cupy.empty(mn_min, dtype=dtype)
cusolver_handle = device.get_cusolver_handle()
cublas_handle = device.get_cublas_handle()
a_data_ptr = a.data.ptr
b_data_ptr = b.data.ptr
a = cupy.asfortranarray(a, dtype=dtype)
b = cupy.asfortranarray(b, dtype=dtype)
if a.data.ptr == a_data_ptr:
a = a.copy()
if b.data.ptr == b_data_ptr:
b = b.copy()
if m >= n: # over/well-determined systems
# geqrf (QR decomposition, A = Q * R)
ws_size = geqrf_helper(cusolver_handle, m, n, a.data.ptr, m)
workspace = cupy.empty(ws_size, dtype=dtype)
geqrf(cusolver_handle, m, n, a.data.ptr, m, tau.data.ptr,
workspace.data.ptr, ws_size, dev_info.data.ptr)
cupy.linalg.util._check_cusolver_dev_info_if_synchronization_allowed(
geqrf, dev_info)
# ormqr (Computes Q^T * B)
ws_size = ormqr_helper(
cusolver_handle, cublas.CUBLAS_SIDE_LEFT, trans, m, nrhs, mn_min,
a.data.ptr, m, tau.data.ptr, b.data.ptr, m)
workspace = cupy.empty(ws_size, dtype=dtype)
ormqr(cusolver_handle, cublas.CUBLAS_SIDE_LEFT, trans, m, nrhs, mn_min,
a.data.ptr, m, tau.data.ptr, b.data.ptr, m,
workspace.data.ptr, ws_size, dev_info.data.ptr)
cupy.linalg.util._check_cusolver_dev_info_if_synchronization_allowed(
ormqr, dev_info)
# trsm (Solves R * X = (Q^T * B))
trsm(cublas_handle, cublas.CUBLAS_SIDE_LEFT,
cublas.CUBLAS_FILL_MODE_UPPER, no_trans,
cublas.CUBLAS_DIAG_NON_UNIT, mn_min, nrhs, 1, a.data.ptr, m,
b.data.ptr, m)
if b.ndim == 1:
return b[:n]
else:
return b[:n, :]
else: # under-determined systems
a = cupy.asfortranarray(a.conj().T)
if b.ndim == 1:
bb = cupy.empty((n,), dtype=dtype, order='F')
bb[:m] = b
else:
bb = cupy.empty((n, nrhs), dtype=dtype, order='F')
bb[:m, :] = b
b = bb
# geqrf (QR decomposition, A^T = Q * R)
ws_size = geqrf_helper(cusolver_handle, n, m, a.data.ptr, n)
workspace = cupy.empty(ws_size, dtype=dtype)
geqrf(cusolver_handle, n, m, a.data.ptr, n, tau.data.ptr,
workspace.data.ptr, ws_size, dev_info.data.ptr)
cupy.linalg.util._check_cusolver_dev_info_if_synchronization_allowed(
geqrf, dev_info)
# trsm (Solves R^T * Z = B)
trsm(cublas_handle, cublas.CUBLAS_SIDE_LEFT,
cublas.CUBLAS_FILL_MODE_UPPER, trans,
cublas.CUBLAS_DIAG_NON_UNIT, m, nrhs, 1, a.data.ptr, n,
b.data.ptr, n)
# ormqr (Computes Q * Z)
ws_size = ormqr_helper(
cusolver_handle, cublas.CUBLAS_SIDE_LEFT, no_trans, n, nrhs,
mn_min, a.data.ptr, n, tau.data.ptr, b.data.ptr, n)
workspace = cupy.empty(ws_size, dtype=dtype)
ormqr(cusolver_handle, cublas.CUBLAS_SIDE_LEFT, no_trans, n, nrhs,
mn_min, a.data.ptr, n, tau.data.ptr, b.data.ptr, n,
workspace.data.ptr, ws_size, dev_info.data.ptr)
cupy.linalg.util._check_cusolver_dev_info_if_synchronization_allowed(
ormqr, dev_info)
return b
|
26,091 |
def download_and_unzip(url, output_dir, delete_zip=False):
"""Download zip file at 'url', extract it to 'output_dir'.
Returns:
``True`` if hte file was successfully downloaded and extracted,
``False`` otherwise.
"""
filename = os.path.basename(url)
if _download_data_zip(url, filename):
return _unzip(filename, output_dir, delete_zip=delete_zip)
return False
|
def download_and_unzip(url, output_dir, delete_zip=False):
"""Download zip file at 'url', extract it to 'output_dir'.
Returns:
``True`` if the file was successfully downloaded and extracted,
``False`` otherwise.
"""
filename = os.path.basename(url)
if _download_data_zip(url, filename):
return _unzip(filename, output_dir, delete_zip=delete_zip)
return False
|
59,115 |
def repository_maintain(
manager: Manager,
full: bool = False,
dry_run: bool = False,
**kwargs,
) -> None:
"""Performs maintenance tasks on the repository.
If `full == True`, then this method will attempt to block the profile to guarantee the
safety of its procedures. This will not only prevent any other subsequent process from
accessing that profile, but will also first check if there is already any process using
it and raise if that is the case. You will have to manually stop any processes that is
currently accessing the profile yourself or wait for them to finish on their own.
:param manager:
a Manager instance to control the profile/backend to be maintained.
:param full:
flag to perform operations that require to stop using the profile/backend to be maintained.
:param dry_run:
flag to only print the actions that would be taken without actually executing them.
"""
type_check(manager, Manager)
profile = manager.get_profile()
backend = manager.get_backend()
repository = backend.get_repository()
def perform_tasks():
unreferenced_objects = get_unreferenced_keyset(aiida_backend=backend)
MAINTAIN_LOGGER.info(f'Deleting {len(unreferenced_objects)} unreferenced objects ...')
if not dry_run:
repository.delete_objects(list(unreferenced_objects))
MAINTAIN_LOGGER.info('Starting repository-specific operations ...')
repository.maintain(live=not full, dry_run=dry_run, **kwargs)
if full:
with ProfileAccessManager(profile).lock():
perform_tasks()
else:
perform_tasks()
|
def repository_maintain(
manager: Manager,
full: bool = False,
dry_run: bool = False,
**kwargs,
) -> None:
"""Performs maintenance tasks on the repository.
If `full == True`, then this method will attempt to block the profile to guarantee the
safety of its procedures. This will not only prevent any other subsequent process from
accessing that profile, but will also first check if there is already any process using
it and raise if that is the case. You will have to manually stop any processes that is
currently accessing the profile yourself or wait for them to finish on their own.
:param manager:
a Manager instance to control the profile/backend to be maintained.
:param full:
flag to perform operations that require to stop using the profile/backend to be maintained.
:param dry_run:
flag to only print the actions that would be taken without actually executing them.
"""
type_check(manager, Manager)
profile = manager.get_profile()
backend = manager.get_backend()
repository = backend.get_repository()
def perform_tasks():
unreferenced_objects = get_unreferenced_keyset(aiida_backend=backend)
MAINTAIN_LOGGER.info(f'Deleting {len(unreferenced_objects)} unreferenced objects ...')
if not dry_run:
repository.delete_objects(list(unreferenced_objects))
MAINTAIN_LOGGER.info('Starting repository-specific operations ...')
repository.maintain(live=not full, dry_run=dry_run, **kwargs)
from contextlib import nullcontext
if full:
context = ProfileAccessManager(profile).lock
else:
context = nullcontext
with context():
unreferenced_objects = get_unreferenced_keyset(aiida_backend=backend)
MAINTAIN_LOGGER.info(f'Deleting {len(unreferenced_objects)} unreferenced objects ...')
if not dry_run:
repository.delete_objects(list(unreferenced_objects))
MAINTAIN_LOGGER.info('Starting repository-specific operations ...')
repository.maintain(live=not full, dry_run=dry_run, **kwargs)
|
7,152 |
def grass():
"""Grass.
Returns
-------
grass: (512, 512) uint8
Some grass.
"""
return load("grass.png")
|
def grass():
"""Grass.
Returns
-------
grass: (512, 512) uint8 image
Some grass.
"""
return load("grass.png")
|
11,456 |
def native_col_type(col_type, value):
if col_type == "datetime":
try:
value = Deserializer.deserialize_iso(value)
except Exception: # pylint: disable=bare-except
# if there is any exception in deserializing the iso,
# return the value to the user
pass
elif col_type in ("timespan", "guid"):
value = str(value)
return value
|
def native_col_type(col_type, value):
if col_type == "datetime":
try:
value = Deserializer.deserialize_iso(value)
except Exception:
# if there is any exception in deserializing the iso,
# return the value to the user
pass
elif col_type in ("timespan", "guid"):
value = str(value)
return value
|
39,694 |
def main():
module = ForemanAnsibleRolesModule(
foreman_spec = dict(
state = dict(default='fetch', choices=['fetch']),
proxy_id = dict(),
),
required_if = [
['state', 'fetch', ['proxy_id']]
],
)
module_params = module.clean_params()
with module.api_connection():
if module.state == 'fetch':
resources = module.fetch_resource('ansible_roles', module_params)
module.exit_json(ansible_roles=resources['results']['ansible_roles'])
|
def main():
module = ForemanAnsibleRolesModule(
foreman_spec = dict(
state = dict(default='fetch', choices=['fetch']),
smart_proxy = dict(type='entity', flat_name='proxy_id', aliases=['proxy']),
),
required_if = [
['state', 'fetch', ['proxy_id']]
],
)
module_params = module.clean_params()
with module.api_connection():
if module.state == 'fetch':
resources = module.fetch_resource('ansible_roles', module_params)
module.exit_json(ansible_roles=resources['results']['ansible_roles'])
|
7,370 |
def cross_correlate_masked(arr1, arr2, m1, m2, mode='full', axes=(-2, -1),
overlap_ratio=0.3):
"""
Masked normalized cross-correlation between arrays.
Parameters
----------
arr1 : ndarray
First array.
arr2 : ndarray
Seconds array. The dimensions of `arr2` along axes that are not
transformed should be equal to that of `arr1`.
m1 : ndarray
Mask of `arr1`. The mask should evaluate to `True`
(or 1) on valid pixels. `m1` should have the same shape as `arr1`.
m2 : ndarray
Mask of `arr2`. The mask should evaluate to `True`
(or 1) on valid pixels. `m2` should have the same shape as `arr2`.
mode : {'full', 'same'}, optional
'full':
This returns the convolution at each point of overlap. At
the end-points of the convolution, the signals do not overlap
completely, and boundary effects may be seen.
'same':
The output is the same size as `arr1`, centered with respect
to the `‘full’` output. Boundary effects are less prominent.
axes : tuple of ints, optional
Axes along which to compute the cross-correlation.
overlap_ratio : float, optional
Minimum allowed overlap ratio between images. The correlation for
translations corresponding with an overlap ratio lower than this
threshold will be ignored. A lower `overlap_ratio` leads to smaller
maximum translation, while a higher `overlap_ratio` leads to greater
robustness against spurious matches due to small overlap between
masked images.
Returns
-------
out : ndarray
Masked normalized cross-correlation.
Raises
------
ValueError : if correlation `mode` is not valid, or array dimensions along
non-transformation axes are not equal.
References
----------
.. [1] Dirk Padfield. Masked Object Registration in the Fourier Domain.
IEEE Transactions on Image Processing, vol. 21(5),
pp. 2706-2718 (2012). :DOI:`10.1109/TIP.2011.2181402`
.. [2] D. Padfield. "Masked FFT registration". In Proc. Computer Vision and
Pattern Recognition, pp. 2918-2925 (2010).
:DOI:`10.1109/CVPR.2010.5540032`
"""
if mode not in {'full', 'same'}:
raise ValueError(f"Correlation mode '{mode}' is not valid.")
fixed_image = np.asarray(arr1)
moving_image = np.asarray(arr2)
float_dtype = _supported_float_type(
[fixed_image.dtype, moving_image.dtype]
)
if float_dtype.kind == 'c':
raise ValueError("complex-valued arr1, arr2 are not supported")
fixed_image = fixed_image.astype(float_dtype)
fixed_mask = np.array(m1, dtype=bool)
moving_image = moving_image.astype(float_dtype)
moving_mask = np.array(m2, dtype=bool)
eps = np.finfo(float_dtype).eps
# Array dimensions along non-transformation axes should be equal.
all_axes = set(range(fixed_image.ndim))
for axis in (all_axes - set(axes)):
if fixed_image.shape[axis] != moving_image.shape[axis]:
raise ValueError(
f'Array shapes along non-transformation axes should be '
f'equal, but dimensions along axis {axis} are not.')
# Determine final size along transformation axes
# Note that it might be faster to compute Fourier transform in a slightly
# larger shape (`fast_shape`). Then, after all fourier transforms are done,
# we slice back to`final_shape` using `final_slice`.
final_shape = list(arr1.shape)
for axis in axes:
final_shape[axis] = fixed_image.shape[axis] + \
moving_image.shape[axis] - 1
final_shape = tuple(final_shape)
final_slice = tuple([slice(0, int(sz)) for sz in final_shape])
# Extent transform axes to the next fast length (i.e. multiple of 3, 5, or
# 7)
fast_shape = tuple([next_fast_len(final_shape[ax]) for ax in axes])
# We use the new scipy.fft because they allow leaving the transform axes
# unchanged which was not possible with scipy.fftpack's
# fftn/ifftn in older versions of SciPy.
# E.g. arr shape (2, 3, 7), transform along axes (0, 1) with shape (4, 4)
# results in arr_fft shape (4, 4, 7)
fft = partial(fftmodule.fftn, s=fast_shape, axes=axes)
_ifft = partial(fftmodule.ifftn, s=fast_shape, axes=axes)
def ifft(x):
return _ifft(x).real
fixed_image[np.logical_not(fixed_mask)] = 0.0
moving_image[np.logical_not(moving_mask)] = 0.0
# N-dimensional analog to rotation by 180deg is flip over all
# relevant axes.
# See [1] for discussion.
rotated_moving_image = _flip(moving_image, axes=axes)
rotated_moving_mask = _flip(moving_mask, axes=axes)
fixed_fft = fft(fixed_image)
rotated_moving_fft = fft(rotated_moving_image)
fixed_mask_fft = fft(fixed_mask.astype(float_dtype))
rotated_moving_mask_fft = fft(rotated_moving_mask.astype(float_dtype))
# Calculate overlap of masks at every point in the convolution.
# Locations with high overlap should not be taken into account.
number_overlap_masked_px = ifft(rotated_moving_mask_fft * fixed_mask_fft)
number_overlap_masked_px[:] = np.round(number_overlap_masked_px)
number_overlap_masked_px[:] = np.fmax(number_overlap_masked_px, eps)
masked_correlated_fixed_fft = ifft(rotated_moving_mask_fft * fixed_fft)
masked_correlated_rotated_moving_fft = ifft(
fixed_mask_fft * rotated_moving_fft)
numerator = ifft(rotated_moving_fft * fixed_fft)
numerator -= masked_correlated_fixed_fft * \
masked_correlated_rotated_moving_fft / number_overlap_masked_px
fixed_squared_fft = fft(np.square(fixed_image))
fixed_denom = ifft(rotated_moving_mask_fft * fixed_squared_fft)
fixed_denom -= np.square(masked_correlated_fixed_fft) / \
number_overlap_masked_px
fixed_denom[:] = np.fmax(fixed_denom, 0.0)
rotated_moving_squared_fft = fft(np.square(rotated_moving_image))
moving_denom = ifft(fixed_mask_fft * rotated_moving_squared_fft)
moving_denom -= np.square(masked_correlated_rotated_moving_fft) / \
number_overlap_masked_px
moving_denom[:] = np.fmax(moving_denom, 0.0)
denom = np.sqrt(fixed_denom * moving_denom)
# Slice back to expected convolution shape.
numerator = numerator[final_slice]
denom = denom[final_slice]
number_overlap_masked_px = number_overlap_masked_px[final_slice]
if mode == 'same':
_centering = partial(_centered,
newshape=fixed_image.shape, axes=axes)
denom = _centering(denom)
numerator = _centering(numerator)
number_overlap_masked_px = _centering(number_overlap_masked_px)
# Pixels where `denom` is very small will introduce large
# numbers after division. To get around this problem,
# we zero-out problematic pixels.
tol = 1e3 * eps * np.max(np.abs(denom), axis=axes, keepdims=True)
nonzero_indices = denom > tol
# explicitly set out dtype for compatibility with SciPy < 1.4, where
# fftmodule will be numpy.fft which always uses float64 dtype.
out = np.zeros_like(denom, dtype=float_dtype)
out[nonzero_indices] = numerator[nonzero_indices] / denom[nonzero_indices]
np.clip(out, a_min=-1, a_max=1, out=out)
# Apply overlap ratio threshold
number_px_threshold = overlap_ratio * np.max(number_overlap_masked_px,
axis=axes, keepdims=True)
out[number_overlap_masked_px < number_px_threshold] = 0.0
return out
|
def cross_correlate_masked(arr1, arr2, m1, m2, mode='full', axes=(-2, -1),
overlap_ratio=0.3):
"""
Masked normalized cross-correlation between arrays.
Parameters
----------
arr1 : ndarray
First array.
arr2 : ndarray
Second array. The dimensions of `arr2` along axes that are not
transformed should be equal to that of `arr1`.
m1 : ndarray
Mask of `arr1`. The mask should evaluate to `True`
(or 1) on valid pixels. `m1` should have the same shape as `arr1`.
m2 : ndarray
Mask of `arr2`. The mask should evaluate to `True`
(or 1) on valid pixels. `m2` should have the same shape as `arr2`.
mode : {'full', 'same'}, optional
'full':
This returns the convolution at each point of overlap. At
the end-points of the convolution, the signals do not overlap
completely, and boundary effects may be seen.
'same':
The output is the same size as `arr1`, centered with respect
to the `‘full’` output. Boundary effects are less prominent.
axes : tuple of ints, optional
Axes along which to compute the cross-correlation.
overlap_ratio : float, optional
Minimum allowed overlap ratio between images. The correlation for
translations corresponding with an overlap ratio lower than this
threshold will be ignored. A lower `overlap_ratio` leads to smaller
maximum translation, while a higher `overlap_ratio` leads to greater
robustness against spurious matches due to small overlap between
masked images.
Returns
-------
out : ndarray
Masked normalized cross-correlation.
Raises
------
ValueError : if correlation `mode` is not valid, or array dimensions along
non-transformation axes are not equal.
References
----------
.. [1] Dirk Padfield. Masked Object Registration in the Fourier Domain.
IEEE Transactions on Image Processing, vol. 21(5),
pp. 2706-2718 (2012). :DOI:`10.1109/TIP.2011.2181402`
.. [2] D. Padfield. "Masked FFT registration". In Proc. Computer Vision and
Pattern Recognition, pp. 2918-2925 (2010).
:DOI:`10.1109/CVPR.2010.5540032`
"""
if mode not in {'full', 'same'}:
raise ValueError(f"Correlation mode '{mode}' is not valid.")
fixed_image = np.asarray(arr1)
moving_image = np.asarray(arr2)
float_dtype = _supported_float_type(
[fixed_image.dtype, moving_image.dtype]
)
if float_dtype.kind == 'c':
raise ValueError("complex-valued arr1, arr2 are not supported")
fixed_image = fixed_image.astype(float_dtype)
fixed_mask = np.array(m1, dtype=bool)
moving_image = moving_image.astype(float_dtype)
moving_mask = np.array(m2, dtype=bool)
eps = np.finfo(float_dtype).eps
# Array dimensions along non-transformation axes should be equal.
all_axes = set(range(fixed_image.ndim))
for axis in (all_axes - set(axes)):
if fixed_image.shape[axis] != moving_image.shape[axis]:
raise ValueError(
f'Array shapes along non-transformation axes should be '
f'equal, but dimensions along axis {axis} are not.')
# Determine final size along transformation axes
# Note that it might be faster to compute Fourier transform in a slightly
# larger shape (`fast_shape`). Then, after all fourier transforms are done,
# we slice back to`final_shape` using `final_slice`.
final_shape = list(arr1.shape)
for axis in axes:
final_shape[axis] = fixed_image.shape[axis] + \
moving_image.shape[axis] - 1
final_shape = tuple(final_shape)
final_slice = tuple([slice(0, int(sz)) for sz in final_shape])
# Extent transform axes to the next fast length (i.e. multiple of 3, 5, or
# 7)
fast_shape = tuple([next_fast_len(final_shape[ax]) for ax in axes])
# We use the new scipy.fft because they allow leaving the transform axes
# unchanged which was not possible with scipy.fftpack's
# fftn/ifftn in older versions of SciPy.
# E.g. arr shape (2, 3, 7), transform along axes (0, 1) with shape (4, 4)
# results in arr_fft shape (4, 4, 7)
fft = partial(fftmodule.fftn, s=fast_shape, axes=axes)
_ifft = partial(fftmodule.ifftn, s=fast_shape, axes=axes)
def ifft(x):
return _ifft(x).real
fixed_image[np.logical_not(fixed_mask)] = 0.0
moving_image[np.logical_not(moving_mask)] = 0.0
# N-dimensional analog to rotation by 180deg is flip over all
# relevant axes.
# See [1] for discussion.
rotated_moving_image = _flip(moving_image, axes=axes)
rotated_moving_mask = _flip(moving_mask, axes=axes)
fixed_fft = fft(fixed_image)
rotated_moving_fft = fft(rotated_moving_image)
fixed_mask_fft = fft(fixed_mask.astype(float_dtype))
rotated_moving_mask_fft = fft(rotated_moving_mask.astype(float_dtype))
# Calculate overlap of masks at every point in the convolution.
# Locations with high overlap should not be taken into account.
number_overlap_masked_px = ifft(rotated_moving_mask_fft * fixed_mask_fft)
number_overlap_masked_px[:] = np.round(number_overlap_masked_px)
number_overlap_masked_px[:] = np.fmax(number_overlap_masked_px, eps)
masked_correlated_fixed_fft = ifft(rotated_moving_mask_fft * fixed_fft)
masked_correlated_rotated_moving_fft = ifft(
fixed_mask_fft * rotated_moving_fft)
numerator = ifft(rotated_moving_fft * fixed_fft)
numerator -= masked_correlated_fixed_fft * \
masked_correlated_rotated_moving_fft / number_overlap_masked_px
fixed_squared_fft = fft(np.square(fixed_image))
fixed_denom = ifft(rotated_moving_mask_fft * fixed_squared_fft)
fixed_denom -= np.square(masked_correlated_fixed_fft) / \
number_overlap_masked_px
fixed_denom[:] = np.fmax(fixed_denom, 0.0)
rotated_moving_squared_fft = fft(np.square(rotated_moving_image))
moving_denom = ifft(fixed_mask_fft * rotated_moving_squared_fft)
moving_denom -= np.square(masked_correlated_rotated_moving_fft) / \
number_overlap_masked_px
moving_denom[:] = np.fmax(moving_denom, 0.0)
denom = np.sqrt(fixed_denom * moving_denom)
# Slice back to expected convolution shape.
numerator = numerator[final_slice]
denom = denom[final_slice]
number_overlap_masked_px = number_overlap_masked_px[final_slice]
if mode == 'same':
_centering = partial(_centered,
newshape=fixed_image.shape, axes=axes)
denom = _centering(denom)
numerator = _centering(numerator)
number_overlap_masked_px = _centering(number_overlap_masked_px)
# Pixels where `denom` is very small will introduce large
# numbers after division. To get around this problem,
# we zero-out problematic pixels.
tol = 1e3 * eps * np.max(np.abs(denom), axis=axes, keepdims=True)
nonzero_indices = denom > tol
# explicitly set out dtype for compatibility with SciPy < 1.4, where
# fftmodule will be numpy.fft which always uses float64 dtype.
out = np.zeros_like(denom, dtype=float_dtype)
out[nonzero_indices] = numerator[nonzero_indices] / denom[nonzero_indices]
np.clip(out, a_min=-1, a_max=1, out=out)
# Apply overlap ratio threshold
number_px_threshold = overlap_ratio * np.max(number_overlap_masked_px,
axis=axes, keepdims=True)
out[number_overlap_masked_px < number_px_threshold] = 0.0
return out
|
12,784 |
def write_merkle_paths(root, leaves, storage_backend, merkle_directory):
# The root and leaves must be part of the same fully constructed
# Merkle tree. Create a path from
# Each leaf to the root node. This path will be downloaded by
# the client and used for verification of the tree. For each
# step in the path, keep track of both the sibling node and
# Whether this is a left or a right child.
for l in leaves:
merkle_path = {}
current_node = l
path_directions = {}
index = 0
while(current_node != root):
next_node = current_node.parent()
# TODO: determine left or right upon node creation.
# This currently determines which sibling to use by
# finding the sibling that does not match the current hash.
h_left = next_node.left().hash()
h_right = next_node.right().hash()
if current_node.hash() == h_left:
merkle_path[str(index)] = h_right
path_directions[str(index)] = -1
elif current_node.hash() == h_right:
merkle_path[str(index)] = h_left
path_directions[str(index)] = 1
else:
# error
pass
index = index + 1
current_node = next_node
# Write the path to the merkle_directory
file_contents = tuf.formats.build_dict_conforming_to_schema(
tuf.formats.SNAPSHOT_MERKLE_SCHEMA,
leaf_contents=l.contents(),
merkle_path=merkle_path,
path_directions=path_directions)
if storage_backend is None:
storage_backend = securesystemslib.storage.FilesystemBackend()
file_content = _get_written_metadata(file_contents)
file_object = tempfile.TemporaryFile()
file_object.write(file_content)
filename = os.path.join(merkle_directory, l.name() + '-snapshot.json')
storage_backend.put(file_object, filename)
file_object.close()
|
def _write_merkle_paths(root, leaves, storage_backend, merkle_directory):
# The root and leaves must be part of the same fully constructed
# Merkle tree. Create a path from
# Each leaf to the root node. This path will be downloaded by
# the client and used for verification of the tree. For each
# step in the path, keep track of both the sibling node and
# Whether this is a left or a right child.
for l in leaves:
merkle_path = {}
current_node = l
path_directions = {}
index = 0
while(current_node != root):
next_node = current_node.parent()
# TODO: determine left or right upon node creation.
# This currently determines which sibling to use by
# finding the sibling that does not match the current hash.
h_left = next_node.left().hash()
h_right = next_node.right().hash()
if current_node.hash() == h_left:
merkle_path[str(index)] = h_right
path_directions[str(index)] = -1
elif current_node.hash() == h_right:
merkle_path[str(index)] = h_left
path_directions[str(index)] = 1
else:
# error
pass
index = index + 1
current_node = next_node
# Write the path to the merkle_directory
file_contents = tuf.formats.build_dict_conforming_to_schema(
tuf.formats.SNAPSHOT_MERKLE_SCHEMA,
leaf_contents=l.contents(),
merkle_path=merkle_path,
path_directions=path_directions)
if storage_backend is None:
storage_backend = securesystemslib.storage.FilesystemBackend()
file_content = _get_written_metadata(file_contents)
file_object = tempfile.TemporaryFile()
file_object.write(file_content)
filename = os.path.join(merkle_directory, l.name() + '-snapshot.json')
storage_backend.put(file_object, filename)
file_object.close()
|
39,059 |
def _get_server_start_message(
host_ip_version: Optional[_IPKind] = None,
) -> Tuple[str, str]:
if host_ip_version is _IPKind.IPv6:
ip_repr = "%s://[%s]:%d"
else:
ip_repr = "%s://%s:%d"
message = f"Uvicorn running on {ip_repr} (Press CTRL+C to quit)"
color_message = (
"Uvicorn running on "
+ click.style(ip_repr, bold=True)
+ " (Press CTRL+C to quit)"
)
return message, color_message
|
def _get_server_start_message(
*, is_ipv6_message: bool = False
) -> Tuple[str, str]:
if host_ip_version is _IPKind.IPv6:
ip_repr = "%s://[%s]:%d"
else:
ip_repr = "%s://%s:%d"
message = f"Uvicorn running on {ip_repr} (Press CTRL+C to quit)"
color_message = (
"Uvicorn running on "
+ click.style(ip_repr, bold=True)
+ " (Press CTRL+C to quit)"
)
return message, color_message
|
57,366 |
def validate_tron(service_path: str, verbose: bool = False) -> bool:
soa_dir, service = path_to_soa_dir_service(service_path)
returncode = True
for cluster in list_tron_clusters(service, soa_dir):
if not validate_tron_namespace(service, cluster, soa_dir):
returncode = False
elif verbose:
# service config has been validated and cron schedules should be safe to parse TODO: TRON-1761
service_config = load_tron_service_config(
service=service, cluster=cluster, soa_dir=soa_dir
)
for config in service_config:
cron_expression = config.get_cron_expression()
if cron_expression:
print_upcoming_runs(config, cron_expression)
return returncode
|
def validate_tron(service_path: str, verbose: bool = False) -> bool:
soa_dir, service = path_to_soa_dir_service(service_path)
returncode = True
for cluster in list_tron_clusters(service, soa_dir):
if not validate_tron_namespace(service, cluster, soa_dir):
returncode = False
elif verbose:
# service config has been validated and cron schedules should be safe to parse
# TODO(TRON-1761): unify tron/paasta validate cron syntax validation
service_config = load_tron_service_config(
service=service, cluster=cluster, soa_dir=soa_dir
)
for config in service_config:
cron_expression = config.get_cron_expression()
if cron_expression:
print_upcoming_runs(config, cron_expression)
return returncode
|
56,024 |
def _get_class_name(model_class: Union[str, List[str]]):
if isinstance(model_class, (list, tuple)):
return " or ".join([f"[`~transformers.{c}`]" for c in model_class if c is not None])
return f"[`~transformers.{model_class}`]"
|
def _get_class_name(model_class: Union[str, List[str]]):
if isinstance(model_class, (list, tuple)):
return " or ".join([f"[`{c}`]" for c in model_class if c is not None])
return f"[`model_class`]"
|
1,831 |
def test_unique_util_missing_values_error_both_missing():
# _unique does not support both types of missing
values = np.array(['a', 'c', 'c', None, np.nan], dtype=object)
msg = ("Input wiith both types of missing, None and np.nan, is not "
"supported")
with pytest.raises(ValueError, match=msg):
_unique(values)
|
def test_unique_util_missing_values_error_both_missing():
# _unique does not support both types of missing
values = np.array(['a', 'c', 'c', None, np.nan], dtype=object)
msg = ("Input with both types of missing, None and np.nan, is not "
"supported")
with pytest.raises(ValueError, match=msg):
_unique(values)
|
21,921 |
def main_posix(kind, library_ext):
os.chdir(here_dir)
# Check availability of llvm-config
llvm_config = os.environ.get('LLVM_CONFIG', 'llvm-config')
print("LLVM version... ", end='')
sys.stdout.flush()
try:
out = subprocess.check_output([llvm_config, '--version'])
except (OSError, subprocess.CalledProcessError):
raise RuntimeError("%s failed executing, please point LLVM_CONFIG "
"to the path for llvm-config" % (llvm_config,))
out = out.decode('latin1')
print(out)
if not (out.startswith('9.0.') or out.startswith('7.0.')
or out.startswith('7.1.')):
msg = (
"Building llvmlite requires LLVM 7.0.x, 7.1.x or 8.0.x Be sure to "
"set LLVM_CONFIG to the right executable path.\n"
"Read the documentation at http://llvmlite.pydata.org/ for more "
"information about building llvmlite.\n"
)
raise RuntimeError(msg)
# Get LLVM information for building
libs = run_llvm_config(llvm_config, "--system-libs --libs all".split())
# Normalize whitespace (trim newlines)
os.environ['LLVM_LIBS'] = ' '.join(libs.split())
cxxflags = run_llvm_config(llvm_config, ["--cxxflags"])
# on OSX cxxflags has null bytes at the end of the string, remove them
cxxflags = cxxflags.replace('\0', '')
cxxflags = cxxflags.split() + ['-fno-rtti', '-g']
# look for SVML
include_dir = run_llvm_config(llvm_config, ['--includedir']).strip()
svml_indicator = os.path.join(include_dir, 'llvm', 'IR', 'SVML.inc')
if os.path.isfile(svml_indicator):
cxxflags = cxxflags + ['-DHAVE_SVML']
print('SVML detected')
else:
print('SVML not detected')
os.environ['LLVM_CXXFLAGS'] = ' '.join(cxxflags)
ldflags = run_llvm_config(llvm_config, ["--ldflags"])
os.environ['LLVM_LDFLAGS'] = ldflags.strip()
# static link libstdc++ for portability
if int(os.environ.get('LLVMLITE_CXX_STATIC_LINK', 0)):
os.environ['CXX_STATIC_LINK'] = "-static-libstdc++"
makefile = "Makefile.%s" % (kind,)
subprocess.check_call(['make', '-f', makefile])
shutil.copy('libllvmlite' + library_ext, target_dir)
|
def main_posix(kind, library_ext):
os.chdir(here_dir)
# Check availability of llvm-config
llvm_config = os.environ.get('LLVM_CONFIG', 'llvm-config')
print("LLVM version... ", end='')
sys.stdout.flush()
try:
out = subprocess.check_output([llvm_config, '--version'])
except (OSError, subprocess.CalledProcessError):
raise RuntimeError("%s failed executing, please point LLVM_CONFIG "
"to the path for llvm-config" % (llvm_config,))
out = out.decode('latin1')
print(out)
if not (out.startswith('9.0.') or out.startswith('8.0.') or out.startswith('7.0.')
or out.startswith('7.1.')):
msg = (
"Building llvmlite requires LLVM 7.0.x, 7.1.x or 8.0.x Be sure to "
"set LLVM_CONFIG to the right executable path.\n"
"Read the documentation at http://llvmlite.pydata.org/ for more "
"information about building llvmlite.\n"
)
raise RuntimeError(msg)
# Get LLVM information for building
libs = run_llvm_config(llvm_config, "--system-libs --libs all".split())
# Normalize whitespace (trim newlines)
os.environ['LLVM_LIBS'] = ' '.join(libs.split())
cxxflags = run_llvm_config(llvm_config, ["--cxxflags"])
# on OSX cxxflags has null bytes at the end of the string, remove them
cxxflags = cxxflags.replace('\0', '')
cxxflags = cxxflags.split() + ['-fno-rtti', '-g']
# look for SVML
include_dir = run_llvm_config(llvm_config, ['--includedir']).strip()
svml_indicator = os.path.join(include_dir, 'llvm', 'IR', 'SVML.inc')
if os.path.isfile(svml_indicator):
cxxflags = cxxflags + ['-DHAVE_SVML']
print('SVML detected')
else:
print('SVML not detected')
os.environ['LLVM_CXXFLAGS'] = ' '.join(cxxflags)
ldflags = run_llvm_config(llvm_config, ["--ldflags"])
os.environ['LLVM_LDFLAGS'] = ldflags.strip()
# static link libstdc++ for portability
if int(os.environ.get('LLVMLITE_CXX_STATIC_LINK', 0)):
os.environ['CXX_STATIC_LINK'] = "-static-libstdc++"
makefile = "Makefile.%s" % (kind,)
subprocess.check_call(['make', '-f', makefile])
shutil.copy('libllvmlite' + library_ext, target_dir)
|
3,145 |
def _concatenate_chunks(chunks: list[dict[int, ArrayLike]]) -> dict:
"""
Concatenate chunks of data read with low_memory=True.
The tricky part is handling Categoricals, where different chunks
may have different inferred categories.
"""
names = list(chunks[0].keys())
warning_columns = []
result = {}
for name in names:
arrs = [chunk.pop(name) for chunk in chunks]
# Check each arr for consistent types.
dtypes = {a.dtype for a in arrs}
# TODO: shouldn't we exclude all EA dtypes here?
numpy_dtypes = {x for x in dtypes if not is_categorical_dtype(x)}
if len(numpy_dtypes) > 1:
# error: Argument 1 to "find_common_type" has incompatible type
# "Set[Any]"; expected "Sequence[Union[dtype[Any], None, type,
# _SupportsDType, str, Union[Tuple[Any, int], Tuple[Any,
# Union[int, Sequence[int]]], List[Any], _DTypeDict, Tuple[Any, Any]]]]"
common_type = np.find_common_type(
numpy_dtypes, # type: ignore[arg-type]
[],
)
# error: Non-overlapping equality check (left operand type: "dtype[Any]",
# right operand type: "Type[object]")
if common_type == object: # type: ignore[comparison-overlap]
warning_columns.append(str(name))
dtype = dtypes.pop()
if is_categorical_dtype(dtype):
result[name] = union_categoricals(arrs, sort_categories=False)
else:
if isinstance(dtype, ExtensionDtype):
# TODO: concat_compat?
array_type = dtype.construct_array_type()
# error: Argument 1 to "_concat_same_type" of "ExtensionArray"
# has incompatible type "List[Union[ExtensionArray, ndarray]]";
# expected "Sequence[ExtensionArray]"
result[name] = array_type._concat_same_type(
arrs # type: ignore[arg-type]
)
else:
result[name] = np.concatenate(arrs)
if warning_columns:
warning_names = ",".join(warning_columns)
warning_message = " ".join(
[
f"Columns ({warning_names}) have mixed types. "
f"Specify dtype option on import or set low_memory=False. "
]
)
warnings.warn(warning_message, DtypeWarning, stacklevel=8)
return result
|
def _concatenate_chunks(chunks: list[dict[int, ArrayLike]]) -> dict:
"""
Concatenate chunks of data read with low_memory=True.
The tricky part is handling Categoricals, where different chunks
may have different inferred categories.
"""
names = list(chunks[0].keys())
warning_columns = []
result = {}
for name in names:
arrs = [chunk.pop(name) for chunk in chunks]
# Check each arr for consistent types.
dtypes = {a.dtype for a in arrs}
# TODO: shouldn't we exclude all EA dtypes here?
numpy_dtypes = {x for x in dtypes if not is_categorical_dtype(x)}
if len(numpy_dtypes) > 1:
# error: Argument 1 to "find_common_type" has incompatible type
# "Set[Any]"; expected "Sequence[Union[dtype[Any], None, type,
# _SupportsDType, str, Union[Tuple[Any, int], Tuple[Any,
# Union[int, Sequence[int]]], List[Any], _DTypeDict, Tuple[Any, Any]]]]"
common_type = np.find_common_type(
numpy_dtypes, # type: ignore[arg-type]
[],
)
# error: Non-overlapping equality check (left operand type: "dtype[Any]",
# right operand type: "Type[object]")
if common_type == object: # type: ignore[comparison-overlap]
warning_columns.append(str(name))
dtype = dtypes.pop()
if is_categorical_dtype(dtype):
result[name] = union_categoricals(arrs, sort_categories=False)
else:
if isinstance(dtype, ExtensionDtype):
# TODO: concat_compat?
array_type = dtype.construct_array_type()
# error: Argument 1 to "_concat_same_type" of "ExtensionArray"
# has incompatible type "List[Union[ExtensionArray, ndarray]]";
# expected "Sequence[ExtensionArray]"
result[name] = array_type._concat_same_type(
arrs # type: ignore[arg-type]
)
else:
result[name] = np.concatenate(arrs)
if warning_columns:
warning_names = ",".join(warning_columns)
warning_message = " ".join(
[
f"Columns ({warning_names}) have mixed types. "
f"Specify dtype option on import or set low_memory=False."
]
)
warnings.warn(warning_message, DtypeWarning, stacklevel=8)
return result
|
12,036 |
def as_data_frame(cube, dropna=True, asmultiindex=False):
"""
Convert a 2D cube to a Pandas DataFrame.
Args:
* cube - The cube to convert to a Pandas DataFrame.
Kwargs:
* dropna - Remove missing values from returned dataframe.
Defaults to True.
.. note::
TBC
"""
data = cube.data
if ma.isMaskedArray(data):
data = data.astype("f").filled(np.nan)
elif copy:
data = data.copy()
# Extract dim coord information
if cube.ndim != len(cube.dim_coords):
# Create dummy dim coord information if dim coords not defined
coord_names = ["dim" + str(n) for n in range(cube.ndim)]
coords = [range(dim) for dim in cube.shape]
for c in cube.dim_coords:
for i, dummyc in enumerate(coords):
if len(dummyc) == len(c.points):
coords[i] = _as_pandas_coord(c)
coord_names[i] = c.name()
else:
pass
else:
coord_names = list(map(lambda x: x.name(), cube.dim_coords))
coords = list(map(lambda x: _as_pandas_coord(x), cube.dim_coords))
index = pandas.MultiIndex.from_product(coords, names=coord_names)
data_frame = pandas.DataFrame({cube.name(): data.flatten()}, index=index)
if dropna:
data_frame.dropna(inplace=True)
if not asmultiindex:
data_frame.reset_index(inplace=True)
return data_frame
|
def as_data_frame(cube, dropna=False, asmultiindex=False):
"""
Convert a 2D cube to a Pandas DataFrame.
Args:
* cube - The cube to convert to a Pandas DataFrame.
Kwargs:
* dropna - Remove missing values from returned dataframe.
Defaults to True.
.. note::
TBC
"""
data = cube.data
if ma.isMaskedArray(data):
data = data.astype("f").filled(np.nan)
elif copy:
data = data.copy()
# Extract dim coord information
if cube.ndim != len(cube.dim_coords):
# Create dummy dim coord information if dim coords not defined
coord_names = ["dim" + str(n) for n in range(cube.ndim)]
coords = [range(dim) for dim in cube.shape]
for c in cube.dim_coords:
for i, dummyc in enumerate(coords):
if len(dummyc) == len(c.points):
coords[i] = _as_pandas_coord(c)
coord_names[i] = c.name()
else:
pass
else:
coord_names = list(map(lambda x: x.name(), cube.dim_coords))
coords = list(map(lambda x: _as_pandas_coord(x), cube.dim_coords))
index = pandas.MultiIndex.from_product(coords, names=coord_names)
data_frame = pandas.DataFrame({cube.name(): data.flatten()}, index=index)
if dropna:
data_frame.dropna(inplace=True)
if not asmultiindex:
data_frame.reset_index(inplace=True)
return data_frame
|
39,652 |
def _get_output_dask_ar_meta_for_estimator(model_fn, estimator, input_dask_ar):
"""
Returns the output metadata array
for the model function (predict, transform etc)
by running the appropriate function on dummy data
of shape (1, n_features)
Parameters
----------
model_fun: Model function
_predict, _transform etc
estimator : Estimator
The underlying estimator that is fit.
input_dask_ar: The input dask_array
Returns
-------
metadata: metadata of output dask array
"""
# sklearn fails if input array has size size
# It requires at least 1 sample to run successfully
imput_meta = input_dask_ar._meta
if hasattr(imput_meta, "__array_function__"):
ar = np.zeros(
shape=(1, input_dask_ar.shape[1]),
dtype=input_dask_ar.dtype,
like=imput_meta,
)
elif "scipy.sparse" in type(imput_meta).__module__:
# sparse matrices dont support
# `like` due to non implimented __array_function__
# Refer https:/q/github.com/scipy/scipy/issues/10362
# Note below works for both cupy and scipy sparse matrices
ar = type(imput_meta)((1, input_dask_ar.shape[1]), dtype=input_dask_ar.dtype)
else:
msg = (
"\nYou did not provide metadata, so Dask is running the"
"function on a small dataset to guess output types. "
"It is possible that Dask will guess incorrectly.\n"
"To provide an explicit output types or to silence this message, "
"please provide the `predict_meta`, `predict_proba_meta`,"
"`transform_meta` as appropiate"
)
warnings.warn(msg)
ar = np.zeros(shape=(1, input_dask_ar.shape[1]), dtype=input_dask_ar.dtype)
return model_fn(ar, estimator)
|
def _get_output_dask_ar_meta_for_estimator(model_fn, estimator, input_dask_ar):
"""
Returns the output metadata array
for the model function (predict, transform etc)
by running the appropriate function on dummy data
of shape (1, n_features)
Parameters
----------
model_fun: Model function
_predict, _transform etc
estimator : Estimator
The underlying estimator that is fit.
input_dask_ar: The input dask_array
Returns
-------
metadata: metadata of output dask array
"""
# sklearn fails if input array has size size
# It requires at least 1 sample to run successfully
imput_meta = input_dask_ar._meta
if hasattr(imput_meta, "__array_function__"):
ar = np.zeros(
shape=(1, input_dask_ar.shape[1]),
dtype=input_dask_ar.dtype,
like=imput_meta,
)
elif "scipy.sparse" in type(imput_meta).__module__:
# sparse matrices dont support
# `like` due to non implimented __array_function__
# Refer https:/q/github.com/scipy/scipy/issues/10362
# Note below works for both cupy and scipy sparse matrices
ar = type(imput_meta)((1, input_dask_ar.shape[1]), dtype=input_dask_ar.dtype)
else:
msg = (
"\nYou did not provide metadata for the output, so Dask is running the"
"function on a small dataset to guess output types. "
"It is possible that Dask will guess incorrectly.\n"
"To provide an explicit output types or to silence this message, "
"please provide the `predict_meta`, `predict_proba_meta`,"
"`transform_meta` as appropiate"
)
warnings.warn(msg)
ar = np.zeros(shape=(1, input_dask_ar.shape[1]), dtype=input_dask_ar.dtype)
return model_fn(ar, estimator)
|
40,105 |
def create_symbolic_link_edges(data_graph):
edge_id = 0
for node in data_graph['nodes']:
if node['group'] == 'inode/symlink':
link_to = node['full_file_type'].split(' ')[3].split('\'')[1]
for match in data_graph['nodes']:
if match['label'] == link_to:
edge = {'source': node['id'], 'target': match['id'], 'id': edge_id}
data_graph['edges'].append(edge)
edge_id += 1
return data_graph, edge_id
|
def create_symbolic_link_edges(data_graph):
edge_id = 0
for node in data_graph['nodes']:
if node['group'] == 'inode/symlink':
link_to = node['full_file_type'].split(' ')[3].split('\'')[1]
for match in data_graph['nodes']:
if match['label'] == link_to:
edge = {'source': node['id'], 'target': match['id'], 'id': edge_id}
data_graph['edges'].append(edge)
edge_id += 1
return edge_id
|
22,661 |
def _complete_gallery_conf(sphinx_gallery_conf, src_dir, plot_gallery,
abort_on_example_error, lang='python',
builder_name='html', app=None):
gallery_conf = copy.deepcopy(DEFAULT_GALLERY_CONF)
gallery_conf.update(sphinx_gallery_conf)
if sphinx_gallery_conf.get('find_mayavi_figures', False):
logger.warning(
"Deprecated image scraping variable `find_mayavi_figures`\n"
"detected, use `image_scrapers` instead as:\n\n"
" image_scrapers=('matplotlib', 'mayavi')",
type=DeprecationWarning)
gallery_conf['image_scrapers'] += ('mayavi',)
gallery_conf.update(plot_gallery=plot_gallery)
gallery_conf.update(abort_on_example_error=abort_on_example_error)
gallery_conf['src_dir'] = src_dir
gallery_conf['app'] = app
if gallery_conf.get("mod_example_dir", False):
backreferences_warning = """\n========
Sphinx-Gallery found the configuration key 'mod_example_dir'. This
is deprecated, and you should now use the key 'backreferences_dir'
instead. Support for 'mod_example_dir' will be removed in a subsequent
version of Sphinx-Gallery. For more details, see the backreferences
documentation:
https://sphinx-gallery.github.io/configuration.html#references-to-examples""" # noqa: E501
gallery_conf['backreferences_dir'] = gallery_conf['mod_example_dir']
logger.warning(
backreferences_warning,
type=DeprecationWarning)
if gallery_conf.get("thumbnail_size", False):
thumbnail_warning = ("'thumbnail_size' configuration not set. The "
"default size will be changed from (400, 280) "
"to (160, 112) in version 0.9.0.")
logger.warning(thumbnail_warning, type=DeprecationWarning)
# Check capture_repr
capture_repr = gallery_conf['capture_repr']
supported_reprs = ['__repr__', '__str__', '_repr_html_']
if isinstance(capture_repr, tuple):
for rep in capture_repr:
if rep not in supported_reprs:
raise ConfigError("All entries in 'capture_repr' must be one "
"of %s, got: %s" % (supported_reprs, rep))
else:
raise ConfigError("'capture_repr' must be a tuple, got: %s"
% (type(capture_repr),))
# Check ignore_repr_types
if not isinstance(gallery_conf['ignore_repr_types'], str):
raise ConfigError("'ignore_repr_types' must be a string, got: %s"
% (type(gallery_conf['ignore_repr_types']),))
# deal with show_memory
gallery_conf['memory_base'] = 0.
if gallery_conf['show_memory']:
if not callable(gallery_conf['show_memory']): # True-like
try:
from memory_profiler import memory_usage # noqa
except ImportError:
logger.warning("Please install 'memory_profiler' to enable "
"peak memory measurements.")
gallery_conf['show_memory'] = False
else:
def call_memory(func):
mem, out = memory_usage(func, max_usage=True, retval=True,
multiprocess=True)
try:
mem = mem[0] # old MP always returned a list
except TypeError: # 'float' object is not subscriptable
pass
return mem, out
gallery_conf['call_memory'] = call_memory
gallery_conf['memory_base'] = _get_memory_base(gallery_conf)
else:
gallery_conf['call_memory'] = gallery_conf['show_memory']
if not gallery_conf['show_memory']: # can be set to False above
def call_memory(func):
return 0., func()
gallery_conf['call_memory'] = call_memory
assert callable(gallery_conf['call_memory'])
# deal with scrapers
scrapers = gallery_conf['image_scrapers']
if not isinstance(scrapers, (tuple, list)):
scrapers = [scrapers]
scrapers = list(scrapers)
for si, scraper in enumerate(scrapers):
if isinstance(scraper, str):
if scraper in _scraper_dict:
scraper = _scraper_dict[scraper]
else:
orig_scraper = scraper
try:
scraper = import_module(scraper)
scraper = getattr(scraper, '_get_sg_image_scraper')
scraper = scraper()
except Exception as exp:
raise ConfigError('Unknown image scraper %r, got:\n%s'
% (orig_scraper, exp))
scrapers[si] = scraper
if not callable(scraper):
raise ConfigError('Scraper %r was not callable' % (scraper,))
gallery_conf['image_scrapers'] = tuple(scrapers)
del scrapers
# Here we try to set up matplotlib but don't raise an error,
# we will raise an error later when we actually try to use it
# (if we do so) in scrapers.py.
# In principle we could look to see if there is a matplotlib scraper
# in our scrapers list, but this would be backward incompatible with
# anyone using or relying on our Agg-setting behavior (e.g., for some
# custom matplotlib SVG scraper as in our docs).
# Eventually we can make this a config var like matplotlib_agg or something
# if people need us not to set it to Agg.
try:
_import_matplotlib()
except (ImportError, ValueError):
pass
# compress_images
compress_images = gallery_conf['compress_images']
if isinstance(compress_images, str):
compress_images = [compress_images]
elif not isinstance(compress_images, (tuple, list)):
raise ConfigError('compress_images must be a tuple, list, or str, '
'got %s' % (type(compress_images),))
compress_images = list(compress_images)
allowed_values = ('images', 'thumbnails')
pops = list()
for ki, kind in enumerate(compress_images):
if kind not in allowed_values:
if kind.startswith('-'):
pops.append(ki)
continue
raise ConfigError('All entries in compress_images must be one of '
'%s or a command-line switch starting with "-", '
'got %r' % (allowed_values, kind))
compress_images_args = [compress_images.pop(p) for p in pops[::-1]]
if len(compress_images) and not _has_optipng():
logger.warning(
'optipng binaries not found, PNG %s will not be optimized'
% (' and '.join(compress_images),))
compress_images = ()
gallery_conf['compress_images'] = compress_images
gallery_conf['compress_images_args'] = compress_images_args
# deal with resetters
resetters = gallery_conf['reset_modules']
if not isinstance(resetters, (tuple, list)):
resetters = [resetters]
resetters = list(resetters)
for ri, resetter in enumerate(resetters):
if isinstance(resetter, str):
if resetter not in _reset_dict:
raise ConfigError('Unknown module resetter named %r'
% (resetter,))
resetters[ri] = _reset_dict[resetter]
elif not callable(resetter):
raise ConfigError('Module resetter %r was not callable'
% (resetter,))
gallery_conf['reset_modules'] = tuple(resetters)
lang = lang if lang in ('python', 'python3', 'default') else 'python'
gallery_conf['lang'] = lang
del resetters
# Ensure the first cell text is a string if we have it
first_cell = gallery_conf.get("first_notebook_cell")
if (not isinstance(first_cell, str)) and (first_cell is not None):
raise ConfigError("The 'first_notebook_cell' parameter must be type "
"str or None, found type %s" % type(first_cell))
# Ensure the last cell text is a string if we have it
last_cell = gallery_conf.get("last_notebook_cell")
if (not isinstance(last_cell, str)) and (last_cell is not None):
raise ConfigError("The 'last_notebook_cell' parameter must be type str"
" or None, found type %s" % type(last_cell))
# Check pypandoc
pypandoc = gallery_conf['pypandoc']
if not isinstance(pypandoc, (dict, bool)):
raise ConfigError("'pypandoc' parameter must be of type bool or dict,"
"got: %s." % type(pypandoc))
gallery_conf['pypandoc'] = dict() if pypandoc is True else pypandoc
has_pypandoc, version = _has_pypandoc()
if isinstance(gallery_conf['pypandoc'], dict) and has_pypandoc is None:
logger.warning("'pypandoc' not available. Using Sphinx-Gallery to "
"convert rst text blocks to markdown for .ipynb files.")
gallery_conf['pypandoc'] = False
else:
logger.info("Using pandoc version: %s to convert rst text blocks to "
"markdown for .ipynb files" % (version,))
if isinstance(pypandoc, dict):
accepted_keys = ('extra_args', 'filters')
for key in pypandoc:
if key not in accepted_keys:
raise ConfigError("'pypandoc' only accepts the following key "
"values: %s, got: %s."
% (accepted_keys, key))
# Make it easy to know which builder we're in
gallery_conf['builder_name'] = builder_name
gallery_conf['titles'] = {}
# Ensure 'backreferences_dir' is str, pathlib.Path or None
backref = gallery_conf['backreferences_dir']
if (not isinstance(backref, (str, pathlib.Path))) and \
(backref is not None):
raise ConfigError("The 'backreferences_dir' parameter must be of type "
"str, pathlib.Path or None, "
"found type %s" % type(backref))
# if 'backreferences_dir' is pathlib.Path, make str for Python <=3.5
# compatibility
if isinstance(backref, pathlib.Path):
gallery_conf['backreferences_dir'] = str(backref)
if not isinstance(gallery_conf['css'], (list, tuple)):
raise ConfigError('gallery_conf["css"] must be list or tuple, got %r'
% (gallery_conf['css'],))
for css in gallery_conf['css']:
if css not in _KNOWN_CSS:
raise ConfigError('Unknown css %r, must be one of %r'
% (css, _KNOWN_CSS))
if gallery_conf['app'] is not None: # can be None in testing
gallery_conf['app'].add_css_file(css + '.css')
return gallery_conf
|
def _complete_gallery_conf(sphinx_gallery_conf, src_dir, plot_gallery,
abort_on_example_error, lang='python',
builder_name='html', app=None):
gallery_conf = copy.deepcopy(DEFAULT_GALLERY_CONF)
gallery_conf.update(sphinx_gallery_conf)
if sphinx_gallery_conf.get('find_mayavi_figures', False):
logger.warning(
"Deprecated image scraping variable `find_mayavi_figures`\n"
"detected, use `image_scrapers` instead as:\n\n"
" image_scrapers=('matplotlib', 'mayavi')",
type=DeprecationWarning)
gallery_conf['image_scrapers'] += ('mayavi',)
gallery_conf.update(plot_gallery=plot_gallery)
gallery_conf.update(abort_on_example_error=abort_on_example_error)
gallery_conf['src_dir'] = src_dir
gallery_conf['app'] = app
if gallery_conf.get("mod_example_dir", False):
backreferences_warning = """\n========
Sphinx-Gallery found the configuration key 'mod_example_dir'. This
is deprecated, and you should now use the key 'backreferences_dir'
instead. Support for 'mod_example_dir' will be removed in a subsequent
version of Sphinx-Gallery. For more details, see the backreferences
documentation:
https://sphinx-gallery.github.io/configuration.html#references-to-examples""" # noqa: E501
gallery_conf['backreferences_dir'] = gallery_conf['mod_example_dir']
logger.warning(
backreferences_warning,
type=DeprecationWarning)
if 'thumbnail_size' not in sphinx_gallery_conf:
thumbnail_warning = ("'thumbnail_size' configuration not set. The "
"default size will be changed from (400, 280) "
"to (160, 112) in version 0.9.0.")
logger.warning(thumbnail_warning, type=DeprecationWarning)
# Check capture_repr
capture_repr = gallery_conf['capture_repr']
supported_reprs = ['__repr__', '__str__', '_repr_html_']
if isinstance(capture_repr, tuple):
for rep in capture_repr:
if rep not in supported_reprs:
raise ConfigError("All entries in 'capture_repr' must be one "
"of %s, got: %s" % (supported_reprs, rep))
else:
raise ConfigError("'capture_repr' must be a tuple, got: %s"
% (type(capture_repr),))
# Check ignore_repr_types
if not isinstance(gallery_conf['ignore_repr_types'], str):
raise ConfigError("'ignore_repr_types' must be a string, got: %s"
% (type(gallery_conf['ignore_repr_types']),))
# deal with show_memory
gallery_conf['memory_base'] = 0.
if gallery_conf['show_memory']:
if not callable(gallery_conf['show_memory']): # True-like
try:
from memory_profiler import memory_usage # noqa
except ImportError:
logger.warning("Please install 'memory_profiler' to enable "
"peak memory measurements.")
gallery_conf['show_memory'] = False
else:
def call_memory(func):
mem, out = memory_usage(func, max_usage=True, retval=True,
multiprocess=True)
try:
mem = mem[0] # old MP always returned a list
except TypeError: # 'float' object is not subscriptable
pass
return mem, out
gallery_conf['call_memory'] = call_memory
gallery_conf['memory_base'] = _get_memory_base(gallery_conf)
else:
gallery_conf['call_memory'] = gallery_conf['show_memory']
if not gallery_conf['show_memory']: # can be set to False above
def call_memory(func):
return 0., func()
gallery_conf['call_memory'] = call_memory
assert callable(gallery_conf['call_memory'])
# deal with scrapers
scrapers = gallery_conf['image_scrapers']
if not isinstance(scrapers, (tuple, list)):
scrapers = [scrapers]
scrapers = list(scrapers)
for si, scraper in enumerate(scrapers):
if isinstance(scraper, str):
if scraper in _scraper_dict:
scraper = _scraper_dict[scraper]
else:
orig_scraper = scraper
try:
scraper = import_module(scraper)
scraper = getattr(scraper, '_get_sg_image_scraper')
scraper = scraper()
except Exception as exp:
raise ConfigError('Unknown image scraper %r, got:\n%s'
% (orig_scraper, exp))
scrapers[si] = scraper
if not callable(scraper):
raise ConfigError('Scraper %r was not callable' % (scraper,))
gallery_conf['image_scrapers'] = tuple(scrapers)
del scrapers
# Here we try to set up matplotlib but don't raise an error,
# we will raise an error later when we actually try to use it
# (if we do so) in scrapers.py.
# In principle we could look to see if there is a matplotlib scraper
# in our scrapers list, but this would be backward incompatible with
# anyone using or relying on our Agg-setting behavior (e.g., for some
# custom matplotlib SVG scraper as in our docs).
# Eventually we can make this a config var like matplotlib_agg or something
# if people need us not to set it to Agg.
try:
_import_matplotlib()
except (ImportError, ValueError):
pass
# compress_images
compress_images = gallery_conf['compress_images']
if isinstance(compress_images, str):
compress_images = [compress_images]
elif not isinstance(compress_images, (tuple, list)):
raise ConfigError('compress_images must be a tuple, list, or str, '
'got %s' % (type(compress_images),))
compress_images = list(compress_images)
allowed_values = ('images', 'thumbnails')
pops = list()
for ki, kind in enumerate(compress_images):
if kind not in allowed_values:
if kind.startswith('-'):
pops.append(ki)
continue
raise ConfigError('All entries in compress_images must be one of '
'%s or a command-line switch starting with "-", '
'got %r' % (allowed_values, kind))
compress_images_args = [compress_images.pop(p) for p in pops[::-1]]
if len(compress_images) and not _has_optipng():
logger.warning(
'optipng binaries not found, PNG %s will not be optimized'
% (' and '.join(compress_images),))
compress_images = ()
gallery_conf['compress_images'] = compress_images
gallery_conf['compress_images_args'] = compress_images_args
# deal with resetters
resetters = gallery_conf['reset_modules']
if not isinstance(resetters, (tuple, list)):
resetters = [resetters]
resetters = list(resetters)
for ri, resetter in enumerate(resetters):
if isinstance(resetter, str):
if resetter not in _reset_dict:
raise ConfigError('Unknown module resetter named %r'
% (resetter,))
resetters[ri] = _reset_dict[resetter]
elif not callable(resetter):
raise ConfigError('Module resetter %r was not callable'
% (resetter,))
gallery_conf['reset_modules'] = tuple(resetters)
lang = lang if lang in ('python', 'python3', 'default') else 'python'
gallery_conf['lang'] = lang
del resetters
# Ensure the first cell text is a string if we have it
first_cell = gallery_conf.get("first_notebook_cell")
if (not isinstance(first_cell, str)) and (first_cell is not None):
raise ConfigError("The 'first_notebook_cell' parameter must be type "
"str or None, found type %s" % type(first_cell))
# Ensure the last cell text is a string if we have it
last_cell = gallery_conf.get("last_notebook_cell")
if (not isinstance(last_cell, str)) and (last_cell is not None):
raise ConfigError("The 'last_notebook_cell' parameter must be type str"
" or None, found type %s" % type(last_cell))
# Check pypandoc
pypandoc = gallery_conf['pypandoc']
if not isinstance(pypandoc, (dict, bool)):
raise ConfigError("'pypandoc' parameter must be of type bool or dict,"
"got: %s." % type(pypandoc))
gallery_conf['pypandoc'] = dict() if pypandoc is True else pypandoc
has_pypandoc, version = _has_pypandoc()
if isinstance(gallery_conf['pypandoc'], dict) and has_pypandoc is None:
logger.warning("'pypandoc' not available. Using Sphinx-Gallery to "
"convert rst text blocks to markdown for .ipynb files.")
gallery_conf['pypandoc'] = False
else:
logger.info("Using pandoc version: %s to convert rst text blocks to "
"markdown for .ipynb files" % (version,))
if isinstance(pypandoc, dict):
accepted_keys = ('extra_args', 'filters')
for key in pypandoc:
if key not in accepted_keys:
raise ConfigError("'pypandoc' only accepts the following key "
"values: %s, got: %s."
% (accepted_keys, key))
# Make it easy to know which builder we're in
gallery_conf['builder_name'] = builder_name
gallery_conf['titles'] = {}
# Ensure 'backreferences_dir' is str, pathlib.Path or None
backref = gallery_conf['backreferences_dir']
if (not isinstance(backref, (str, pathlib.Path))) and \
(backref is not None):
raise ConfigError("The 'backreferences_dir' parameter must be of type "
"str, pathlib.Path or None, "
"found type %s" % type(backref))
# if 'backreferences_dir' is pathlib.Path, make str for Python <=3.5
# compatibility
if isinstance(backref, pathlib.Path):
gallery_conf['backreferences_dir'] = str(backref)
if not isinstance(gallery_conf['css'], (list, tuple)):
raise ConfigError('gallery_conf["css"] must be list or tuple, got %r'
% (gallery_conf['css'],))
for css in gallery_conf['css']:
if css not in _KNOWN_CSS:
raise ConfigError('Unknown css %r, must be one of %r'
% (css, _KNOWN_CSS))
if gallery_conf['app'] is not None: # can be None in testing
gallery_conf['app'].add_css_file(css + '.css')
return gallery_conf
|
2,661 |
def haversine_distances(X, Y=None):
"""Compute the Haversine distance between samples in X and Y.
The Haversine (or great circle) distance is the angular distance between
two points on the surface of a sphere. The first coordinate of each point
is assumed to be the latitude, the second is the longitude, given
in radians. The dimension of the data must be 2.
.. math::
D(x, y) = 2\\arcsin[\\sqrt{\\sin^2((x1 - y1) / 2)
+ \\cos(x1)\\cos(y1)\\sin^2((x2 - y2) / 2)}]
Parameters
----------
X : array-like of shape (n_samples_X, 2)
A feature array_like object.
Y : array-like of shape (n_samples_Y, 2), default=None
If `None`, uses `Y=X`.
Returns
-------
distance : ndarray of shape (n_samples_X, n_samples_Y)
The distance matrix.
Notes
-----
As the Earth is nearly spherical, the haversine formula provides a good
approximation of the distance between two points of the Earth surface, with
a less than 1% error on average.
Examples
--------
We want to calculate the distance between the Ezeiza Airport
(Buenos Aires, Argentina) and the Charles de Gaulle Airport (Paris,
France).
>>> from sklearn.metrics.pairwise import haversine_distances
>>> from math import radians
>>> bsas = [-34.83333, -58.5166646]
>>> paris = [49.0083899664, 2.53844117956]
>>> bsas_in_radians = [radians(_) for _ in bsas]
>>> paris_in_radians = [radians(_) for _ in paris]
>>> result = haversine_distances([bsas_in_radians, paris_in_radians])
>>> result * 6371000/1000 # multiply by Earth radius to get kilometers
array([[ 0. , 11099.54035582],
[11099.54035582, 0. ]])
"""
from ..metrics import DistanceMetric
return DistanceMetric.get_metric("haversine").pairwise(X, Y)
|
def haversine_distances(X, Y=None):
"""Compute the Haversine distance between samples in X and Y.
The Haversine (or great circle) distance is the angular distance between
two points on the surface of a sphere. The first coordinate of each point
is assumed to be the latitude, the second is the longitude, given
in radians. The dimension of the data must be 2.
.. math::
D(x, y) = 2\\arcsin[\\sqrt{\\sin^2((x1 - y1) / 2)
+ \\cos(x1)\\cos(y1)\\sin^2((x2 - y2) / 2)}]
Parameters
----------
X : array-like of shape (n_samples_X, 2)
A feature array.
Y : array-like of shape (n_samples_Y, 2), default=None
If `None`, uses `Y=X`.
Returns
-------
distance : ndarray of shape (n_samples_X, n_samples_Y)
The distance matrix.
Notes
-----
As the Earth is nearly spherical, the haversine formula provides a good
approximation of the distance between two points of the Earth surface, with
a less than 1% error on average.
Examples
--------
We want to calculate the distance between the Ezeiza Airport
(Buenos Aires, Argentina) and the Charles de Gaulle Airport (Paris,
France).
>>> from sklearn.metrics.pairwise import haversine_distances
>>> from math import radians
>>> bsas = [-34.83333, -58.5166646]
>>> paris = [49.0083899664, 2.53844117956]
>>> bsas_in_radians = [radians(_) for _ in bsas]
>>> paris_in_radians = [radians(_) for _ in paris]
>>> result = haversine_distances([bsas_in_radians, paris_in_radians])
>>> result * 6371000/1000 # multiply by Earth radius to get kilometers
array([[ 0. , 11099.54035582],
[11099.54035582, 0. ]])
"""
from ..metrics import DistanceMetric
return DistanceMetric.get_metric("haversine").pairwise(X, Y)
|
38,999 |
def test_enum_str_default():
class MyEnum(str, Enum):
FOO = 'foo'
class UserModel(BaseModel):
friends: MyEnum = MyEnum.FOO
generated_schema_properties = UserModel.schema().get('properties', {})
assert generated_schema_properties.get('friends', {}).get('default', None) is MyEnum.FOO.value
|
def test_enum_str_default():
class MyEnum(str, Enum):
FOO = 'foo'
class UserModel(BaseModel):
friends: MyEnum = MyEnum.FOO
assert UserModel.schema()['properties']['friends']['default'] is MyEnum.FOO.value
|
52,754 |
def rec_iter(
filenames: List[str],
sensor: Optional[str],
ignore_rules: Dict[str, Dict[str, List[Tuple[int, int]]]],
) -> Generator[Record, None, None]:
ignorenets = ignore_rules.get("IGNORENETS", {})
neverignore = ignore_rules.get("NEVERIGNORE", {})
for fname in filenames:
with P0fFile(fname) as fdesc:
for line in fdesc:
if not line:
continue
if "mod" not in line:
LOGGER.warning("no mod detected [%r]", line)
continue
if line["mod"] not in ["syn", "syn+ack"]:
continue
if "subj" not in line or line["subj"] not in line:
LOGGER.warning("no subj detected [%r]", line)
continue
if "raw_sig" not in line:
LOGGER.warning("no raw_sig detected [%r]", line)
continue
infos = {}
if "os" in line and line["os"] != "???":
infos["os"] = line["os"]
if "dist" in line:
infos["dist"] = line["dist"]
if "params" in line and line["params"].lower() != "none":
infos["params"] = line["params"]
host = line[line["subj"]].split("/")[0]
srvport = int(line["srv"].split("/")[1])
for rec in handle_rec(
# sensor
sensor,
# ignorenets,
ignorenets,
# neverignore,
neverignore,
# timestamp
timestamp=line["ts"],
# uid
uid=None,
# host
host=host,
# srvport
srvport=srvport,
# recon_type
recon_type="P0FV3_%s" % line["mod"].upper(),
# source
source="P0FV3",
# value
value=line["raw_sig"],
# targetval
targetval=None,
):
rec[1]["infos"] = infos
yield rec
|
def rec_iter(
filenames: List[str],
sensor: Optional[str],
ignore_rules: Dict[str, Dict[str, List[Tuple[int, int]]]],
) -> Generator[Record, None, None]:
ignorenets = ignore_rules.get("IGNORENETS", {})
neverignore = ignore_rules.get("NEVERIGNORE", {})
for fname in filenames:
with P0fFile(fname) as fdesc:
for line in fdesc:
if not line:
continue
if "mod" not in line:
LOGGER.warning("no mod detected [%r]", line)
continue
if line["mod"] not in ["syn", "syn+ack"]:
continue
if "subj" not in line or line["subj"] not in line:
LOGGER.warning("no subj detected [%r]", line)
continue
if "raw_sig" not in line:
LOGGER.warning("no raw_sig detected [%r]", line)
continue
infos = {}
if "os" in line and line["os"] != "???":
infos["os"] = line["os"]
if "dist" in line:
infos["dist"] = line["dist"]
if "params" in line and line["params"].lower() != "none":
infos["params"] = line["params"]
host = line[line["subj"]].split("/")[0]
srvport = int(line["srv"].split("/")[1])
for rec in handle_rec(
# sensor
sensor,
# ignorenets,
ignorenets,
# neverignore,
neverignore,
# timestamp
timestamp=line["ts"],
# uid
uid=None,
# host
host=host,
# srvport
srvport=srvport,
# recon_type
recon_type="P0FV3_%s" % line["mod"].upper(),
# source
source="P0FV3",
# value
value=line["raw_sig"],
# targetval
targetval=None,
):
if infos:
rec["infos"] = infos
yield rec
|
7,001 |
def load(modelXbrl, uri, base=None, referringElement=None, isEntry=False, isDiscovered=False, isIncluded=None, isSupplemental=False, namespace=None, reloadCache=False, **kwargs):
"""Returns a new modelDocument, performing DTS discovery for instance, inline XBRL, schema,
linkbase, and versioning report entry urls.
:param uri: Identification of file to load by string filename or by a FileSource object with a selected content file.
:type uri: str or FileSource
:param referringElement: Source element causing discovery or loading of this document, such as an import or xlink:href
:type referringElement: ModelObject
:param isEntry: True for an entry document
:type isEntry: bool
:param isDiscovered: True if this document is discovered by XBRL rules, otherwise False (such as when schemaLocation and xmlns were the cause of loading the schema)
:type isDiscovered: bool
:param isIncluded: True if this document is the target of an xs:include
:type isIncluded: bool
:param isSupplemental: True if this is processed for link relationships even if neither isEntry or isDiscovered, such as when adding additional language or documentation linkbases
:type isIncluded: bool
:param namespace: The schema namespace of this document, if known and applicable
:type isSupplemental: True if this document is supplemental (not discovered or in DTS but adds labels or instance facts)
:type namespace: str
:param reloadCache: True if desired to reload the web cache for any web-referenced files.
:type reloadCache: bool
:param checkModifiedTime: True if desired to check modifed time of web cached entry point (ahead of usual time stamp checks).
:type checkModifiedTime: bool
"""
if referringElement is None: # used for error messages
referringElement = modelXbrl
normalizedUri = modelXbrl.modelManager.cntlr.webCache.normalizeUrl(uri, base)
modelDocument = modelXbrl.urlDocs.get(normalizedUri)
if modelDocument:
return modelDocument
elif modelXbrl.urlUnloadableDocs.get(normalizedUri): # only return None if in this list and marked True (really not loadable)
return None
elif not normalizedUri:
modelXbrl.error("FileNotLoadable",
_("File name absent, document can not be loaded."),
modelObject=referringElement, fileName=normalizedUri)
return None
if isEntry:
modelXbrl.entryLoadingUrl = normalizedUri # for error loggiong during loading
modelXbrl.uri = normalizedUri
modelXbrl.uriDir = os.path.dirname(normalizedUri)
for i in range(modelXbrl.modelManager.disclosureSystem.maxSubmissionSubdirectoryEntryNesting):
modelXbrl.uriDir = os.path.dirname(modelXbrl.uriDir)
if modelXbrl.modelManager.validateDisclosureSystem and \
not normalizedUri.startswith(modelXbrl.uriDir) and \
not modelXbrl.modelManager.disclosureSystem.hrefValid(normalizedUri):
blocked = modelXbrl.modelManager.disclosureSystem.blockDisallowedReferences
if normalizedUri not in modelXbrl.urlUnloadableDocs:
# HMRC note, HMRC.blockedFile should be in this list if hmrc-taxonomies.xml is maintained an dup to date
modelXbrl.error(("EFM.6.22.00", "GFM.1.1.3", "SBR.NL.2.1.0.06" if normalizedUri.startswith("http") else "SBR.NL.2.2.0.17"),
_("Prohibited file for filings %(blockedIndicator)s: %(url)s"),
edgarCode="cp-2200-Prohibited-Href-Or-Schema-Location",
modelObject=referringElement, url=normalizedUri,
blockedIndicator=_(" blocked") if blocked else "",
messageCodes=("EFM.6.22.00", "GFM.1.1.3", "SBR.NL.2.1.0.06", "SBR.NL.2.2.0.17"))
#modelXbrl.debug("EFM.6.22.02", "traceback %(traceback)s",
# modeObject=referringElement, traceback=traceback.format_stack())
modelXbrl.urlUnloadableDocs[normalizedUri] = blocked
if blocked:
return None
if modelXbrl.modelManager.skipLoading and modelXbrl.modelManager.skipLoading.match(normalizedUri):
return None
if modelXbrl.fileSource.isMappedUrl(normalizedUri):
mappedUri = modelXbrl.fileSource.mappedUrl(normalizedUri)
elif PackageManager.isMappedUrl(normalizedUri):
mappedUri = PackageManager.mappedUrl(normalizedUri)
else:
mappedUri = modelXbrl.modelManager.disclosureSystem.mappedUrl(normalizedUri)
if isEntry:
modelXbrl.entryLoadingUrl = mappedUri # for error loggiong during loading
# don't try reloading if not loadable
if modelXbrl.fileSource.isInArchive(mappedUri):
filepath = mappedUri
else:
filepath = modelXbrl.modelManager.cntlr.webCache.getfilename(mappedUri, reload=reloadCache, checkModifiedTime=kwargs.get("checkModifiedTime",False))
if filepath:
uri = modelXbrl.modelManager.cntlr.webCache.normalizeUrl(filepath)
if filepath is None: # error such as HTTPerror is already logged
if modelXbrl.modelManager.abortOnMajorError and (isEntry or isDiscovered):
modelXbrl.error("FileNotLoadable",
_("File can not be loaded: %(fileName)s \nLoading terminated."),
modelObject=referringElement, fileName=mappedUri)
raise LoadingException()
if normalizedUri not in modelXbrl.urlUnloadableDocs:
if "referringElementUrl" in kwargs:
modelXbrl.error("FileNotLoadable",
_("File can not be loaded: %(fileName)s, referenced from %(referencingFileName)s"),
modelObject=referringElement, fileName=normalizedUri, referencingFileName=kwargs["referringElementUrl"])
else:
modelXbrl.error("FileNotLoadable",
_("File can not be loaded: %(fileName)s"),
modelObject=referringElement, fileName=normalizedUri)
modelXbrl.urlUnloadableDocs[normalizedUri] = True # always blocked if not loadable on this error
return None
isPullLoadable = any(pluginMethod(modelXbrl, mappedUri, normalizedUri, filepath, isEntry=isEntry, namespace=namespace, **kwargs)
for pluginMethod in pluginClassMethods("ModelDocument.IsPullLoadable"))
if not isPullLoadable and os.path.splitext(filepath)[1] in (".xlsx", ".xls", ".csv", ".json"):
modelXbrl.error("FileNotLoadable",
_("File can not be loaded, requires loadFromExcel or loadFromOIM plug-in: %(fileName)s"),
modelObject=referringElement, fileName=normalizedUri)
return None
# load XML and determine type of model document
modelXbrl.modelManager.showStatus(_("parsing {0}").format(uri))
file = None
try:
for pluginMethod in pluginClassMethods("ModelDocument.PullLoader"):
# assumes not possible to check file in string format or not all available at once
modelDocument = pluginMethod(modelXbrl, normalizedUri, filepath, isEntry=isEntry, namespace=namespace, **kwargs)
if isinstance(modelDocument, Exception):
return None
if modelDocument is not None:
return modelDocument
if (modelXbrl.modelManager.validateDisclosureSystem and (
(isEntry and modelXbrl.modelManager.disclosureSystem.validateEntryText) or
(modelXbrl.modelManager.disclosureSystem.validateFileText and
not normalizedUri in modelXbrl.modelManager.disclosureSystem.standardTaxonomiesDict))):
file, _encoding = ValidateFilingText.checkfile(modelXbrl,filepath)
else:
file, _encoding = modelXbrl.fileSource.file(filepath, stripDeclaration=True)
xmlDocument = None
isPluginParserDocument = False
for pluginMethod in pluginClassMethods("ModelDocument.CustomLoader"):
modelDocument = pluginMethod(modelXbrl, file, mappedUri, filepath)
if modelDocument is not None:
file.close()
return modelDocument
_parser, _parserLookupName, _parserLookupClass = parser(modelXbrl,normalizedUri)
xmlDocument = etree.parse(file,parser=_parser,base_url=filepath)
for error in _parser.error_log:
modelXbrl.error("xmlSchema:syntax",
_("%(error)s, %(fileName)s, line %(line)s, column %(column)s"),
modelObject=(referringElement, os.path.basename(uri)),
fileName=os.path.basename(uri),
error=error.message, line=error.line, column=error.column)
file.close()
except (EnvironmentError, KeyError, UnicodeDecodeError) as err: # missing zip file raises KeyError
if file:
file.close()
# retry in case of well known schema locations
if not isIncluded and namespace and namespace in XbrlConst.standardNamespaceSchemaLocations and uri != XbrlConst.standardNamespaceSchemaLocations[namespace]:
return load(modelXbrl, XbrlConst.standardNamespaceSchemaLocations[namespace],
base, referringElement, isEntry, isDiscovered, isIncluded, namespace, reloadCache)
if modelXbrl.modelManager.abortOnMajorError and (isEntry or isDiscovered):
modelXbrl.error("IOerror",
_("%(fileName)s: file error: %(error)s \nLoading terminated."),
modelObject=referringElement, fileName=os.path.basename(uri), error=str(err))
raise LoadingException()
#import traceback
#print("traceback {}".format(traceback.format_tb(sys.exc_info()[2])))
modelXbrl.error("IOerror",
_("%(fileName)s: file error: %(error)s"),
modelObject=referringElement, fileName=os.path.basename(uri), error=str(err))
modelXbrl.urlUnloadableDocs[normalizedUri] = True # not loadable due to IO issue
return None
except (etree.LxmlError, etree.XMLSyntaxError,
SAXParseException,
ValueError) as err: # ValueError raised on bad format of qnames, xmlns'es, or parameters
if file:
file.close()
if not isEntry and str(err) == "Start tag expected, '<' not found, line 1, column 1":
return ModelDocument(modelXbrl, Type.UnknownNonXML, normalizedUri, filepath, None)
else:
modelXbrl.error("xmlSchema:syntax",
_("Unrecoverable error: %(error)s, %(fileName)s"),
modelObject=(referringElement, os.path.basename(uri)), fileName=os.path.basename(uri),
error=str(err), exc_info=True)
modelXbrl.urlUnloadableDocs[normalizedUri] = True # not loadable due to parser issues
return None
except Exception as err:
modelXbrl.error(type(err).__name__,
_("Unrecoverable error: %(error)s, %(fileName)s"),
modelObject=referringElement, fileName=os.path.basename(uri),
error=str(err), exc_info=True)
modelXbrl.urlUnloadableDocs[normalizedUri] = True # not loadable due to exception issue
return None
# identify document
#modelXbrl.modelManager.addToLog("discovery: {0}".format(
# os.path.basename(uri)))
modelXbrl.modelManager.showStatus(_("loading {0}").format(uri))
modelDocument = None
rootNode = xmlDocument.getroot()
if rootNode is not None:
ln = rootNode.localName
ns = rootNode.namespaceURI
# type classification
_type = None
_class = ModelDocument
if ns == XbrlConst.xsd and ln == "schema":
_type = Type.SCHEMA
if not isEntry and not isIncluded:
# check if already loaded under a different url
targetNamespace = rootNode.get("targetNamespace")
if targetNamespace and modelXbrl.namespaceDocs.get(targetNamespace):
otherModelDoc = modelXbrl.namespaceDocs[targetNamespace][0]
if otherModelDoc.basename == os.path.basename(uri):
if os.path.normpath(otherModelDoc.uri) != os.path.normpath(uri): # tolerate \ vs / or ../ differences
modelXbrl.urlDocs[uri] = otherModelDoc
modelXbrl.warning("info:duplicatedSchema",
_("Schema file with same targetNamespace %(targetNamespace)s loaded from %(fileName)s and %(otherFileName)s"),
modelObject=referringElement, targetNamespace=targetNamespace, fileName=uri, otherFileName=otherModelDoc.uri)
return otherModelDoc
elif (isEntry or isDiscovered or isSupplemental) and ns == XbrlConst.link:
if ln == "linkbase":
_type = Type.LINKBASE
elif ln == "xbrl":
_type = Type.INSTANCE
else:
_type = Type.UnknownXML
elif isEntry and ns == XbrlConst.xbrli:
if ln == "xbrl":
_type = Type.INSTANCE
else:
_type = Type.UnknownXML
elif ns == XbrlConst.xhtml and \
(ln == "html" or ln == "xhtml"):
_type = Type.UnknownXML
if (# not a valid test: XbrlConst.ixbrlAll & set(rootNode.nsmap.values()) or
any(e is not None for e in rootNode.iter(*XbrlConst.ixbrlTags))):
_type = Type.INLINEXBRL
elif ln == "report" and ns == XbrlConst.ver:
_type = Type.VERSIONINGREPORT
from arelle.ModelVersReport import ModelVersReport
_class = ModelVersReport
elif ln in ("testcases", "documentation", "testSuite", "registries"):
_type = Type.TESTCASESINDEX
elif ln in ("testcase", "testSet"):
_type = Type.TESTCASE
elif ln == "registry" and ns == XbrlConst.registry:
_type = Type.REGISTRY
elif ln == "test-suite" and ns == "http://www.w3.org/2005/02/query-test-XQTSCatalog":
_type = Type.XPATHTESTSUITE
elif ln == "rss":
_type = Type.RSSFEED
from arelle.ModelRssObject import ModelRssObject
_class = ModelRssObject
elif ln == "ptvl":
_type = Type.ARCSINFOSET
elif ln == "facts":
_type = Type.FACTDIMSINFOSET
elif (# not a valid test: XbrlConst.ixbrlAll & set(rootNode.nsmap.values()) or
any(e is not None for e in rootNode.iter(*XbrlConst.ixbrlTags))):
# any xml document can be an inline document, only html and xhtml are found above
_type = Type.INLINEXBRL
else:
for pluginMethod in pluginClassMethods("ModelDocument.IdentifyType"):
_identifiedType = pluginMethod(modelXbrl, rootNode, filepath)
if _identifiedType is not None:
_type, _class, rootNode = _identifiedType
break
if _type is None:
_type = Type.UnknownXML
nestedInline = None
for htmlElt in rootNode.iter(tag="{http://www.w3.org/1999/xhtml}html"):
nestedInline = htmlElt
break
if nestedInline is None:
for htmlElt in rootNode.iter(tag="{http://www.w3.org/1999/xhtml}xhtml"):
nestedInline = htmlElt
break
if nestedInline is not None:
if (# not a valid test: XbrlConst.ixbrl in nestedInline.nsmap.values() or
any(e is not None for e in rootNode.iter(*XbrlConst.ixbrlTags))):
_type = Type.INLINEXBRL
rootNode = nestedInline
modelDocument = _class(modelXbrl, _type, normalizedUri, filepath, xmlDocument)
rootNode.init(modelDocument)
modelDocument.parser = _parser # needed for XmlUtil addChild's makeelement
modelDocument.parserLookupName = _parserLookupName
modelDocument.parserLookupClass = _parserLookupClass
modelDocument.xmlRootElement = modelDocument.targetXbrlRootElement = rootNode
modelDocument.schemaLocationElements.add(rootNode)
modelDocument.documentEncoding = _encoding
if isEntry or isDiscovered:
modelDocument.inDTS = True
# discovery (parsing)
if any(pluginMethod(modelDocument)
for pluginMethod in pluginClassMethods("ModelDocument.Discover")):
pass # discovery was performed by plug-in, we're done
elif _type == Type.SCHEMA:
modelDocument.schemaDiscover(rootNode, isIncluded, isSupplemental, namespace)
elif _type == Type.LINKBASE:
modelDocument.linkbaseDiscover(rootNode)
elif _type == Type.INSTANCE:
modelDocument.instanceDiscover(rootNode)
elif _type == Type.INLINEXBRL:
try:
modelDocument.inlineXbrlDiscover(rootNode)
except RecursionError as err:
schemaErrorCount = sum(e == "xmlSchema:syntax" for e in modelXbrl.errors)
if schemaErrorCount > 100: # arbitrary count, in case of tons of unclosed or mismatched xhtml start-end elements
modelXbrl.error("html:unprocessable",
_("%(element)s error, unable to process html syntax due to %(schemaErrorCount)s schema syntax errors"),
modelObject=rootNode, element=rootNode.localName.title(), schemaErrorCount=schemaErrorCount)
else:
self.modelXbrl.error("html:validationException",
_("%(element)s error %(error)s, unable to process html."),
modelObject=rootNode, element=rootNode.localName.title(), error=type(err).__name__)
return None # rootNode is not processed further to find any facts because there could be many recursion errors
elif _type == Type.VERSIONINGREPORT:
modelDocument.versioningReportDiscover(rootNode)
elif _type == Type.TESTCASESINDEX:
modelDocument.testcasesIndexDiscover(xmlDocument)
elif _type == Type.TESTCASE:
modelDocument.testcaseDiscover(rootNode)
elif _type == Type.REGISTRY:
modelDocument.registryDiscover(rootNode)
elif _type == Type.XPATHTESTSUITE:
modelDocument.xPathTestSuiteDiscover(rootNode)
elif _type == Type.VERSIONINGREPORT:
modelDocument.versioningReportDiscover(rootNode)
elif _type == Type.RSSFEED:
modelDocument.rssFeedDiscover(rootNode)
if isEntry or _type == Type.INLINEXBRL: # inline doc set members may not be entry but may have processing instructions
for pi in modelDocument.processingInstructions:
if pi.target == "arelle-unit-test":
modelXbrl.arelleUnitTests[pi.get("location")] = pi.get("action")
if isEntry:
while modelXbrl.schemaDocsToValidate:
doc = modelXbrl.schemaDocsToValidate.pop()
XmlValidateSchema.validate(doc, doc.xmlRootElement, doc.targetNamespace) # validate schema elements
if hasattr(modelXbrl, "ixdsHtmlElements"):
inlineIxdsDiscover(modelXbrl, modelDocument) # compile cross-document IXDS references
if isEntry or isSupplemental:
# re-order base set keys for entry point or supplemental linkbase addition
modelXbrl.baseSets = OrderedDefaultDict( # order by linkRole, arcRole of key
modelXbrl.baseSets.default_factory,
sorted(modelXbrl.baseSets.items(), key=lambda i: (i[0][0] or "",i[0][1] or "")))
return modelDocument
|
def load(modelXbrl, uri, base=None, referringElement=None, isEntry=False, isDiscovered=False, isIncluded=None, isSupplemental=False, namespace=None, reloadCache=False, **kwargs):
"""Returns a new modelDocument, performing DTS discovery for instance, inline XBRL, schema,
linkbase, and versioning report entry urls.
:param uri: Identification of file to load by string filename or by a FileSource object with a selected content file.
:type uri: str or FileSource
:param referringElement: Source element causing discovery or loading of this document, such as an import or xlink:href
:type referringElement: ModelObject
:param isEntry: True for an entry document
:type isEntry: bool
:param isDiscovered: True if this document is discovered by XBRL rules, otherwise False (such as when schemaLocation and xmlns were the cause of loading the schema)
:type isDiscovered: bool
:param isIncluded: True if this document is the target of an xs:include
:type isIncluded: bool
:param isSupplemental: True if this is processed for link relationships even if neither isEntry or isDiscovered, such as when adding additional language or documentation linkbases
:type isIncluded: bool
:param namespace: The schema namespace of this document, if known and applicable
:type isSupplemental: True if this document is supplemental (not discovered or in DTS but adds labels or instance facts)
:type namespace: str
:param reloadCache: True if desired to reload the web cache for any web-referenced files.
:type reloadCache: bool
:param checkModifiedTime: True if desired to check modifed time of web cached entry point (ahead of usual time stamp checks).
:type checkModifiedTime: bool
"""
if referringElement is None: # used for error messages
referringElement = modelXbrl
normalizedUri = modelXbrl.modelManager.cntlr.webCache.normalizeUrl(uri, base)
modelDocument = modelXbrl.urlDocs.get(normalizedUri)
if modelDocument:
return modelDocument
elif modelXbrl.urlUnloadableDocs.get(normalizedUri): # only return None if in this list and marked True (really not loadable)
return None
elif not normalizedUri:
modelXbrl.error("FileNotLoadable",
_("File name absent, document can not be loaded."),
modelObject=referringElement, fileName=normalizedUri)
return None
if isEntry:
modelXbrl.entryLoadingUrl = normalizedUri # for error loggiong during loading
modelXbrl.uri = normalizedUri
modelXbrl.uriDir = os.path.dirname(normalizedUri)
for i in range(modelXbrl.modelManager.disclosureSystem.maxSubmissionSubdirectoryEntryNesting):
modelXbrl.uriDir = os.path.dirname(modelXbrl.uriDir)
if modelXbrl.modelManager.validateDisclosureSystem and \
not normalizedUri.startswith(modelXbrl.uriDir) and \
not modelXbrl.modelManager.disclosureSystem.hrefValid(normalizedUri):
blocked = modelXbrl.modelManager.disclosureSystem.blockDisallowedReferences
if normalizedUri not in modelXbrl.urlUnloadableDocs:
# HMRC note, HMRC.blockedFile should be in this list if hmrc-taxonomies.xml is maintained an dup to date
modelXbrl.error(("EFM.6.22.00", "GFM.1.1.3", "SBR.NL.2.1.0.06" if normalizedUri.startswith("http") else "SBR.NL.2.2.0.17"),
_("Prohibited file for filings %(blockedIndicator)s: %(url)s"),
edgarCode="cp-2200-Prohibited-Href-Or-Schema-Location",
modelObject=referringElement, url=normalizedUri,
blockedIndicator=_(" blocked") if blocked else "",
messageCodes=("EFM.6.22.00", "GFM.1.1.3", "SBR.NL.2.1.0.06", "SBR.NL.2.2.0.17"))
#modelXbrl.debug("EFM.6.22.02", "traceback %(traceback)s",
# modeObject=referringElement, traceback=traceback.format_stack())
modelXbrl.urlUnloadableDocs[normalizedUri] = blocked
if blocked:
return None
if modelXbrl.modelManager.skipLoading and modelXbrl.modelManager.skipLoading.match(normalizedUri):
return None
if modelXbrl.fileSource.isMappedUrl(normalizedUri):
mappedUri = modelXbrl.fileSource.mappedUrl(normalizedUri)
elif PackageManager.isMappedUrl(normalizedUri):
mappedUri = PackageManager.mappedUrl(normalizedUri)
else:
mappedUri = modelXbrl.modelManager.disclosureSystem.mappedUrl(normalizedUri)
if isEntry:
modelXbrl.entryLoadingUrl = mappedUri # for error loggiong during loading
# don't try reloading if not loadable
if modelXbrl.fileSource.isInArchive(mappedUri):
filepath = mappedUri
else:
filepath = modelXbrl.modelManager.cntlr.webCache.getfilename(mappedUri, reload=reloadCache, checkModifiedTime=kwargs.get("checkModifiedTime",False))
if filepath:
uri = modelXbrl.modelManager.cntlr.webCache.normalizeUrl(filepath)
if filepath is None: # error such as HTTPerror is already logged
if modelXbrl.modelManager.abortOnMajorError and (isEntry or isDiscovered):
modelXbrl.error("FileNotLoadable",
_("File can not be loaded: %(fileName)s \nLoading terminated."),
modelObject=referringElement, fileName=mappedUri)
raise LoadingException()
if normalizedUri not in modelXbrl.urlUnloadableDocs:
if "referringElementUrl" in kwargs:
modelXbrl.error("FileNotLoadable",
_("File can not be loaded: %(fileName)s, referenced from %(referencingFileName)s"),
modelObject=referringElement, fileName=normalizedUri, referencingFileName=kwargs["referringElementUrl"])
else:
modelXbrl.error("FileNotLoadable",
_("File can not be loaded: %(fileName)s"),
modelObject=referringElement, fileName=normalizedUri)
modelXbrl.urlUnloadableDocs[normalizedUri] = True # always blocked if not loadable on this error
return None
isPullLoadable = any(pluginMethod(modelXbrl, mappedUri, normalizedUri, filepath, isEntry=isEntry, namespace=namespace, **kwargs)
for pluginMethod in pluginClassMethods("ModelDocument.IsPullLoadable"))
if not isPullLoadable and os.path.splitext(filepath)[1] in (".xlsx", ".xls", ".csv", ".json"):
modelXbrl.error("FileNotLoadable",
_("File can not be loaded, requires loadFromExcel or loadFromOIM plug-in: %(fileName)s"),
modelObject=referringElement, fileName=normalizedUri)
return None
# load XML and determine type of model document
modelXbrl.modelManager.showStatus(_("parsing {0}").format(uri))
file = None
try:
for pluginMethod in pluginClassMethods("ModelDocument.PullLoader"):
# assumes not possible to check file in string format or not all available at once
modelDocument = pluginMethod(modelXbrl, normalizedUri, filepath, isEntry=isEntry, namespace=namespace, **kwargs)
if isinstance(modelDocument, Exception):
return None
if modelDocument is not None:
return modelDocument
if (modelXbrl.modelManager.validateDisclosureSystem and (
(isEntry and modelXbrl.modelManager.disclosureSystem.validateEntryText) or
(modelXbrl.modelManager.disclosureSystem.validateFileText and
not normalizedUri in modelXbrl.modelManager.disclosureSystem.standardTaxonomiesDict))):
file, _encoding = ValidateFilingText.checkfile(modelXbrl,filepath)
else:
file, _encoding = modelXbrl.fileSource.file(filepath, stripDeclaration=True)
xmlDocument = None
isPluginParserDocument = False
for pluginMethod in pluginClassMethods("ModelDocument.CustomLoader"):
modelDocument = pluginMethod(modelXbrl, file, mappedUri, filepath)
if modelDocument is not None:
file.close()
return modelDocument
_parser, _parserLookupName, _parserLookupClass = parser(modelXbrl,normalizedUri)
xmlDocument = etree.parse(file,parser=_parser,base_url=filepath)
for error in _parser.error_log:
modelXbrl.error("xmlSchema:syntax",
_("%(error)s, %(fileName)s, line %(line)s, column %(column)s"),
modelObject=(referringElement, os.path.basename(uri)),
fileName=os.path.basename(uri),
error=error.message, line=error.line, column=error.column)
file.close()
except (EnvironmentError, KeyError, UnicodeDecodeError) as err: # missing zip file raises KeyError
if file:
file.close()
# retry in case of well known schema locations
if not isIncluded and namespace and namespace in XbrlConst.standardNamespaceSchemaLocations and uri != XbrlConst.standardNamespaceSchemaLocations[namespace]:
return load(modelXbrl, XbrlConst.standardNamespaceSchemaLocations[namespace],
base, referringElement, isEntry, isDiscovered, isIncluded, namespace, reloadCache)
if modelXbrl.modelManager.abortOnMajorError and (isEntry or isDiscovered):
modelXbrl.error("IOerror",
_("%(fileName)s: file error: %(error)s \nLoading terminated."),
modelObject=referringElement, fileName=os.path.basename(uri), error=str(err))
raise LoadingException()
#import traceback
#print("traceback {}".format(traceback.format_tb(sys.exc_info()[2])))
modelXbrl.error("IOerror",
_("%(fileName)s: file error: %(error)s"),
modelObject=referringElement, fileName=os.path.basename(uri), error=str(err))
modelXbrl.urlUnloadableDocs[normalizedUri] = True # not loadable due to IO issue
return None
except (etree.LxmlError, etree.XMLSyntaxError,
SAXParseException,
ValueError) as err: # ValueError raised on bad format of qnames, xmlns'es, or parameters
if file:
file.close()
if not isEntry and str(err) == "Start tag expected, '<' not found, line 1, column 1":
return ModelDocument(modelXbrl, Type.UnknownNonXML, normalizedUri, filepath, None)
else:
modelXbrl.error("xmlSchema:syntax",
_("Unrecoverable error: %(error)s, %(fileName)s"),
modelObject=(referringElement, os.path.basename(uri)), fileName=os.path.basename(uri),
error=str(err), exc_info=True)
modelXbrl.urlUnloadableDocs[normalizedUri] = True # not loadable due to parser issues
return None
except Exception as err:
modelXbrl.error(type(err).__name__,
_("Unrecoverable error: %(error)s, %(fileName)s"),
modelObject=referringElement, fileName=os.path.basename(uri),
error=str(err), exc_info=True)
modelXbrl.urlUnloadableDocs[normalizedUri] = True # not loadable due to exception issue
return None
# identify document
#modelXbrl.modelManager.addToLog("discovery: {0}".format(
# os.path.basename(uri)))
modelXbrl.modelManager.showStatus(_("loading {0}").format(uri))
modelDocument = None
rootNode = xmlDocument.getroot()
if rootNode is not None:
ln = rootNode.localName
ns = rootNode.namespaceURI
# type classification
_type = None
_class = ModelDocument
if ns == XbrlConst.xsd and ln == "schema":
_type = Type.SCHEMA
if not isEntry and not isIncluded:
# check if already loaded under a different url
targetNamespace = rootNode.get("targetNamespace")
if targetNamespace and modelXbrl.namespaceDocs.get(targetNamespace):
otherModelDoc = modelXbrl.namespaceDocs[targetNamespace][0]
if otherModelDoc.basename == os.path.basename(uri):
if os.path.normpath(otherModelDoc.uri) != os.path.normpath(uri): # tolerate \ vs / or ../ differences
modelXbrl.urlDocs[uri] = otherModelDoc
modelXbrl.warning("info:duplicatedSchema",
_("Schema file with same targetNamespace %(targetNamespace)s loaded from %(fileName)s and %(otherFileName)s"),
modelObject=referringElement, targetNamespace=targetNamespace, fileName=uri, otherFileName=otherModelDoc.uri)
return otherModelDoc
elif (isEntry or isDiscovered or isSupplemental) and ns == XbrlConst.link:
if ln == "linkbase":
_type = Type.LINKBASE
elif ln == "xbrl":
_type = Type.INSTANCE
else:
_type = Type.UnknownXML
elif isEntry and ns == XbrlConst.xbrli:
if ln == "xbrl":
_type = Type.INSTANCE
else:
_type = Type.UnknownXML
elif ns == XbrlConst.xhtml and \
(ln == "html" or ln == "xhtml"):
_type = Type.UnknownXML
if (# not a valid test: XbrlConst.ixbrlAll & set(rootNode.nsmap.values()) or
any(e is not None for e in rootNode.iter(*XbrlConst.ixbrlTags))):
_type = Type.INLINEXBRL
elif ln == "report" and ns == XbrlConst.ver:
_type = Type.VERSIONINGREPORT
from arelle.ModelVersReport import ModelVersReport
_class = ModelVersReport
elif ln in ("testcases", "documentation", "testSuite", "registries"):
_type = Type.TESTCASESINDEX
elif ln in ("testcase", "testSet"):
_type = Type.TESTCASE
elif ln == "registry" and ns == XbrlConst.registry:
_type = Type.REGISTRY
elif ln == "test-suite" and ns == "http://www.w3.org/2005/02/query-test-XQTSCatalog":
_type = Type.XPATHTESTSUITE
elif ln == "rss":
_type = Type.RSSFEED
from arelle.ModelRssObject import ModelRssObject
_class = ModelRssObject
elif ln == "ptvl":
_type = Type.ARCSINFOSET
elif ln == "facts":
_type = Type.FACTDIMSINFOSET
elif (# not a valid test: XbrlConst.ixbrlAll & set(rootNode.nsmap.values()) or
any(e is not None for e in rootNode.iter(*XbrlConst.ixbrlTags))):
# any xml document can be an inline document, only html and xhtml are found above
_type = Type.INLINEXBRL
else:
for pluginMethod in pluginClassMethods("ModelDocument.IdentifyType"):
_identifiedType = pluginMethod(modelXbrl, rootNode, filepath)
if _identifiedType is not None:
_type, _class, rootNode = _identifiedType
break
if _type is None:
_type = Type.UnknownXML
nestedInline = None
for htmlElt in rootNode.iter(tag="{http://www.w3.org/1999/xhtml}html"):
nestedInline = htmlElt
break
if nestedInline is None:
for htmlElt in rootNode.iter(tag="{http://www.w3.org/1999/xhtml}xhtml"):
nestedInline = htmlElt
break
if nestedInline is not None:
if (# not a valid test: XbrlConst.ixbrl in nestedInline.nsmap.values() or
any(e is not None for e in rootNode.iter(*XbrlConst.ixbrlTags))):
_type = Type.INLINEXBRL
rootNode = nestedInline
modelDocument = _class(modelXbrl, _type, normalizedUri, filepath, xmlDocument)
rootNode.init(modelDocument)
modelDocument.parser = _parser # needed for XmlUtil addChild's makeelement
modelDocument.parserLookupName = _parserLookupName
modelDocument.parserLookupClass = _parserLookupClass
modelDocument.xmlRootElement = modelDocument.targetXbrlRootElement = rootNode
modelDocument.schemaLocationElements.add(rootNode)
modelDocument.documentEncoding = _encoding
if isEntry or isDiscovered:
modelDocument.inDTS = True
# discovery (parsing)
if any(pluginMethod(modelDocument)
for pluginMethod in pluginClassMethods("ModelDocument.Discover")):
pass # discovery was performed by plug-in, we're done
elif _type == Type.SCHEMA:
modelDocument.schemaDiscover(rootNode, isIncluded, isSupplemental, namespace)
elif _type == Type.LINKBASE:
modelDocument.linkbaseDiscover(rootNode)
elif _type == Type.INSTANCE:
modelDocument.instanceDiscover(rootNode)
elif _type == Type.INLINEXBRL:
try:
modelDocument.inlineXbrlDiscover(rootNode)
except RecursionError as err:
schemaErrorCount = modelXbrl.errors.count("xmlSchema:syntax")
if schemaErrorCount > 100: # arbitrary count, in case of tons of unclosed or mismatched xhtml start-end elements
modelXbrl.error("html:unprocessable",
_("%(element)s error, unable to process html syntax due to %(schemaErrorCount)s schema syntax errors"),
modelObject=rootNode, element=rootNode.localName.title(), schemaErrorCount=schemaErrorCount)
else:
self.modelXbrl.error("html:validationException",
_("%(element)s error %(error)s, unable to process html."),
modelObject=rootNode, element=rootNode.localName.title(), error=type(err).__name__)
return None # rootNode is not processed further to find any facts because there could be many recursion errors
elif _type == Type.VERSIONINGREPORT:
modelDocument.versioningReportDiscover(rootNode)
elif _type == Type.TESTCASESINDEX:
modelDocument.testcasesIndexDiscover(xmlDocument)
elif _type == Type.TESTCASE:
modelDocument.testcaseDiscover(rootNode)
elif _type == Type.REGISTRY:
modelDocument.registryDiscover(rootNode)
elif _type == Type.XPATHTESTSUITE:
modelDocument.xPathTestSuiteDiscover(rootNode)
elif _type == Type.VERSIONINGREPORT:
modelDocument.versioningReportDiscover(rootNode)
elif _type == Type.RSSFEED:
modelDocument.rssFeedDiscover(rootNode)
if isEntry or _type == Type.INLINEXBRL: # inline doc set members may not be entry but may have processing instructions
for pi in modelDocument.processingInstructions:
if pi.target == "arelle-unit-test":
modelXbrl.arelleUnitTests[pi.get("location")] = pi.get("action")
if isEntry:
while modelXbrl.schemaDocsToValidate:
doc = modelXbrl.schemaDocsToValidate.pop()
XmlValidateSchema.validate(doc, doc.xmlRootElement, doc.targetNamespace) # validate schema elements
if hasattr(modelXbrl, "ixdsHtmlElements"):
inlineIxdsDiscover(modelXbrl, modelDocument) # compile cross-document IXDS references
if isEntry or isSupplemental:
# re-order base set keys for entry point or supplemental linkbase addition
modelXbrl.baseSets = OrderedDefaultDict( # order by linkRole, arcRole of key
modelXbrl.baseSets.default_factory,
sorted(modelXbrl.baseSets.items(), key=lambda i: (i[0][0] or "",i[0][1] or "")))
return modelDocument
|
31,174 |
def create_account_context(endpoints):
account_context = []
for endpoint in endpoints:
domain = endpoint.get('domain')
if domain:
for user in endpoint.get('users', []):
account_context.append({
'Username': user,
'Domain': domain
})
return account_context
|
def create_account_context(endpoints):
account_context = []
for endpoint in endpoints:
domain = endpoint.get('domain')
if domain:
for user in endpoint.get('users', []):
account_context.append({
'Username': user,
'Domain': domain,
})
return account_context
|
7,204 |
def euler_number(image, connectivity=None):
"""Calculate the Euler characteristic in binary image.
A neighbourhood configuration is constructed, and a LUT is applied for
each configuration.
Parameters
----------
image: (N, M) ndarray or (N, M, D) ndarray.
2D or 3D images.
If image is not binary, all values strictly greater than zero
are considered as the object.
connectivity : int, optional
Maximum number of orthogonal hops to consider a pixel/voxel
as a neighbor.
Accepted values are ranging from 1 to input.ndim. If ``None``, a full
connectivity of ``input.ndim`` is used.
4 or 8 neighborhoods are defined for 2D images (connectivity 1 and 2,
respectively).
6 or 26 neighborhoods are defined for 3D images, (connectivity 1 and 3,
respectively). Connectivity 2 is not defined.
Returns
-------
euler_number : int
Euler characteristic of the set of all objects in the image.
Notes
-----
The Euler characteristic is an integer number that describes the
topology of the set of all objects in the input image. If object is
4-connected, then background is 8-connected, and conversely.
References
----------
.. [1] S. Rivollier. Analyse d’image geometrique et morphometrique par
diagrammes de forme et voisinages adaptatifs generaux. PhD thesis,
2010. Ecole Nationale Superieure des Mines de Saint-Etienne.
https://tel.archives-ouvertes.fr/tel-00560838
.. [2] Ohser J., Nagel W., Schladitz K. (2002) The Euler Number of
Discretized Sets - On the Choice of Adjacency in Homogeneous
Lattices. In: Mecke K., Stoyan D. (eds) Morphology of Condensed
Matter. Lecture Notes in Physics, vol 600. Springer, Berlin,
Heidelberg.
Examples
--------
>>> import numpy as np
>>> SAMPLE = np.zeros((100,100,100));
>>> SAMPLE[40:60, 40:60, 40:60]=1
>>> euler_number(SAMPLE) # doctest: +ELLIPSIS
1...
>>> SAMPLE[45:55,45:55,45:55] = 0;
>>> euler_number(SAMPLE) # doctest: +ELLIPSIS
2...
>>> SAMPLE = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0],
... [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0],
... [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],
... [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],
... [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0],
... [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
... [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
... [1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0],
... [0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1],
... [0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1]])
>>> euler_number(SAMPLE) # doctest:
0
>>> euler_number(SAMPLE, connectivity=1) # doctest:
2
"""
# as image can be a label image, transform it to binary
image = (image > 0).astype(np.int)
image = pad(image, ((1, 1),), mode='constant')
# check connectivity
if connectivity is None:
connectivity = image.ndim
if image.ndim == 3 and connectivity == 2:
raise NotImplementedError('For 3D images, Euler number is implemented '
'for connectivities 1 and 3 only')
# config variable is an adjacency configuration. A coefficient given by
# variable coefs is attributed to each configuration in order to get
# the Euler characteristic.
if image.ndim == 2:
config = np.array([[0, 0, 0], [0, 1, 4], [0, 2, 8]])
if connectivity == 1:
coefs = [0, 1, 0, 0, 0, 0, 0,
-1, 0, 1, 0, 0, 0, 0, 0, 0]
else:
coefs = [0, 0, 0, 0, 0, 0, -1,
0, 1, 0, 0, 0, 0, 0, -1, 0]
bins = 16
else: # 3D images
config = np.array([[[0, 0, 0], [0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [0, 1, 4], [0, 2, 8]],
[[0, 0, 0], [0, 16, 64], [0, 32, 128]]])
coefs26 = np.array([0, 1, 1, 0, 1, 0, -2, -1,
1, -2, 0, -1, 0, -1, -1, 0,
1, 0, -2, -1, -2, -1, -1, -2,
-6, -3, -3, -2, -3, -2, 0, -1,
1, -2, 0, -1, -6, -3, -3, -2,
-2, -1, -1, -2, -3, 0, -2, -1,
0, -1, -1, 0, -3, -2, 0, -1,
-3, 0, -2, -1, 0, 1, 1, 0,
1, -2, -6, -3, 0, -1, -3, -2,
-2, -1, -3, 0, -1, -2, -2, -1,
0, -1, -3, -2, -1, 0, 0, -1,
-3, 0, 0, 1, -2, -1, 1, 0,
-2, -1, -3, 0, -3, 0, 0, 1,
-1, 4, 0, 3, 0, 3, 1, 2,
-1, -2, -2, -1, -2, -1, 1,
0, 0, 3, 1, 2, 1, 2, 2, 1,
1, -6, -2, -3, -2, -3, -1, 0,
0, -3, -1, -2, -1, -2, -2, -1,
-2, -3, -1, 0, -1, 0, 4, 3,
-3, 0, 0, 1, 0, 1, 3, 2,
0, -3, -1, -2, -3, 0, 0, 1,
-1, 0, 0, -1, -2, 1, -1, 0,
-1, -2, -2, -1, 0, 1, 3, 2,
-2, 1, -1, 0, 1, 2, 2, 1,
0, -3, -3, 0, -1, -2, 0, 1,
-1, 0, -2, 1, 0, -1, -1, 0,
-1, -2, 0, 1, -2, -1, 3, 2,
-2, 1, 1, 2, -1, 0, 2, 1,
-1, 0, -2, 1, -2, 1, 1, 2,
-2, 3, -1, 2, -1, 2, 0, 1,
0, -1, -1, 0, -1, 0, 2, 1,
-1, 2, 0, 1, 0, 1, 1, 0, ])
if connectivity == 1:
coefs = coefs26[::-1]
else:
coefs = coefs26
bins = 256
XF = ndi.convolve(image, config, mode='constant', cval=0)
h = np.bincount(XF.ravel(), minlength=bins)
if image.ndim == 2:
return coefs@h
else:
return np.int(1./8 * coefs@h)
|
def euler_number(image, connectivity=None):
"""Calculate the Euler characteristic in binary image.
A neighbourhood configuration is constructed, and a LUT is applied for
each configuration.
Parameters
----------
image: (N, M) ndarray or (N, M, D) ndarray.
2D or 3D images.
If image is not binary, all values strictly greater than zero
are considered as the object.
connectivity : int, optional
Maximum number of orthogonal hops to consider a pixel/voxel
as a neighbor.
Accepted values are ranging from 1 to input.ndim. If ``None``, a full
connectivity of ``input.ndim`` is used.
4 or 8 neighborhoods are defined for 2D images (connectivity 1 and 2,
respectively).
6 or 26 neighborhoods are defined for 3D images, (connectivity 1 and 3,
respectively). Connectivity 2 is not defined.
Returns
-------
euler_number : int
Euler characteristic of the set of all objects in the image.
Notes
-----
The Euler characteristic is an integer number that describes the
topology of the set of all objects in the input image. If object is
4-connected, then background is 8-connected, and conversely.
References
----------
.. [1] S. Rivollier. Analyse d’image geometrique et morphometrique par
diagrammes de forme et voisinages adaptatifs generaux. PhD thesis,
2010. Ecole Nationale Superieure des Mines de Saint-Etienne.
https://tel.archives-ouvertes.fr/tel-00560838
.. [2] Ohser J., Nagel W., Schladitz K. (2002) The Euler Number of
Discretized Sets - On the Choice of Adjacency in Homogeneous
Lattices. In: Mecke K., Stoyan D. (eds) Morphology of Condensed
Matter. Lecture Notes in Physics, vol 600. Springer, Berlin,
Heidelberg.
Examples
--------
>>> import numpy as np
>>> SAMPLE = np.zeros((100,100,100));
>>> SAMPLE[40:60, 40:60, 40:60]=1
>>> euler_number(SAMPLE) # doctest: +ELLIPSIS
1...
>>> SAMPLE[45:55,45:55,45:55] = 0;
>>> euler_number(SAMPLE) # doctest: +ELLIPSIS
2...
>>> SAMPLE = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0],
... [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0],
... [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],
... [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],
... [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0],
... [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
... [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
... [1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0],
... [0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1],
... [0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1]])
>>> euler_number(SAMPLE) # doctest:
0
>>> euler_number(SAMPLE, connectivity=1) # doctest:
2
"""
# as image can be a label image, transform it to binary
image = (image > 0).astype(np.int)
image = pad(image, ((1, 1),), mode='constant')
# check connectivity
if connectivity is None:
connectivity = image.ndim
if image.ndim == 3 and connectivity == 2:
raise NotImplementedError('For 3D images, Euler number is implemented '
'for connectivities 1 and 3 only')
# config variable is an adjacency configuration. A coefficient given by
# variable coefs is attributed to each configuration in order to get
# the Euler characteristic.
if image.ndim == 2:
config = np.array([[0, 0, 0], [0, 1, 4], [0, 2, 8]])
if connectivity == 1:
coefs = [0, 1, 0, 0, 0, 0, 0,
-1, 0, 1, 0, 0, 0, 0, 0, 0]
else:
coefs = [0, 0, 0, 0, 0, 0, -1,
0, 1, 0, 0, 0, 0, 0, -1, 0]
bins = 16
else: # 3D images
config = np.array([[[0, 0, 0], [0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [0, 1, 4], [0, 2, 8]],
[[0, 0, 0], [0, 16, 64], [0, 32, 128]]])
coefs26 = np.array([0, 1, 1, 0, 1, 0, -2, -1,
1, -2, 0, -1, 0, -1, -1, 0,
1, 0, -2, -1, -2, -1, -1, -2,
-6, -3, -3, -2, -3, -2, 0, -1,
1, -2, 0, -1, -6, -3, -3, -2,
-2, -1, -1, -2, -3, 0, -2, -1,
0, -1, -1, 0, -3, -2, 0, -1,
-3, 0, -2, -1, 0, 1, 1, 0,
1, -2, -6, -3, 0, -1, -3, -2,
-2, -1, -3, 0, -1, -2, -2, -1,
0, -1, -3, -2, -1, 0, 0, -1,
-3, 0, 0, 1, -2, -1, 1, 0,
-2, -1, -3, 0, -3, 0, 0, 1,
-1, 4, 0, 3, 0, 3, 1, 2,
-1, -2, -2, -1, -2, -1, 1,
0, 0, 3, 1, 2, 1, 2, 2, 1,
1, -6, -2, -3, -2, -3, -1, 0,
0, -3, -1, -2, -1, -2, -2, -1,
-2, -3, -1, 0, -1, 0, 4, 3,
-3, 0, 0, 1, 0, 1, 3, 2,
0, -3, -1, -2, -3, 0, 0, 1,
-1, 0, 0, -1, -2, 1, -1, 0,
-1, -2, -2, -1, 0, 1, 3, 2,
-2, 1, -1, 0, 1, 2, 2, 1,
0, -3, -3, 0, -1, -2, 0, 1,
-1, 0, -2, 1, 0, -1, -1, 0,
-1, -2, 0, 1, -2, -1, 3, 2,
-2, 1, 1, 2, -1, 0, 2, 1,
-1, 0, -2, 1, -2, 1, 1, 2,
-2, 3, -1, 2, -1, 2, 0, 1,
0, -1, -1, 0, -1, 0, 2, 1,
-1, 2, 0, 1, 0, 1, 1, 0, ])
if connectivity == 1:
coefs = coefs26[::-1]
else:
coefs = coefs26
bins = 256
XF = ndi.convolve(image, config, mode='constant', cval=0)
h = np.bincount(XF.ravel(), minlength=bins)
if image.ndim == 2:
return coefs.dot(h)
else:
return np.int(1./8 * coefs@h)
|
29,389 |
def send_mail_to_notify_contributor_ranking_achievement(
contributor_ranking_email_info: (
suggestion_registry.ContributorMilestoneEmailInfo)) -> None:
"""Sends an email to translation/question submitters and reviewers when
they achieve a new rank.
Args:
contributor_ranking_email_info:
ContributorMilestoneEmailInfo. An object with contributor ranking
email information.
"""
if not feconf.CAN_SEND_EMAILS:
logging.error('This app cannot send emails to users.')
return
recipient_username = user_services.get_username(
contributor_ranking_email_info.contributor_user_id)
can_user_receive_email = user_services.get_email_preferences(
contributor_ranking_email_info.contributor_user_id
).can_receive_email_updates
if not can_user_receive_email:
logging.error('This user can not recieve emails.')
return
email_template = NOTIFICATION_FOR_CONTRIBUTOR_RANKING_ACHIEVEMENT[
contributor_ranking_email_info.contribution_type][
contributor_ranking_email_info.contribution_sub_type]
email_body = ''
if contributor_ranking_email_info.contribution_type == (
feconf.CONTRIBUTION_TYPE_TRANSLATION):
language = utils.get_supported_audio_language_description(
contributor_ranking_email_info.language_code)
email_body = email_template['email_body_template'] % (
recipient_username,
contributor_ranking_email_info.rank_name,
language,
feconf.OPPIA_SITE_URL,
feconf.CONTRIBUTOR_DASHBOARD_URL
)
else:
email_body = email_template['email_body_template'] % (
recipient_username,
contributor_ranking_email_info.rank_name,
feconf.OPPIA_SITE_URL,
feconf.CONTRIBUTOR_DASHBOARD_URL
)
_send_email(
contributor_ranking_email_info.contributor_user_id,
feconf.SYSTEM_COMMITTER_ID,
feconf.EMAIL_INTENT_NOTIFY_CONTRIBUTOR_DASHBOARD_ACHIEVEMENTS,
email_template['email_subject'], email_body,
feconf.NOREPLY_EMAIL_ADDRESS)
|
def send_mail_to_notify_contributor_ranking_achievement(
contributor_ranking_email_info: (
suggestion_registry.ContributorMilestoneEmailInfo)) -> None:
"""Sends an email to translation/question submitters and reviewers when
they achieve a new rank.
Args:
contributor_ranking_email_info:
ContributorMilestoneEmailInfo. An object with contributor ranking
email information.
"""
if not feconf.CAN_SEND_EMAILS:
logging.error('This app cannot send emails to users.')
return
recipient_username = user_services.get_username(
contributor_ranking_email_info.contributor_user_id)
can_user_receive_email = user_services.get_email_preferences(
contributor_ranking_email_info.contributor_user_id
).can_receive_email_updates
if not can_user_receive_email:
logging.error('This user can not receive emails.')
return
email_template = NOTIFICATION_FOR_CONTRIBUTOR_RANKING_ACHIEVEMENT[
contributor_ranking_email_info.contribution_type][
contributor_ranking_email_info.contribution_sub_type]
email_body = ''
if contributor_ranking_email_info.contribution_type == (
feconf.CONTRIBUTION_TYPE_TRANSLATION):
language = utils.get_supported_audio_language_description(
contributor_ranking_email_info.language_code)
email_body = email_template['email_body_template'] % (
recipient_username,
contributor_ranking_email_info.rank_name,
language,
feconf.OPPIA_SITE_URL,
feconf.CONTRIBUTOR_DASHBOARD_URL
)
else:
email_body = email_template['email_body_template'] % (
recipient_username,
contributor_ranking_email_info.rank_name,
feconf.OPPIA_SITE_URL,
feconf.CONTRIBUTOR_DASHBOARD_URL
)
_send_email(
contributor_ranking_email_info.contributor_user_id,
feconf.SYSTEM_COMMITTER_ID,
feconf.EMAIL_INTENT_NOTIFY_CONTRIBUTOR_DASHBOARD_ACHIEVEMENTS,
email_template['email_subject'], email_body,
feconf.NOREPLY_EMAIL_ADDRESS)
|
30,456 |
def main():
args = demisto.args() # type: dict
file_path = None
file_entry_id = ''
if args.get('fileName') or args.get('lastZipFileInWarroom'):
entries = demisto.executeCommand('getEntries', {})
for entry in entries:
fn = demisto.get(entry, 'File')
is_text = type(fn) in [unicode, str]
is_correct_file = args.get('fileName', '').lower() == fn.lower()
is_zip = fn.lower().endswith('.zip')
if is_text and is_zip:
if args.get('fileName') and is_correct_file:
file_entry_id = entry['ID']
break
if args.get('lastZipFileInWarroom'):
file_entry_id = entry['ID']
if not file_entry_id:
if args.get('fileName'):
demisto.results({
'Type': entryTypes['error'],
'ContentsFormat': formats['text'],
'Contents': args.get('fileName', '') + ' not such file in war room'
})
if args.get('lastZipFileInWarroom'):
demisto.results({
'Type': entryTypes['error'],
'ContentsFormat': formats['text'],
'Contents': 'Not found zip file in war room'
})
sys.exit(0)
if 'entryID' in args:
file_entry_id = args.get('entryID') # type: ignore
if not file_entry_id:
return_error('You must set entryID or fileName or lastZipFileInWarroom=true when executing Unzip script')
res = demisto.executeCommand('getFilePath', {'id': file_entry_id})
if res[0]['Type'] == entryTypes['error']:
demisto.results({
'Type': entryTypes['error'],
'ContentsFormat': formats['text'],
'Contents': 'Failed to get the file path for entry: ' + file_entry_id
})
sys.exit(0)
file_path = res[0]['Contents']['path']
password = args.get('password', None)
filenames = []
# remembering which files and dirs we currently have so we add them later as newly extracted files.
excluded_files = [f for f in os.listdir('.') if isfile(f)]
excluded_dirs = [d for d in os.listdir('.') if isdir(d)]
# extracting the zip file
process = Popen(["7z", "x", "-p{}".format(password), file_path], stdout=PIPE, stderr=PIPE)
stdout, stderr = process.communicate()
if "Wrong password?" in stdout:
demisto.debug(str(stdout))
return_error("Data Error in encrypted file. Wrong password?")
# recursive call over the file system top down
for root, directories, files in os.walk('.'):
# removing the previously existing dirs from the search
directories[:] = [d for d in directories if d not in excluded_dirs]
for f in files:
# skipping previously existing files and verifying that the current file is a file and
# then adding it to the extracted files list
if f not in excluded_files and isfile(os.path.join(root, f)):
filenames.append(os.path.join(root, f))
if len(filenames) == 0:
demisto.results({
'Type': entryTypes['error'],
'ContentsFormat': formats['text'],
'Contents': 'Could not find files in archive'
})
else:
results = []
# extracted files can be in sub directories so we save the base names of
# the files and also the full path of the file
files_base_names = [os.path.basename(file_path) for file_path in filenames]
files_dic = {file_path: os.path.basename(file_path) for file_path in filenames}
for file_path, file_name in files_dic.items():
demisto.results(file_result_existing_file(file_path, file_name))
results.append(
{
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': {'extractedFiles': files_base_names},
'EntryContext': {'ExtractedFiles': files_base_names,
'File(val.EntryID=="' + file_entry_id + '").Unzipped': True},
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown('Extracted Files',
[{'name': file_name, 'path': file_path} for file_path, file_name in
files_dic.items()])
})
demisto.results(results)
|
def main():
args = demisto.args() # type: dict
file_path = None
file_entry_id = ''
if args.get('fileName') or args.get('lastZipFileInWarroom'):
entries = demisto.executeCommand('getEntries', {})
for entry in entries:
fn = demisto.get(entry, 'File')
is_text = type(fn) in [unicode, str]
is_correct_file = args.get('fileName', '').lower() == fn.lower()
is_zip = fn.lower().endswith('.zip')
if is_text and is_zip:
if args.get('fileName') and is_correct_file:
file_entry_id = entry['ID']
break
if args.get('lastZipFileInWarroom'):
file_entry_id = entry['ID']
if not file_entry_id:
if args.get('fileName'):
demisto.results({
'Type': entryTypes['error'],
'ContentsFormat': formats['text'],
'Contents': args.get('fileName', '') + ' not such file in war room'
})
if args.get('lastZipFileInWarroom'):
demisto.results({
'Type': entryTypes['error'],
'ContentsFormat': formats['text'],
'Contents': 'Not found zip file in war room'
})
sys.exit(0)
if 'entryID' in args:
file_entry_id = args.get('entryID') # type: ignore
if not file_entry_id:
return_error('You must set entryID or fileName or lastZipFileInWarroom=true when executing Unzip script')
res = demisto.executeCommand('getFilePath', {'id': file_entry_id})
if res[0]['Type'] == entryTypes['error']:
demisto.results({
'Type': entryTypes['error'],
'ContentsFormat': formats['text'],
'Contents': 'Failed to get the file path for entry: ' + file_entry_id
})
sys.exit(0)
file_path = res[0]['Contents']['path']
password = args.get('password', None)
filenames = []
# remembering which files and dirs we currently have so we add them later as newly extracted files.
excluded_files = [f for f in os.listdir('.') if isfile(f)]
excluded_dirs = [d for d in os.listdir('.') if isdir(d)]
# extracting the zip file
process = Popen(["7z", "x", "-p{}".format(password), file_path], stdout=PIPE, stderr=PIPE)
stdout, stderr = process.communicate()
if 'Wrong password?' in stdout:
demisto.debug(str(stdout))
return_error("Data Error in encrypted file. Wrong password?")
# recursive call over the file system top down
for root, directories, files in os.walk('.'):
# removing the previously existing dirs from the search
directories[:] = [d for d in directories if d not in excluded_dirs]
for f in files:
# skipping previously existing files and verifying that the current file is a file and
# then adding it to the extracted files list
if f not in excluded_files and isfile(os.path.join(root, f)):
filenames.append(os.path.join(root, f))
if len(filenames) == 0:
demisto.results({
'Type': entryTypes['error'],
'ContentsFormat': formats['text'],
'Contents': 'Could not find files in archive'
})
else:
results = []
# extracted files can be in sub directories so we save the base names of
# the files and also the full path of the file
files_base_names = [os.path.basename(file_path) for file_path in filenames]
files_dic = {file_path: os.path.basename(file_path) for file_path in filenames}
for file_path, file_name in files_dic.items():
demisto.results(file_result_existing_file(file_path, file_name))
results.append(
{
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': {'extractedFiles': files_base_names},
'EntryContext': {'ExtractedFiles': files_base_names,
'File(val.EntryID=="' + file_entry_id + '").Unzipped': True},
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown('Extracted Files',
[{'name': file_name, 'path': file_path} for file_path, file_name in
files_dic.items()])
})
demisto.results(results)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.