id
int64 11
59.9k
| original
stringlengths 33
150k
| modified
stringlengths 37
150k
|
---|---|---|
35,844 | def test_msg_data_assignment(get_contract):
code = """
@external
def foo() -> Bytes[4]:
x: Bytes[4] = msg.data[4]
return x
"""
contract = get_contract(code)
assert contract.foo().hex() == "c2985578" # fn sig
| def test_msg_data_assignment(get_contract):
code = """
@external
def foo() -> Bytes[4]:
x: Bytes[4] = slice(msg.data, 0, 4) # Throws if `len(msg.data) < 4`
return x
"""
contract = get_contract(code)
assert contract.foo().hex() == "c2985578" # fn sig
|
9,628 | def privileges_get(cursor, user, host):
""" MySQL doesn't have a better method of getting privileges aside from the
SHOW GRANTS query syntax, which requires us to then parse the returned string.
Here's an example of the string that is returned from MySQL:
GRANT USAGE ON *.* TO 'user'@'localhost' IDENTIFIED BY 'pass';
This function makes the query and returns a dictionary containing the results.
The dictionary format is the same as that returned by privileges_unpack() below.
"""
output = {}
cursor.execute("SHOW GRANTS FOR %s@%s", (user, host))
grants = cursor.fetchall()
def pick(x):
if x == 'ALL PRIVILEGES':
return 'ALL'
else:
return x
for grant in grants:
res = re.match("""GRANT (.+) ON (.+) TO (['`"]).*\\3@(['`"]).*\\4( IDENTIFIED BY PASSWORD (['`"]).+\5)? ?(.*)""", grant[0])
if res is None:
raise InvalidPrivsError('unable to parse the MySQL grant string: %s' % grant[0])
privileges = res.group(1).split(", ")
privileges = [pick(x) for x in privileges]
if "WITH GRANT OPTION" in res.group(7):
privileges.append('GRANT')
if "REQUIRE SSL" in res.group(7):
privileges.append('REQUIRESSL')
db = res.group(2)
output[db] = privileges
return output
| def privileges_get(cursor, user, host):
""" MySQL doesn't have a better method of getting privileges aside from the
SHOW GRANTS query syntax, which requires us to then parse the returned string.
Here's an example of the string that is returned from MySQL:
GRANT USAGE ON *.* TO 'user'@'localhost' IDENTIFIED BY 'pass';
This function makes the query and returns a dictionary containing the results.
The dictionary format is the same as that returned by privileges_unpack() below.
"""
output = {}
cursor.execute("SHOW GRANTS FOR %s@%s", (user, host))
grants = cursor.fetchall()
def pick(x):
if x == 'ALL PRIVILEGES':
return 'ALL'
else:
return x
for grant in grants:
res = re.match("""GRANT (.+) ON (.+) TO (['`"]).*\\3@(['`"]).*\\4( IDENTIFIED BY PASSWORD (['`"]).+\\6)? ?(.*)""", grant[0])
if res is None:
raise InvalidPrivsError('unable to parse the MySQL grant string: %s' % grant[0])
privileges = res.group(1).split(", ")
privileges = [pick(x) for x in privileges]
if "WITH GRANT OPTION" in res.group(7):
privileges.append('GRANT')
if "REQUIRE SSL" in res.group(7):
privileges.append('REQUIRESSL')
db = res.group(2)
output[db] = privileges
return output
|
28,598 | def plot_pair(
data,
group="posterior",
var_names: Optional[List[str]] = None,
filter_vars: Optional[str] = None,
coords=None,
marginals=False,
figsize=None,
textsize=None,
kind: Union[str, List[str]] = "scatter",
gridsize="auto",
contour: Optional[bool] = None,
plot_kwargs=None,
fill_last=False,
divergences=False,
colorbar=False,
labeller=None,
ax=None,
divergences_kwargs=None,
scatter_kwargs=None,
kde_kwargs=None,
hexbin_kwargs=None,
backend=None,
backend_kwargs=None,
marginal_kwargs=None,
point_estimate=None,
point_estimate_kwargs=None,
point_estimate_marker_kwargs=None,
reference_values=None,
reference_values_kwargs=None,
show=None,
):
"""
Plot a scatter, kde and/or hexbin matrix with (optional) marginals on the diagonal.
Parameters
----------
data: obj
Any object that can be converted to an :class:`az.InferenceData` object
refer to documentation of :func:`az.convert_to_dataset` for details
group: str, optional
Specifies which InferenceData group should be plotted. Defaults to 'posterior'.
var_names: list of variable names, optional
Variables to be plotted, if None all variable are plotted. Prefix the
variables by ``~`` when you want to exclude them from the plot.
filter_vars: {None, "like", "regex"}, optional, default=None
If ``None`` (default), interpret var_names as the real variables names. If "like",
interpret var_names as substrings of the real variables names. If "regex",
interpret var_names as regular expressions on the real variables names. A la
``pandas.filter``.
coords: mapping, optional
Coordinates of var_names to be plotted. Passed to `Dataset.sel`
marginals: bool, optional
If True pairplot will include marginal distributions for every variable
figsize: figure size tuple
If None, size is (8 + numvars, 8 + numvars)
textsize: int
Text size for labels. If None it will be autoscaled based on figsize.
kind : str or List[str]
Type of plot to display (scatter, kde and/or hexbin)
gridsize: int or (int, int), optional
Only works for kind=hexbin.
The number of hexagons in the x-direction. The corresponding number of hexagons in the
y-direction is chosen such that the hexagons are approximately regular.
Alternatively, gridsize can be a tuple with two elements specifying the number of hexagons
in the x-direction and the y-direction.
contour : bool, optional, deprecated, Defaults to True.
If True plot the 2D KDE using contours, otherwise plot a smooth 2D KDE. Defaults to True.
**Note:** this default is implemented in the body of the code, not in argument processing.
fill_last : bool
If True fill the last contour of the 2D KDE plot. Defaults to True.
divergences: Boolean
If True divergences will be plotted in a different color, only if group is either 'prior'
or 'posterior'.
colorbar: bool
If True a colorbar will be included as part of the plot (Defaults to False).
Only works when kind=hexbin
labeller : labeller instance, optional
Class providing the method `make_label_vert` to generate the labels in the plot.
Read the :ref:`label_guide` for more details and usage examples.
ax: axes, optional
Matplotlib axes or bokeh figures.
divergences_kwargs: dicts, optional
Additional keywords passed to ``ax.scatter`` for divergences
scatter_kwargs:
Additional keywords passed to ``ax.plot`` when using scatter kind
kde_kwargs: dict, optional
Additional keywords passed to :func:`az.plot_kde` when using kde kind
hexbin_kwargs: dict, optional
Additional keywords passed to ``ax.hexbin`` when using hexbin kind
backend: str, optional
Select plotting backend {"matplotlib","bokeh"}. Default "matplotlib".
backend_kwargs: bool, optional
These are kwargs specific to the backend being used. For additional documentation
check the plotting method of the backend.
marginal_kwargs: dict, optional
Additional keywords passed to :func:`az.plot_dist`, modifying the marginal distributions
plotted in the diagonal.
point_estimate: str, optional
Select point estimate from 'mean', 'mode' or 'median'. The point estimate will be
plotted using a scatter marker and vertical/horizontal lines.
point_estimate_kwargs: dict, optional
Additional keywords passed to ``ax.vline``, ``ax.hline`` (matplotlib) or ``ax.square``, ``Span`` (bokeh)
point_estimate_marker_kwargs: dict, optional
Additional keywords passed to ax.scatter in point estimate plot. Not available in bokeh
reference_values: dict, optional
Reference values for the plotted variables. The Reference values will be plotted
using a scatter marker
reference_values_kwargs: dict, optional
Additional keywords passed to ``ax.plot`` or ``ax.circle`` in reference values plot
show: bool, optional
Call backend show function.
Returns
-------
axes: matplotlib axes or bokeh figures
Examples
--------
KDE Pair Plot
.. plot::
:context: close-figs
>>> import arviz as az
>>> centered = az.load_arviz_data('centered_eight')
>>> coords = {'school': ['Choate', 'Deerfield']}
>>> az.plot_pair(centered,
>>> var_names=['theta', 'mu', 'tau'],
>>> kind='kde',
>>> coords=coords,
>>> divergences=True,
>>> textsize=18)
Hexbin pair plot
.. plot::
:context: close-figs
>>> az.plot_pair(centered,
>>> var_names=['theta', 'mu'],
>>> coords=coords,
>>> textsize=18,
>>> kind='hexbin')
Pair plot showing divergences and select variables with regular expressions
.. plot::
:context: close-figs
>>> az.plot_pair(centered,
... var_names=['^t', 'mu'],
... filter_vars="regex",
... coords=coords,
... divergences=True,
... textsize=18)
"""
valid_kinds = ["scatter", "kde", "hexbin"]
kind_boolean: Union[bool, List[bool]]
if isinstance(kind, str):
kind_boolean = kind in valid_kinds
else:
kind_boolean = [kind[i] in valid_kinds for i in range(len(kind))]
if not np.all(kind_boolean):
raise ValueError((f"Plot type {kind} not recognized." "Plot type must be in {valid_kinds}"))
if fill_last or contour:
warnings.warn(
"fill_last and contour will be deprecated. Please use kde_kwargs",
UserWarning,
)
if plot_kwargs:
warnings.warn(
"plot_kwargs will be deprecated."
" Please use scatter_kwargs, kde_kwargs and/or hexbin_kwargs",
UserWarning,
)
if coords is None:
coords = {}
if labeller is None:
labeller = BaseLabeller()
# Get posterior draws and combine chains
dataset = convert_to_dataset(data, group=group)
var_names = _var_names(var_names, dataset, filter_vars)
plotters = list(
xarray_var_iter(get_coords(dataset, coords), var_names=var_names, combined=True)
)
flat_var_names = [
labeller.make_label_vert(var_name, sel, isel) for var_name, sel, isel, _ in plotters
]
divergent_data = None
diverging_mask = None
# Assigning divergence group based on group param
if group == "posterior":
divergent_group = "sample_stats"
elif group == "prior":
divergent_group = "sample_stats_prior"
else:
divergences = False
# Get diverging draws and combine chains
if divergences:
if hasattr(data, divergent_group) and hasattr(getattr(data, divergent_group), "diverging"):
divergent_data = convert_to_dataset(data, group=divergent_group)
_, diverging_mask = xarray_to_ndarray(
divergent_data, var_names=("diverging",), combined=True
)
diverging_mask = np.squeeze(diverging_mask)
else:
divergences = False
warnings.warn(
"Divergences data not found, plotting without divergences. "
"Make sure the sample method provides divergences data and "
"that it is present in the `diverging` field of `sample_stats` "
"or `sample_stats_prior` or set divergences=False",
UserWarning,
)
if gridsize == "auto":
gridsize = int(dataset.dims["draw"] ** 0.35)
numvars = len(flat_var_names)
if numvars < 2:
raise ValueError("Number of variables to be plotted must be 2 or greater.")
pairplot_kwargs = dict(
ax=ax,
plotters=plotters,
numvars=numvars,
figsize=figsize,
textsize=textsize,
kind=kind,
scatter_kwargs=scatter_kwargs,
kde_kwargs=kde_kwargs,
hexbin_kwargs=hexbin_kwargs,
gridsize=gridsize,
colorbar=colorbar,
divergences=divergences,
diverging_mask=diverging_mask,
divergences_kwargs=divergences_kwargs,
flat_var_names=flat_var_names,
backend_kwargs=backend_kwargs,
marginal_kwargs=marginal_kwargs,
show=show,
marginals=marginals,
point_estimate=point_estimate,
point_estimate_kwargs=point_estimate_kwargs,
point_estimate_marker_kwargs=point_estimate_marker_kwargs,
reference_values=reference_values,
reference_values_kwargs=reference_values_kwargs,
)
if backend is None:
backend = rcParams["plot.backend"]
backend = backend.lower()
# TODO: Add backend kwargs
plot = get_plotting_function("plot_pair", "pairplot", backend)
ax = plot(**pairplot_kwargs)
return ax
| def plot_pair(
data,
group="posterior",
var_names: Optional[List[str]] = None,
filter_vars: Optional[str] = None,
coords=None,
marginals=False,
figsize=None,
textsize=None,
kind: Union[str, List[str]] = "scatter",
gridsize="auto",
contour: Optional[bool] = None,
plot_kwargs=None,
fill_last=False,
divergences=False,
colorbar=False,
labeller=None,
ax=None,
divergences_kwargs=None,
scatter_kwargs=None,
kde_kwargs=None,
hexbin_kwargs=None,
backend=None,
backend_kwargs=None,
marginal_kwargs=None,
point_estimate=None,
point_estimate_kwargs=None,
point_estimate_marker_kwargs=None,
reference_values=None,
reference_values_kwargs=None,
show=None,
):
"""
Plot a scatter, kde and/or hexbin matrix with (optional) marginals on the diagonal.
Parameters
----------
data: obj
Any object that can be converted to an :class:`az.InferenceData` object
refer to documentation of :func:`az.convert_to_dataset` for details
group: str, optional
Specifies which InferenceData group should be plotted. Defaults to 'posterior'.
var_names: list of variable names, optional
Variables to be plotted, if None all variable are plotted. Prefix the
variables by ``~`` when you want to exclude them from the plot.
filter_vars: {None, "like", "regex"}, optional, default=None
If ``None`` (default), interpret var_names as the real variables names. If "like",
interpret var_names as substrings of the real variables names. If "regex",
interpret var_names as regular expressions on the real variables names. A la
``pandas.filter``.
coords: mapping, optional
Coordinates of var_names to be plotted. Passed to `Dataset.sel`
marginals: bool, optional
If True pairplot will include marginal distributions for every variable
figsize: figure size tuple
If None, size is (8 + numvars, 8 + numvars)
textsize: int
Text size for labels. If None it will be autoscaled based on figsize.
kind : str or List[str]
Type of plot to display (scatter, kde and/or hexbin)
gridsize: int or (int, int), optional
Only works for kind=hexbin.
The number of hexagons in the x-direction. The corresponding number of hexagons in the
y-direction is chosen such that the hexagons are approximately regular.
Alternatively, gridsize can be a tuple with two elements specifying the number of hexagons
in the x-direction and the y-direction.
contour : bool, optional, deprecated, Defaults to True.
If True plot the 2D KDE using contours, otherwise plot a smooth 2D KDE. Defaults to True.
**Note:** this default is implemented in the body of the code, not in argument processing.
fill_last : bool
If True fill the last contour of the 2D KDE plot. Defaults to True.
divergences: Boolean
If True divergences will be plotted in a different color, only if group is either 'prior'
or 'posterior'.
colorbar: bool
If True a colorbar will be included as part of the plot (Defaults to False).
Only works when kind=hexbin
labeller : labeller instance, optional
Class providing the method `make_label_vert` to generate the labels in the plot.
Read the :ref:`label_guide` for more details and usage examples.
ax: axes, optional
Matplotlib axes or bokeh figures.
divergences_kwargs: dicts, optional
Additional keywords passed to :meth:`matplotlib.axes.Axes.scatter` for divergences
scatter_kwargs:
Additional keywords passed to ``ax.plot`` when using scatter kind
kde_kwargs: dict, optional
Additional keywords passed to :func:`az.plot_kde` when using kde kind
hexbin_kwargs: dict, optional
Additional keywords passed to ``ax.hexbin`` when using hexbin kind
backend: str, optional
Select plotting backend {"matplotlib","bokeh"}. Default "matplotlib".
backend_kwargs: bool, optional
These are kwargs specific to the backend being used. For additional documentation
check the plotting method of the backend.
marginal_kwargs: dict, optional
Additional keywords passed to :func:`az.plot_dist`, modifying the marginal distributions
plotted in the diagonal.
point_estimate: str, optional
Select point estimate from 'mean', 'mode' or 'median'. The point estimate will be
plotted using a scatter marker and vertical/horizontal lines.
point_estimate_kwargs: dict, optional
Additional keywords passed to ``ax.vline``, ``ax.hline`` (matplotlib) or ``ax.square``, ``Span`` (bokeh)
point_estimate_marker_kwargs: dict, optional
Additional keywords passed to ax.scatter in point estimate plot. Not available in bokeh
reference_values: dict, optional
Reference values for the plotted variables. The Reference values will be plotted
using a scatter marker
reference_values_kwargs: dict, optional
Additional keywords passed to ``ax.plot`` or ``ax.circle`` in reference values plot
show: bool, optional
Call backend show function.
Returns
-------
axes: matplotlib axes or bokeh figures
Examples
--------
KDE Pair Plot
.. plot::
:context: close-figs
>>> import arviz as az
>>> centered = az.load_arviz_data('centered_eight')
>>> coords = {'school': ['Choate', 'Deerfield']}
>>> az.plot_pair(centered,
>>> var_names=['theta', 'mu', 'tau'],
>>> kind='kde',
>>> coords=coords,
>>> divergences=True,
>>> textsize=18)
Hexbin pair plot
.. plot::
:context: close-figs
>>> az.plot_pair(centered,
>>> var_names=['theta', 'mu'],
>>> coords=coords,
>>> textsize=18,
>>> kind='hexbin')
Pair plot showing divergences and select variables with regular expressions
.. plot::
:context: close-figs
>>> az.plot_pair(centered,
... var_names=['^t', 'mu'],
... filter_vars="regex",
... coords=coords,
... divergences=True,
... textsize=18)
"""
valid_kinds = ["scatter", "kde", "hexbin"]
kind_boolean: Union[bool, List[bool]]
if isinstance(kind, str):
kind_boolean = kind in valid_kinds
else:
kind_boolean = [kind[i] in valid_kinds for i in range(len(kind))]
if not np.all(kind_boolean):
raise ValueError((f"Plot type {kind} not recognized." "Plot type must be in {valid_kinds}"))
if fill_last or contour:
warnings.warn(
"fill_last and contour will be deprecated. Please use kde_kwargs",
UserWarning,
)
if plot_kwargs:
warnings.warn(
"plot_kwargs will be deprecated."
" Please use scatter_kwargs, kde_kwargs and/or hexbin_kwargs",
UserWarning,
)
if coords is None:
coords = {}
if labeller is None:
labeller = BaseLabeller()
# Get posterior draws and combine chains
dataset = convert_to_dataset(data, group=group)
var_names = _var_names(var_names, dataset, filter_vars)
plotters = list(
xarray_var_iter(get_coords(dataset, coords), var_names=var_names, combined=True)
)
flat_var_names = [
labeller.make_label_vert(var_name, sel, isel) for var_name, sel, isel, _ in plotters
]
divergent_data = None
diverging_mask = None
# Assigning divergence group based on group param
if group == "posterior":
divergent_group = "sample_stats"
elif group == "prior":
divergent_group = "sample_stats_prior"
else:
divergences = False
# Get diverging draws and combine chains
if divergences:
if hasattr(data, divergent_group) and hasattr(getattr(data, divergent_group), "diverging"):
divergent_data = convert_to_dataset(data, group=divergent_group)
_, diverging_mask = xarray_to_ndarray(
divergent_data, var_names=("diverging",), combined=True
)
diverging_mask = np.squeeze(diverging_mask)
else:
divergences = False
warnings.warn(
"Divergences data not found, plotting without divergences. "
"Make sure the sample method provides divergences data and "
"that it is present in the `diverging` field of `sample_stats` "
"or `sample_stats_prior` or set divergences=False",
UserWarning,
)
if gridsize == "auto":
gridsize = int(dataset.dims["draw"] ** 0.35)
numvars = len(flat_var_names)
if numvars < 2:
raise ValueError("Number of variables to be plotted must be 2 or greater.")
pairplot_kwargs = dict(
ax=ax,
plotters=plotters,
numvars=numvars,
figsize=figsize,
textsize=textsize,
kind=kind,
scatter_kwargs=scatter_kwargs,
kde_kwargs=kde_kwargs,
hexbin_kwargs=hexbin_kwargs,
gridsize=gridsize,
colorbar=colorbar,
divergences=divergences,
diverging_mask=diverging_mask,
divergences_kwargs=divergences_kwargs,
flat_var_names=flat_var_names,
backend_kwargs=backend_kwargs,
marginal_kwargs=marginal_kwargs,
show=show,
marginals=marginals,
point_estimate=point_estimate,
point_estimate_kwargs=point_estimate_kwargs,
point_estimate_marker_kwargs=point_estimate_marker_kwargs,
reference_values=reference_values,
reference_values_kwargs=reference_values_kwargs,
)
if backend is None:
backend = rcParams["plot.backend"]
backend = backend.lower()
# TODO: Add backend kwargs
plot = get_plotting_function("plot_pair", "pairplot", backend)
ax = plot(**pairplot_kwargs)
return ax
|
29,029 | def process_ssh_key(keypair: Mapping, credentials: Credentials):
if len(credentials.identities) != 1:
raise SSHKeyProcessingError(
f"SSH credentials have {len(credentials.identities)}" f" users associated with " f"it!"
)
if not _contains_both_keys(keypair):
raise SSHKeyProcessingError("Private or public key missing!")
# TODO SSH key should be associated with IP that monkey exploited
ip = Monkey.get_single_monkey_by_guid(credentials.monkey_guid).ip_addresses[0]
username = credentials.identities[0]["username"]
encrypted_keys = _encrypt_ssh_keys(keypair)
ConfigService.ssh_add_keys(
user=username,
public_key=encrypted_keys["public_key"],
private_key=encrypted_keys["private_key"],
ip=ip,
)
| def process_ssh_key(keypair: Mapping, credentials: Credentials):
if len(credentials.identities) != 1:
raise SSHKeyProcessingError(
f"SSH credentials have {len(credentials.identities)}" f" users associated with " f"it!"
)
if not _contains_both_keys(keypair):
raise SSHKeyProcessingError("Private or public key missing")
# TODO SSH key should be associated with IP that monkey exploited
ip = Monkey.get_single_monkey_by_guid(credentials.monkey_guid).ip_addresses[0]
username = credentials.identities[0]["username"]
encrypted_keys = _encrypt_ssh_keys(keypair)
ConfigService.ssh_add_keys(
user=username,
public_key=encrypted_keys["public_key"],
private_key=encrypted_keys["private_key"],
ip=ip,
)
|
34,644 | def _convert_lookup_tables_to_regex(
training_data: TrainingData, pattern_names: Optional[List[Text]] = None
) -> List[Dict[Text, Text]]:
"""Convert the lookup tables from the training data to regex patterns.
Args:
training_data: The training data.
pattern_names: List of pattern names to use. If list is empty or None all
patterns will be used.
Returns:
A list of regex patterns.
"""
patterns = []
for table in training_data.lookup_tables:
if pattern_names and table["name"] not in pattern_names:
continue
regex_pattern = _generate_lookup_regex(table)
lookup_regex = {"name": table["name"], "pattern": regex_pattern}
patterns.append(lookup_regex)
return patterns
| def _convert_lookup_tables_to_regex(
training_data: TrainingData, pattern_names: Optional[List[Text]] = None
) -> List[Dict[Text, Text]]:
"""Convert the lookup tables from the training data to regex patterns.
Args:
training_data: The training data.
pattern_names: List of pattern names to use. If list is empty or `None` all
patterns will be used.
Returns:
A list of regex patterns.
"""
patterns = []
for table in training_data.lookup_tables:
if pattern_names and table["name"] not in pattern_names:
continue
regex_pattern = _generate_lookup_regex(table)
lookup_regex = {"name": table["name"], "pattern": regex_pattern}
patterns.append(lookup_regex)
return patterns
|
46,574 | def invalid_cases():
rng = Random(1234)
for (name, (typ, offsets)) in PRESET_CONTAINERS.items():
# using mode_max_count, so that the extra byte cannot be picked up as normal list content
yield f'{name}_extra_byte', \
invalid_test_case(lambda: serialize(
container_case_fn(rng, RandomizationMode.mode_max_count, typ)) + b'\xff')
if len(offsets) != 0:
# Note: there are many more ways to have invalid offsets,
# these are just example to get clients started looking into hardening ssz.
for mode in [RandomizationMode.mode_random,
RandomizationMode.mode_nil_count,
RandomizationMode.mode_one_count,
RandomizationMode.mode_max_count]:
if len(offsets) != 0:
for index, offset_index in enumerate(offsets):
yield f'{name}_{mode.to_name()}_offset_{offset_index}_plus_one', \
invalid_test_case(lambda: mod_offset(
b=serialize(container_case_fn(rng, mode, typ)),
offset_index=offset_index,
change=lambda x: x + 1
))
yield f'{name}_{mode.to_name()}_offset_{offset_index}_zeroed', \
invalid_test_case(lambda: mod_offset(
b=serialize(container_case_fn(rng, mode, typ)),
offset_index=offset_index,
change=lambda x: 0
))
if index == 0:
yield f'{name}_{mode.to_name()}_first offset_{offset_index}_minus_one', \
invalid_test_case(lambda: mod_offset(
b=serialize(container_case_fn(rng, mode, typ)),
offset_index=offset_index,
change=lambda x: x - 1
))
if mode == RandomizationMode.mode_max_count:
serialized = serialize(container_case_fn(rng, mode, typ))
serialized = serialized + serialized[0:2]
yield f'{name}_{mode.to_name()}_last offset_{offset_index}_overflow', \
invalid_test_case(lambda: serialized)
if mode == RandomizationMode.mode_one_count:
serialized = serialize(container_case_fn(rng, mode, typ))
serialized = serialized + serialized[0:1]
yield f'{name}_{mode.to_name()}_last offset_{offset_index}_wrong_byte_length', \
invalid_test_case(lambda: serialized)
| def invalid_cases():
rng = Random(1234)
for (name, (typ, offsets)) in PRESET_CONTAINERS.items():
# using mode_max_count, so that the extra byte cannot be picked up as normal list content
yield f'{name}_extra_byte', \
invalid_test_case(lambda: serialize(
container_case_fn(rng, RandomizationMode.mode_max_count, typ)) + b'\xff')
if len(offsets) != 0:
# Note: there are many more ways to have invalid offsets,
# these are just example to get clients started looking into hardening ssz.
for mode in [RandomizationMode.mode_random,
RandomizationMode.mode_nil_count,
RandomizationMode.mode_one_count,
RandomizationMode.mode_max_count]:
if len(offsets) != 0:
for index, offset_index in enumerate(offsets):
yield f'{name}_{mode.to_name()}_offset_{offset_index}_plus_one', \
invalid_test_case(lambda: mod_offset(
b=serialize(container_case_fn(rng, mode, typ)),
offset_index=offset_index,
change=lambda x: x + 1
))
yield f'{name}_{mode.to_name()}_offset_{offset_index}_zeroed', \
invalid_test_case(lambda: mod_offset(
b=serialize(container_case_fn(rng, mode, typ)),
offset_index=offset_index,
change=lambda x: 0
))
if index == 0:
yield f'{name}_{mode.to_name()}_offset_{offset_index}_minus_one', \
invalid_test_case(lambda: mod_offset(
b=serialize(container_case_fn(rng, mode, typ)),
offset_index=offset_index,
change=lambda x: x - 1
))
if mode == RandomizationMode.mode_max_count:
serialized = serialize(container_case_fn(rng, mode, typ))
serialized = serialized + serialized[0:2]
yield f'{name}_{mode.to_name()}_last offset_{offset_index}_overflow', \
invalid_test_case(lambda: serialized)
if mode == RandomizationMode.mode_one_count:
serialized = serialize(container_case_fn(rng, mode, typ))
serialized = serialized + serialized[0:1]
yield f'{name}_{mode.to_name()}_last offset_{offset_index}_wrong_byte_length', \
invalid_test_case(lambda: serialized)
|
27,966 | def check_config_file(args):
"""
LOG and check about the config file usage.
If a config file is set but does not exists the program will
exit.
LOG is not initialized in the process_config_file function yet
so we can not log the usage there. Using print will
always print out the config file data which can mess up the
tests depending on the output.
"""
if 'config' in args and not os.path.exists(args.config):
LOG.error("Configuration file '%s' does not exists.",
args.config)
sys.exit(1)
if 'config_file' not in args or not os.path.exists(args.config_file):
return None
cfg = load_json_or_empty(args.config_file, default={})
if cfg.get("enabled"):
LOG.debug("Using config file: '{0}'.".format(args.config_file))
return cfg.get('analyzer', [])
LOG.debug("Config file '{0}' is available but disabled.".format(
args.config_file))
| def check_config_file(args):
"""
LOG and check about the config file usage.
If a config file is set but does not exists the program will
exit.
LOG is not initialized in the process_config_file function yet
so we can not log the usage there. Using print will
always print out the config file data which can mess up the
tests depending on the output.
"""
if 'config' in args and not os.path.exists(args.config):
LOG.error("Configuration file '%s' does not exists.",
args.config)
sys.exit(1)
if 'config_file' not in args or not os.path.exists(args.config_file):
return None
cfg = load_json_or_empty(args.config_file, default={})
if cfg.get("enabled"):
LOG.debug("Using config file: '%s'.", args.config_file)
return cfg.get('analyzer', [])
LOG.debug("Config file '{0}' is available but disabled.".format(
args.config_file))
|
22,602 | def forward(model: Model[InT, OutT], X: InT, is_train: bool) -> Tuple[OutT, Callable]:
nO = model.get_dim("nO")
nP = model.get_dim("nP")
nI = model.get_dim("nI")
b = model.get_param("b")
W = cast(Array2d, model.get_param("W"))
W = W.reshape((nO * nP, nI))
Y = model.ops.gemm(X, W, trans2=True)
Y += b.reshape((nO * nP,))
Y = Y.reshape((Y.shape[0], nO, nP))
best, which = model.ops.maxout(Y)
def backprop(d_best: OutT) -> InT:
dY = model.ops.backprop_maxout(d_best, which, nP)
model.inc_grad("b", dY.sum(axis=0))
dY = dY.reshape((dY.shape[0], nO * nP))
model.inc_grad("W", model.ops.gemm(dY, X, trans1=True).reshape((nO, nP, nI)))
return model.ops.gemm(dY, W.reshape((nO * nP, nI)))
return best, backprop
| def forward(model: Model[InT, OutT], X: InT, is_train: bool) -> Tuple[OutT, Callable]:
nO = model.get_dim("nO")
nP = model.get_dim("nP")
nI = model.get_dim("nI")
b = model.get_param("b")
W = model.get_param("W")
W = W.reshape((nO * nP, nI))
Y = model.ops.gemm(X, W, trans2=True)
Y += b.reshape((nO * nP,))
Y = Y.reshape((Y.shape[0], nO, nP))
best, which = model.ops.maxout(Y)
def backprop(d_best: OutT) -> InT:
dY = model.ops.backprop_maxout(d_best, which, nP)
model.inc_grad("b", dY.sum(axis=0))
dY = dY.reshape((dY.shape[0], nO * nP))
model.inc_grad("W", model.ops.gemm(dY, X, trans1=True).reshape((nO, nP, nI)))
return model.ops.gemm(dY, W.reshape((nO * nP, nI)))
return best, backprop
|
3,904 | def extrema_bounding(G, compute="diameter"):
"""Compute requested extreme distance metric of undirected graph G
Computation is based on smart lower and upper bounds, and in practice
linear in the number of nodes, rather than quadratic (except for some
border cases such as complete graphs or circle shaped graphs).
Parameters
----------
G : NetworkX graph
An undirected graph
compute : string denoting the requesting metric
"diameter" for the maximal eccentricity value,
"radius" for the minimal eccentricity value,
"periphery" for the set of nodes with eccentricity equal to the diameter,
"center" for the set of nodes with eccentricity equal to the radius,
"eccentricities" for the maximum distance from each node to all other nodes in G
Returns
-------
value : value of the requested metric
int for "diameter" and "radius" or
list of nodes for "center" and "periphery" or
dictionary of eccentricity values keyed by node for "eccentricities"
Raises
------
NetworkXError
If the graph consists of multiple components or
If the compute parameter is passed an invalid argument
Notes
-----
This algorithm was proposed in the following papers:
F.W. Takes and W.A. Kosters, Determining the Diameter of Small World
Networks, in Proceedings of the 20th ACM International Conference on
Information and Knowledge Management (CIKM 2011), pp. 1191-1196, 2011.
doi: https://doi.org/10.1145/2063576.2063748
F.W. Takes and W.A. Kosters, Computing the Eccentricity Distribution of
Large Graphs, Algorithms 6(1): 100-118, 2013.
doi: https://doi.org/10.3390/a6010100
M. Borassi, P. Crescenzi, M. Habib, W.A. Kosters, A. Marino and F.W. Takes,
Fast Graph Diameter and Radius BFS-Based Computation in (Weakly Connected)
Real-World Graphs, Theoretical Computer Science 586: 59-80, 2015.
doi: https://doi.org/10.1016/j.tcs.2015.02.033
"""
# init variables
degrees = dict(G.degree()) # start with the highest degree node
minlowernode = max(degrees, key=degrees.get)
N = len(degrees) # number of nodes
# alternate between smallest lower and largest upper bound
high = False
# status variables
ecc_lower = dict.fromkeys(G, 0)
ecc_upper = dict.fromkeys(G, N)
candidates = set(G)
# (re)set bound extremes
minlower = N
maxlower = 0
minupper = N
maxupper = 0
# repeat the following until there are no more candidates
while candidates:
if high:
current = maxuppernode # select node with largest upper bound
else:
current = minlowernode # select node with smallest lower bound
high = not high
# get distances from/to current node and derive eccentricity
dist = dict(nx.single_source_shortest_path_length(G, current))
if len(dist) != N:
msg = "Cannot compute metric because graph is not connected."
raise nx.NetworkXError(msg)
current_ecc = max(dist.values())
# print status update
# print ("ecc of " + str(current) + " (" + str(ecc_lower[current]) + "/"
# + str(ecc_upper[current]) + ", deg: " + str(dist[current]) + ") is "
# + str(current_ecc))
# print(ecc_upper)
# (re)set bound extremes
maxuppernode = None
minlowernode = None
# update node bounds
for i in candidates:
# update eccentricity bounds
d = dist[i]
ecc_lower[i] = low = max(ecc_lower[i], max(d, (current_ecc - d)))
ecc_upper[i] = upp = min(ecc_upper[i], current_ecc + d)
# update min/max values of lower and upper bounds
minlower = min(ecc_lower[i], minlower)
maxlower = max(ecc_lower[i], maxlower)
minupper = min(ecc_upper[i], minupper)
maxupper = max(ecc_upper[i], maxupper)
# update candidate set
if compute == "diameter":
ruled_out = {
i
for i in candidates
if ecc_upper[i] <= maxlower and 2 * ecc_lower[i] >= maxupper
}
elif compute == "radius":
ruled_out = {
i
for i in candidates
if ecc_lower[i] >= minupper and ecc_upper[i] + 1 <= 2 * minlower
}
elif compute == "periphery":
ruled_out = {
i
for i in candidates
if ecc_upper[i] < maxlower
and (maxlower == maxupper or ecc_lower[i] > maxupper)
}
elif compute == "center":
ruled_out = {
i
for i in candidates
if ecc_lower[i] > minupper
and (minlower == minupper or ecc_upper[i] + 1 < 2 * minlower)
}
elif compute == "eccentricities":
ruled_out = set()
else:
msg = "The argument passed to compute parameter is invalid. Please enter one of the following extreme distance metrics: 'diameter', 'radius', 'periphery', 'center', 'eccentricities'"
raise nx.NetworkXError(msg)
ruled_out.update(i for i in candidates if ecc_lower[i] == ecc_upper[i])
candidates -= ruled_out
# for i in ruled_out:
# print("removing %g: ecc_u: %g maxl: %g ecc_l: %g maxu: %g"%
# (i,ecc_upper[i],maxlower,ecc_lower[i],maxupper))
# print("node %g: ecc_u: %g maxl: %g ecc_l: %g maxu: %g"%
# (4,ecc_upper[4],maxlower,ecc_lower[4],maxupper))
# print("NODE 4: %g"%(ecc_upper[4] <= maxlower))
# print("NODE 4: %g"%(2 * ecc_lower[4] >= maxupper))
# print("NODE 4: %g"%(ecc_upper[4] <= maxlower
# and 2 * ecc_lower[4] >= maxupper))
# updating maxuppernode and minlowernode for selection in next round
for i in candidates:
if (
minlowernode is None
or (
ecc_lower[i] == ecc_lower[minlowernode]
and degrees[i] > degrees[minlowernode]
)
or (ecc_lower[i] < ecc_lower[minlowernode])
):
minlowernode = i
if (
maxuppernode is None
or (
ecc_upper[i] == ecc_upper[maxuppernode]
and degrees[i] > degrees[maxuppernode]
)
or (ecc_upper[i] > ecc_upper[maxuppernode])
):
maxuppernode = i
# print status update
# print (" min=" + str(minlower) + "/" + str(minupper) +
# " max=" + str(maxlower) + "/" + str(maxupper) +
# " candidates: " + str(len(candidates)))
# print("cand:",candidates)
# print("ecc_l",ecc_lower)
# print("ecc_u",ecc_upper)
# wait = input("press Enter to continue")
# return the correct value of the requested metric
if compute == "diameter":
return maxlower
elif compute == "radius":
return minupper
elif compute == "periphery":
p = [v for v in G if ecc_lower[v] == maxlower]
return p
elif compute == "center":
c = [v for v in G if ecc_upper[v] == minupper]
return c
elif compute == "eccentricities":
return ecc_lower
return None
| def extrema_bounding(G, compute="diameter"):
"""Compute requested extreme distance metric of undirected graph G
Computation is based on smart lower and upper bounds, and in practice
linear in the number of nodes, rather than quadratic (except for some
border cases such as complete graphs or circle shaped graphs).
Parameters
----------
G : NetworkX graph
An undirected graph
compute : string denoting the requesting metric
"diameter" for the maximal eccentricity value,
"radius" for the minimal eccentricity value,
"periphery" for the set of nodes with eccentricity equal to the diameter,
"center" for the set of nodes with eccentricity equal to the radius,
"eccentricities" for the maximum distance from each node to all other nodes in G
Returns
-------
value : value of the requested metric
int for "diameter" and "radius" or
list of nodes for "center" and "periphery" or
dictionary of eccentricity values keyed by node for "eccentricities"
Raises
------
NetworkXError
If the graph consists of multiple components or
If the compute parameter is passed an invalid argument
Notes
-----
This algorithm was proposed in the following papers:
F.W. Takes and W.A. Kosters, Determining the Diameter of Small World
Networks, in Proceedings of the 20th ACM International Conference on
Information and Knowledge Management (CIKM 2011), pp. 1191-1196, 2011.
doi: https://doi.org/10.1145/2063576.2063748
F.W. Takes and W.A. Kosters, Computing the Eccentricity Distribution of
Large Graphs, Algorithms 6(1): 100-118, 2013.
doi: https://doi.org/10.3390/a6010100
M. Borassi, P. Crescenzi, M. Habib, W.A. Kosters, A. Marino and F.W. Takes,
Fast Graph Diameter and Radius BFS-Based Computation in (Weakly Connected)
Real-World Graphs, Theoretical Computer Science 586: 59-80, 2015.
doi: https://doi.org/10.1016/j.tcs.2015.02.033
"""
# init variables
degrees = dict(G.degree()) # start with the highest degree node
minlowernode = max(degrees, key=degrees.get)
N = len(degrees) # number of nodes
# alternate between smallest lower and largest upper bound
high = False
# status variables
ecc_lower = dict.fromkeys(G, 0)
ecc_upper = dict.fromkeys(G, N)
candidates = set(G)
# (re)set bound extremes
minlower = N
maxlower = 0
minupper = N
maxupper = 0
# repeat the following until there are no more candidates
while candidates:
if high:
current = maxuppernode # select node with largest upper bound
else:
current = minlowernode # select node with smallest lower bound
high = not high
# get distances from/to current node and derive eccentricity
dist = dict(nx.single_source_shortest_path_length(G, current))
if len(dist) != N:
msg = "Cannot compute metric because graph is not connected."
raise nx.NetworkXError(msg)
current_ecc = max(dist.values())
# print status update
# print ("ecc of " + str(current) + " (" + str(ecc_lower[current]) + "/"
# + str(ecc_upper[current]) + ", deg: " + str(dist[current]) + ") is "
# + str(current_ecc))
# print(ecc_upper)
# (re)set bound extremes
maxuppernode = None
minlowernode = None
# update node bounds
for i in candidates:
# update eccentricity bounds
d = dist[i]
ecc_lower[i] = low = max(ecc_lower[i], max(d, (current_ecc - d)))
ecc_upper[i] = upp = min(ecc_upper[i], current_ecc + d)
# update min/max values of lower and upper bounds
minlower = min(ecc_lower[i], minlower)
maxlower = max(ecc_lower[i], maxlower)
minupper = min(ecc_upper[i], minupper)
maxupper = max(ecc_upper[i], maxupper)
# update candidate set
if compute == "diameter":
ruled_out = {
i
for i in candidates
if ecc_upper[i] <= maxlower and 2 * ecc_lower[i] >= maxupper
}
elif compute == "radius":
ruled_out = {
i
for i in candidates
if ecc_lower[i] >= minupper and ecc_upper[i] + 1 <= 2 * minlower
}
elif compute == "periphery":
ruled_out = {
i
for i in candidates
if ecc_upper[i] < maxlower
and (maxlower == maxupper or ecc_lower[i] > maxupper)
}
elif compute == "center":
ruled_out = {
i
for i in candidates
if ecc_lower[i] > minupper
and (minlower == minupper or ecc_upper[i] + 1 < 2 * minlower)
}
elif compute == "eccentricities":
ruled_out = set()
else:
msg = "compute must be one of 'diameter', 'radius', 'periphery', 'center', 'eccentricities'"
raise nx.NetworkXError(msg)
ruled_out.update(i for i in candidates if ecc_lower[i] == ecc_upper[i])
candidates -= ruled_out
# for i in ruled_out:
# print("removing %g: ecc_u: %g maxl: %g ecc_l: %g maxu: %g"%
# (i,ecc_upper[i],maxlower,ecc_lower[i],maxupper))
# print("node %g: ecc_u: %g maxl: %g ecc_l: %g maxu: %g"%
# (4,ecc_upper[4],maxlower,ecc_lower[4],maxupper))
# print("NODE 4: %g"%(ecc_upper[4] <= maxlower))
# print("NODE 4: %g"%(2 * ecc_lower[4] >= maxupper))
# print("NODE 4: %g"%(ecc_upper[4] <= maxlower
# and 2 * ecc_lower[4] >= maxupper))
# updating maxuppernode and minlowernode for selection in next round
for i in candidates:
if (
minlowernode is None
or (
ecc_lower[i] == ecc_lower[minlowernode]
and degrees[i] > degrees[minlowernode]
)
or (ecc_lower[i] < ecc_lower[minlowernode])
):
minlowernode = i
if (
maxuppernode is None
or (
ecc_upper[i] == ecc_upper[maxuppernode]
and degrees[i] > degrees[maxuppernode]
)
or (ecc_upper[i] > ecc_upper[maxuppernode])
):
maxuppernode = i
# print status update
# print (" min=" + str(minlower) + "/" + str(minupper) +
# " max=" + str(maxlower) + "/" + str(maxupper) +
# " candidates: " + str(len(candidates)))
# print("cand:",candidates)
# print("ecc_l",ecc_lower)
# print("ecc_u",ecc_upper)
# wait = input("press Enter to continue")
# return the correct value of the requested metric
if compute == "diameter":
return maxlower
elif compute == "radius":
return minupper
elif compute == "periphery":
p = [v for v in G if ecc_lower[v] == maxlower]
return p
elif compute == "center":
c = [v for v in G if ecc_upper[v] == minupper]
return c
elif compute == "eccentricities":
return ecc_lower
return None
|
20,007 | def analyze_color(rgb_img, mask, hist_plot_type=None):
"""Analyze the color properties of an image object
Inputs:
rgb_img = RGB image data
mask = Binary mask made from selected contours
hist_plot_type = 'None', 'all', 'rgb','lab' or 'hsv'
Returns:
analysis_image = histogram output
:param rgb_img: numpy.ndarray
:param mask: numpy.ndarray
:param hist_plot_type: str
:return analysis_images: list
"""
params.device += 1
if len(np.shape(rgb_img)) < 3:
fatal_error("rgb_img must be an RGB image")
# Mask the input image
masked = cv2.bitwise_and(rgb_img, rgb_img, mask=mask)
# Extract the blue, green, and red channels
b, g, r = cv2.split(masked)
# Convert the BGR image to LAB
lab = cv2.cvtColor(masked, cv2.COLOR_BGR2LAB)
# Extract the lightness, green-magenta, and blue-yellow channels
l, m, y = cv2.split(lab)
# Convert the BGR image to HSV
hsv = cv2.cvtColor(masked, cv2.COLOR_BGR2HSV)
# Extract the hue, saturation, and value channels
h, s, v = cv2.split(hsv)
# Color channel dictionary
channels = {"b": b, "g": g, "r": r, "l": l, "m": m, "y": y, "h": h, "s": s, "v": v}
# Histogram plot types
hist_types = {"ALL": ("b", "g", "r", "l", "m", "y", "h", "s", "v"),
"RGB": ("b", "g", "r"),
"LAB": ("l", "m", "y"),
"HSV": ("h", "s", "v")}
if hist_plot_type is not None and hist_plot_type.upper() not in hist_types:
fatal_error("The histogram plot type was " + str(hist_plot_type) +
', but can only be one of the following: None, "all", "rgb", "lab", or "hsv"!')
# Store histograms, plotting colors, and plotting labels
histograms = {
"b": {"label": "blue", "graph_color": "blue",
"hist": [float(l[0]) for l in cv2.calcHist([channels["b"]], [0], mask, [256], [0, 255])]},
"g": {"label": "green", "graph_color": "forestgreen",
"hist": [float(l[0]) for l in cv2.calcHist([channels["g"]], [0], mask, [256], [0, 255])]},
"r": {"label": "red", "graph_color": "red",
"hist": [float(l[0]) for l in cv2.calcHist([channels["r"]], [0], mask, [256], [0, 255])]},
"l": {"label": "lightness", "graph_color": "dimgray",
"hist": [float(l[0]) for l in cv2.calcHist([channels["l"]], [0], mask, [256], [0, 255])]},
"m": {"label": "green-magenta", "graph_color": "magenta",
"hist": [float(l[0]) for l in cv2.calcHist([channels["m"]], [0], mask, [256], [0, 255])]},
"y": {"label": "blue-yellow", "graph_color": "yellow",
"hist": [float(l[0]) for l in cv2.calcHist([channels["y"]], [0], mask, [256], [0, 255])]},
"h": {"label": "hue", "graph_color": "blueviolet",
"hist": [float(l[0]) for l in cv2.calcHist([channels["h"]], [0], mask, [256], [0, 255])]},
"s": {"label": "saturation", "graph_color": "cyan",
"hist": [float(l[0]) for l in cv2.calcHist([channels["s"]], [0], mask, [256], [0, 255])]},
"v": {"label": "value", "graph_color": "orange",
"hist": [float(l[0]) for l in cv2.calcHist([channels["v"]], [0], mask, [256], [0, 255])]}
}
# Create list of bin labels for 8-bit data
binval = np.arange(0, 256)
bin_values = [l for l in binval]
analysis_image = None
# Create a dataframe of bin labels and histogram data
dataset = pd.DataFrame({'bins': binval, 'blue': histograms["b"]["hist"],
'green': histograms["g"]["hist"], 'red': histograms["r"]["hist"],
'lightness': histograms["l"]["hist"], 'green-magenta': histograms["m"]["hist"],
'blue-yellow': histograms["y"]["hist"], 'hue': histograms["h"]["hist"],
'saturation': histograms["s"]["hist"], 'value': histograms["v"]["hist"]})
# Make the histogram figure using plotnine
if hist_plot_type is not None:
if hist_plot_type.upper() == 'RGB':
df_rgb = pd.melt(dataset, id_vars=['bins'], value_vars=['blue', 'green', 'red'],
var_name='Color Channel', value_name='Pixels')
hist_fig = (ggplot(df_rgb, aes(x='bins', y='Pixels', color='Color Channel'))
+ geom_line()
+ scale_x_continuous(breaks=list(range(0, 256, 25)))
+ scale_color_manual(['blue', 'green', 'red'])
)
elif hist_plot_type.upper() == 'LAB':
df_lab = pd.melt(dataset, id_vars=['bins'],
value_vars=['lightness', 'green-magenta', 'blue-yellow'],
var_name='Color Channel', value_name='Pixels')
hist_fig = (ggplot(df_lab, aes(x='bins', y='Pixels', color='Color Channel'))
+ geom_line()
+ scale_x_continuous(breaks=list(range(0, 256, 25)))
+ scale_color_manual(['yellow', 'magenta', 'dimgray'])
)
elif hist_plot_type.upper() == 'HSV':
df_hsv = pd.melt(dataset, id_vars=['bins'],
value_vars=['hue', 'saturation', 'value'],
var_name='Color Channel', value_name='Pixels')
hist_fig = (ggplot(df_hsv, aes(x='bins', y='Pixels', color='Color Channel'))
+ geom_line()
+ scale_x_continuous(breaks=list(range(0, 256, 25)))
+ scale_color_manual(['blueviolet', 'cyan', 'orange'])
)
elif hist_plot_type.upper() == 'ALL':
s = pd.Series(['blue', 'green', 'red', 'lightness', 'green-magenta',
'blue-yellow', 'hue', 'saturation', 'value'], dtype="category")
color_channels = ['blue', 'yellow', 'green', 'magenta', 'blueviolet',
'dimgray', 'red', 'cyan', 'orange']
df_all = pd.melt(dataset, id_vars=['bins'], value_vars=s, var_name='Color Channel',
value_name='Pixels')
hist_fig = (ggplot(df_all, aes(x='bins', y='Pixels', color='Color Channel'))
+ geom_line()
+ scale_x_continuous(breaks=list(range(0, 256, 25)))
+ scale_color_manual(color_channels)
)
analysis_image = hist_fig
# Hue values of zero are red but are also the value for pixels where hue is undefined
# The hue value of a pixel will be undefined when the color values are saturated
# Therefore, hue values of zero are excluded from the calculations below
# Calculate the median hue value
# The median is rescaled from the encoded 0-179 range to the 0-359 degree range
hue_median = np.median(h[np.where(h > 0)]) * 2
# Calculate the circular mean and standard deviation of the encoded hue values
# The mean and standard-deviation are rescaled from the encoded 0-179 range to the 0-359 degree range
hue_circular_mean = stats.circmean(h[np.where(h > 0)], high=179, low=0) * 2
hue_circular_std = stats.circstd(h[np.where(h > 0)], high=179, low=0) * 2
# Store into lists instead for pipeline and print_results
# stats_dict = {'mean': circular_mean, 'std' : circular_std, 'median': median}
# Plot or print the histogram
if hist_plot_type is not None:
if params.debug == 'print':
hist_fig.save(os.path.join(params.debug_outdir, str(params.device) + '_analyze_color_hist.png'))
elif params.debug == 'plot':
print(hist_fig)
# Store into global measurements
# RGB signal values are in an unsigned 8-bit scale of 0-255
rgb_values = [i for i in range(0, 256)]
# Hue values are in a 0-359 degree scale, every 2 degrees at the midpoint of the interval
hue_values = [i * 2 + 1 for i in range(0, 180)]
# Percentage values on a 0-100 scale (lightness, saturation, and value)
percent_values = [round((i / 255) * 100, 2) for i in range(0, 256)]
# Diverging values on a -128 to 127 scale (green-magenta and blue-yellow)
diverging_values = [i for i in range(-128, 128)]
if hist_plot_type is not None:
if hist_plot_type.upper() == 'RGB':
outputs.add_observation(variable='blue_frequencies', trait='blue frequencies',
method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
value=histograms["b"]["hist"], label=rgb_values)
outputs.add_observation(variable='green_frequencies', trait='green frequencies',
method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
value=histograms["g"]["hist"], label=rgb_values)
outputs.add_observation(variable='red_frequencies', trait='red frequencies',
method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
value=histograms["r"]["hist"], label=rgb_values)
elif hist_plot_type.upper() == 'LAB':
outputs.add_observation(variable='lightness_frequencies', trait='lightness frequencies',
method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
value=histograms["l"]["hist"], label=percent_values)
outputs.add_observation(variable='green-magenta_frequencies', trait='green-magenta frequencies',
method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
value=histograms["m"]["hist"], label=diverging_values)
outputs.add_observation(variable='blue-yellow_frequencies', trait='blue-yellow frequencies',
method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
value=histograms["y"]["hist"], label=diverging_values)
elif hist_plot_type.upper() == 'HSV':
outputs.add_observation(variable='hue_frequencies', trait='hue frequencies',
method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
value=histograms["h"]["hist"][0:180], label=hue_values)
outputs.add_observation(variable='saturation_frequencies', trait='saturation frequencies',
method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
value=histograms["s"]["hist"], label=percent_values)
outputs.add_observation(variable='value_frequencies', trait='value frequencies',
method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
value=histograms["v"]["hist"], label=percent_values)
elif hist_plot_type.upper() == 'ALL':
outputs.add_observation(variable='blue_frequencies', trait='blue frequencies',
method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
value=histograms["b"]["hist"], label=rgb_values)
outputs.add_observation(variable='green_frequencies', trait='green frequencies',
method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
value=histograms["g"]["hist"], label=rgb_values)
outputs.add_observation(variable='red_frequencies', trait='red frequencies',
method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
value=histograms["r"]["hist"], label=rgb_values)
outputs.add_observation(variable='lightness_frequencies', trait='lightness frequencies',
method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
value=histograms["l"]["hist"], label=percent_values)
outputs.add_observation(variable='green-magenta_frequencies', trait='green-magenta frequencies',
method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
value=histograms["m"]["hist"], label=diverging_values)
outputs.add_observation(variable='blue-yellow_frequencies', trait='blue-yellow frequencies',
method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
value=histograms["y"]["hist"], label=diverging_values)
outputs.add_observation(variable='hue_frequencies', trait='hue frequencies',
method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
value=histograms["h"]["hist"][0:180], label=hue_values)
outputs.add_observation(variable='saturation_frequencies', trait='saturation frequencies',
method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
value=histograms["s"]["hist"], label=percent_values)
outputs.add_observation(variable='value_frequencies', trait='value frequencies',
method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
value=histograms["v"]["hist"], label=percent_values)
# Always save hue stats
outputs.add_observation(variable='hue_circular_mean', trait='hue circular mean',
method='plantcv.plantcv.analyze_color', scale='degrees', datatype=float,
value=hue_circular_mean, label='degrees')
outputs.add_observation(variable='hue_circular_std', trait='hue circular standard deviation',
method='plantcv.plantcv.analyze_color', scale='degrees', datatype=float,
value=hue_median, label='degrees')
outputs.add_observation(variable='hue_median', trait='hue median',
method='plantcv.plantcv.analyze_color', scale='degrees', datatype=float,
value=hue_median, label='degrees')
# Store images
outputs.images.append([analysis_image])
return analysis_image
| def analyze_color(rgb_img, mask, hist_plot_type=None):
"""Analyze the color properties of an image object
Inputs:
rgb_img = RGB image data
mask = Binary mask made from selected contours
hist_plot_type = 'None', 'all', 'rgb','lab' or 'hsv'
Returns:
analysis_image = histogram output
:param rgb_img: numpy.ndarray
:param mask: numpy.ndarray
:param hist_plot_type: str
:return analysis_images: list
"""
params.device += 1
if len(np.shape(rgb_img)) < 3:
fatal_error("rgb_img must be an RGB image")
# Mask the input image
masked = cv2.bitwise_and(rgb_img, rgb_img, mask=mask)
# Extract the blue, green, and red channels
b, g, r = cv2.split(masked)
# Convert the BGR image to LAB
lab = cv2.cvtColor(masked, cv2.COLOR_BGR2LAB)
# Extract the lightness, green-magenta, and blue-yellow channels
l, m, y = cv2.split(lab)
# Convert the BGR image to HSV
hsv = cv2.cvtColor(masked, cv2.COLOR_BGR2HSV)
# Extract the hue, saturation, and value channels
h, s, v = cv2.split(hsv)
# Color channel dictionary
channels = {"b": b, "g": g, "r": r, "l": l, "m": m, "y": y, "h": h, "s": s, "v": v}
# Histogram plot types
hist_types = {"ALL": ("b", "g", "r", "l", "m", "y", "h", "s", "v"),
"RGB": ("b", "g", "r"),
"LAB": ("l", "m", "y"),
"HSV": ("h", "s", "v")}
if hist_plot_type is not None and hist_plot_type.upper() not in hist_types:
fatal_error("The histogram plot type was " + str(hist_plot_type) +
', but can only be one of the following: None, "all", "rgb", "lab", or "hsv"!')
# Store histograms, plotting colors, and plotting labels
histograms = {
"b": {"label": "blue", "graph_color": "blue",
"hist": [float(l[0]) for l in cv2.calcHist([channels["b"]], [0], mask, [256], [0, 255])]},
"g": {"label": "green", "graph_color": "forestgreen",
"hist": [float(l[0]) for l in cv2.calcHist([channels["g"]], [0], mask, [256], [0, 255])]},
"r": {"label": "red", "graph_color": "red",
"hist": [float(l[0]) for l in cv2.calcHist([channels["r"]], [0], mask, [256], [0, 255])]},
"l": {"label": "lightness", "graph_color": "dimgray",
"hist": [float(l[0]) for l in cv2.calcHist([channels["l"]], [0], mask, [256], [0, 255])]},
"m": {"label": "green-magenta", "graph_color": "magenta",
"hist": [float(l[0]) for l in cv2.calcHist([channels["m"]], [0], mask, [256], [0, 255])]},
"y": {"label": "blue-yellow", "graph_color": "yellow",
"hist": [float(l[0]) for l in cv2.calcHist([channels["y"]], [0], mask, [256], [0, 255])]},
"h": {"label": "hue", "graph_color": "blueviolet",
"hist": [float(l[0]) for l in cv2.calcHist([channels["h"]], [0], mask, [256], [0, 255])]},
"s": {"label": "saturation", "graph_color": "cyan",
"hist": [float(l[0]) for l in cv2.calcHist([channels["s"]], [0], mask, [256], [0, 255])]},
"v": {"label": "value", "graph_color": "orange",
"hist": [float(l[0]) for l in cv2.calcHist([channels["v"]], [0], mask, [256], [0, 255])]}
}
# Create list of bin labels for 8-bit data
binval = np.arange(0, 256)
bin_values = [l for l in binval]
analysis_image = None
# Create a dataframe of bin labels and histogram data
dataset = pd.DataFrame({'bins': binval, 'blue': histograms["b"]["hist"],
'green': histograms["g"]["hist"], 'red': histograms["r"]["hist"],
'lightness': histograms["l"]["hist"], 'green-magenta': histograms["m"]["hist"],
'blue-yellow': histograms["y"]["hist"], 'hue': histograms["h"]["hist"],
'saturation': histograms["s"]["hist"], 'value': histograms["v"]["hist"]})
# Make the histogram figure using plotnine
if hist_plot_type is not None:
if hist_plot_type.upper() == 'RGB':
df_rgb = pd.melt(dataset, id_vars=['bins'], value_vars=['blue', 'green', 'red'],
var_name='Color Channel', value_name='Pixels')
hist_fig = (ggplot(df_rgb, aes(x='bins', y='Pixels', color='Color Channel'))
+ geom_line()
+ scale_x_continuous(breaks=list(range(0, 256, 25)))
+ scale_color_manual(['blue', 'green', 'red'])
)
elif hist_plot_type.upper() == 'LAB':
df_lab = pd.melt(dataset, id_vars=['bins'],
value_vars=['lightness', 'green-magenta', 'blue-yellow'],
var_name='Color Channel', value_name='Pixels')
hist_fig = (ggplot(df_lab, aes(x='bins', y='Pixels', color='Color Channel'))
+ geom_line()
+ scale_x_continuous(breaks=list(range(0, 256, 25)))
+ scale_color_manual(['yellow', 'magenta', 'dimgray'])
)
elif hist_plot_type.upper() == 'HSV':
df_hsv = pd.melt(dataset, id_vars=['bins'],
value_vars=['hue', 'saturation', 'value'],
var_name='Color Channel', value_name='Pixels')
hist_fig = (ggplot(df_hsv, aes(x='bins', y='Pixels', color='Color Channel'))
+ geom_line()
+ scale_x_continuous(breaks=list(range(0, 256, 25)))
+ scale_color_manual(['blueviolet', 'cyan', 'orange'])
)
elif hist_plot_type.upper() == 'ALL':
s = pd.Series(['blue', 'green', 'red', 'lightness', 'green-magenta',
'blue-yellow', 'hue', 'saturation', 'value'], dtype="category")
color_channels = ['blue', 'yellow', 'green', 'magenta', 'blueviolet',
'dimgray', 'red', 'cyan', 'orange']
df_all = pd.melt(dataset, id_vars=['bins'], value_vars=s, var_name='Color Channel',
value_name='Pixels')
hist_fig = (ggplot(df_all, aes(x='bins', y='Pixels', color='Color Channel'))
+ geom_line()
+ scale_x_continuous(breaks=list(range(0, 256, 25)))
+ scale_color_manual(color_channels)
)
analysis_image = hist_fig
# Hue values of zero are red but are also the value for pixels where hue is undefined
# The hue value of a pixel will be undefined when the color values are saturated
# Therefore, hue values of zero are excluded from the calculations below
# Calculate the median hue value
# The median is rescaled from the encoded 0-179 range to the 0-359 degree range
hue_median = np.median(h[np.where(h > 0)]) * 2
# Calculate the circular mean and standard deviation of the encoded hue values
# The mean and standard-deviation are rescaled from the encoded 0-179 range to the 0-359 degree range
hue_circular_mean = stats.circmean(h[np.where(h > 0)], high=179, low=0) * 2
hue_circular_std = stats.circstd(h[np.where(h > 0)], high=179, low=0) * 2
# Store into lists instead for pipeline and print_results
# stats_dict = {'mean': circular_mean, 'std' : circular_std, 'median': median}
# Plot or print the histogram
if hist_plot_type is not None:
if params.debug == 'print':
hist_fig.save(os.path.join(params.debug_outdir, str(params.device) + '_analyze_color_hist.png'))
elif params.debug == 'plot':
print(hist_fig)
# Store into global measurements
# RGB signal values are in an unsigned 8-bit scale of 0-255
rgb_values = [i for i in range(0, 256)]
# Hue values are in a 0-359 degree scale, every 2 degrees at the midpoint of the interval
hue_values = [i * 2 + 1 for i in range(0, 180)]
# Percentage values on a 0-100 scale (lightness, saturation, and value)
percent_values = [round((i / 255) * 100, 2) for i in range(0, 256)]
# Diverging values on a -128 to 127 scale (green-magenta and blue-yellow)
diverging_values = [i for i in range(-128, 128)]
if hist_plot_type is not None:
if hist_plot_type.upper() == 'RGB':
outputs.add_observation(variable='blue_frequencies', trait='blue frequencies',
method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
value=histograms["b"]["hist"], label=rgb_values)
outputs.add_observation(variable='green_frequencies', trait='green frequencies',
method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
value=histograms["g"]["hist"], label=rgb_values)
outputs.add_observation(variable='red_frequencies', trait='red frequencies',
method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
value=histograms["r"]["hist"], label=rgb_values)
elif hist_plot_type.upper() == 'LAB' or hist_plot_type.upper() == 'ALL':
outputs.add_observation(variable='lightness_frequencies', trait='lightness frequencies',
method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
value=histograms["l"]["hist"], label=percent_values)
outputs.add_observation(variable='green-magenta_frequencies', trait='green-magenta frequencies',
method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
value=histograms["m"]["hist"], label=diverging_values)
outputs.add_observation(variable='blue-yellow_frequencies', trait='blue-yellow frequencies',
method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
value=histograms["y"]["hist"], label=diverging_values)
elif hist_plot_type.upper() == 'HSV':
outputs.add_observation(variable='hue_frequencies', trait='hue frequencies',
method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
value=histograms["h"]["hist"][0:180], label=hue_values)
outputs.add_observation(variable='saturation_frequencies', trait='saturation frequencies',
method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
value=histograms["s"]["hist"], label=percent_values)
outputs.add_observation(variable='value_frequencies', trait='value frequencies',
method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
value=histograms["v"]["hist"], label=percent_values)
elif hist_plot_type.upper() == 'ALL':
outputs.add_observation(variable='blue_frequencies', trait='blue frequencies',
method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
value=histograms["b"]["hist"], label=rgb_values)
outputs.add_observation(variable='green_frequencies', trait='green frequencies',
method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
value=histograms["g"]["hist"], label=rgb_values)
outputs.add_observation(variable='red_frequencies', trait='red frequencies',
method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
value=histograms["r"]["hist"], label=rgb_values)
outputs.add_observation(variable='lightness_frequencies', trait='lightness frequencies',
method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
value=histograms["l"]["hist"], label=percent_values)
outputs.add_observation(variable='green-magenta_frequencies', trait='green-magenta frequencies',
method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
value=histograms["m"]["hist"], label=diverging_values)
outputs.add_observation(variable='blue-yellow_frequencies', trait='blue-yellow frequencies',
method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
value=histograms["y"]["hist"], label=diverging_values)
outputs.add_observation(variable='hue_frequencies', trait='hue frequencies',
method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
value=histograms["h"]["hist"][0:180], label=hue_values)
outputs.add_observation(variable='saturation_frequencies', trait='saturation frequencies',
method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
value=histograms["s"]["hist"], label=percent_values)
outputs.add_observation(variable='value_frequencies', trait='value frequencies',
method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
value=histograms["v"]["hist"], label=percent_values)
# Always save hue stats
outputs.add_observation(variable='hue_circular_mean', trait='hue circular mean',
method='plantcv.plantcv.analyze_color', scale='degrees', datatype=float,
value=hue_circular_mean, label='degrees')
outputs.add_observation(variable='hue_circular_std', trait='hue circular standard deviation',
method='plantcv.plantcv.analyze_color', scale='degrees', datatype=float,
value=hue_median, label='degrees')
outputs.add_observation(variable='hue_median', trait='hue median',
method='plantcv.plantcv.analyze_color', scale='degrees', datatype=float,
value=hue_median, label='degrees')
# Store images
outputs.images.append([analysis_image])
return analysis_image
|
57,841 | def domain_command(client, args):
domain = args.get('domain')
domain_details = client.domain_details(hostname=domain)
domain_subdomains = client.domain_subdomains(hostname=domain)
domain_whois = client.get_whois(query_type="domain", hostname=domain)
domain_tags = client.domain_tags(hostname=domain)
admin_contact = [{
"Name": x.get('name', None),
"Email": x.get('email', None),
"Phone": x.get('telephone', None),
"Country": x.get('country', None)
} for x in domain_whois.get('contacts', []) if "admin" in x.get('type', '').lower()]
tech_contact = [{
"Name": x.get('name', None),
"Email": x.get('email', None),
"Phone": x.get('telephone', None),
"Country": x.get('country', None)
} for x in domain_whois.get('contacts', []) if "tech" in x.get('type', '').lower()]
registrant_contact = [{
"Name": x.get('name', None),
"Email": x.get('email', None),
"Phone": x.get('telephone', None),
"Country": x.get('country', None)
} for x in domain_whois.get('contacts', []) if "registrant" in x.get('type', '').lower()]
registrar_contact = [{
"Name": x.get('name', None),
"Email": x.get('email', None),
"Phone": x.get('telephone', None),
"Country": x.get('country', None)
} for x in domain_whois.get('contacts', []) if "registrar" in x.get('type', '').lower()]
domain_data = {
"Name": domain,
"DNS": ",".join([x.get('ip', '') for x in domain_details.get('current_dns', {}).get('a', {}).get('values', [])]),
"NameServers": ",".join([x.get('nameserver', '') for x in domain_details.get('current_dns', {}).get('ns', {}).get('values', [])]),
"Organization": domain_details.get('name', None),
"Subdomains": ",".join(domain_subdomains.get('subdomains', [])),
"WHOIS": {
"DomainStatus": domain_whois.get('status'),
"NameServers": ",".join(domain_whois.get('nameServers', [])) if domain_whois.get('nameServers', []) else None,
"CreationDate": domain_whois.get('createdDate'),
"UpdatedDate": domain_whois.get('updatedDate'),
"ExpirationDate": domain_whois.get('expiresData'),
"Registrant": {
"Name": registrant_contact[0].get('Name', None) if registrant_contact else None,
"Email": registrant_contact[0].get('Email', None) if registrant_contact else None,
"Phone": registrant_contact[0].get('Phone', None) if registrant_contact else None
},
"Registrar": {
"Name": registrar_contact[0].get('Name', None) if registrar_contact else None,
"Email": registrar_contact[0].get('Email', None) if registrar_contact else None,
"Phone": registrar_contact[0].get('Phone', None) if registrar_contact else None
},
"Admin": {
"Name": admin_contact[0].get('Name', None) if admin_contact else None,
"Email": admin_contact[0].get('Email', None) if admin_contact else None,
"Phone": admin_contact[0].get('Phone', None) if admin_contact else None
}
},
"Tags": ",".join([x for x in domain_tags]),
"Admin": {
"Country": admin_contact[0].get('Country', None) if admin_contact else None,
"Name": admin_contact[0].get('Name', None) if admin_contact else None,
"Email": admin_contact[0].get('Email', None) if admin_contact else None,
"Phone": admin_contact[0].get('Phone', None) if admin_contact else None
},
"Registrant": {
"Country": registrant_contact[0].get('Country', None) if registrant_contact else None,
"Name": registrant_contact[0].get('Name', None) if registrant_contact else None,
"Email": registrant_contact[0].get('Email', None) if registrant_contact else None,
"Phone": registrant_contact[0].get('Phone', None) if registrant_contact else None
}
}
md = tableToMarkdown(f"Domain {domain}:", domain_data)
command_results = CommandResults(
outputs_prefix="Domain",
outputs_key_field="Name",
outputs=domain_data,
readable_output=md
)
return_results(command_results)
| def domain_command(client, args):
domain = args.get('domain')
domain_details = client.domain_details(hostname=domain)
domain_subdomains = client.domain_subdomains(hostname=domain)
domain_whois = client.get_whois(query_type="domain", hostname=domain)
domain_tags = client.domain_tags(hostname=domain)
admin_contact = [{
"Name": x.get('name'),
"Email": x.get('email'),
"Phone": x.get('telephone'),
"Country": x.get('country')
} for x in domain_whois.get('contacts', []) if "admin" in x.get('type', '').lower()]
tech_contact = [{
"Name": x.get('name', None),
"Email": x.get('email', None),
"Phone": x.get('telephone', None),
"Country": x.get('country', None)
} for x in domain_whois.get('contacts', []) if "tech" in x.get('type', '').lower()]
registrant_contact = [{
"Name": x.get('name', None),
"Email": x.get('email', None),
"Phone": x.get('telephone', None),
"Country": x.get('country', None)
} for x in domain_whois.get('contacts', []) if "registrant" in x.get('type', '').lower()]
registrar_contact = [{
"Name": x.get('name', None),
"Email": x.get('email', None),
"Phone": x.get('telephone', None),
"Country": x.get('country', None)
} for x in domain_whois.get('contacts', []) if "registrar" in x.get('type', '').lower()]
domain_data = {
"Name": domain,
"DNS": ",".join([x.get('ip', '') for x in domain_details.get('current_dns', {}).get('a', {}).get('values', [])]),
"NameServers": ",".join([x.get('nameserver', '') for x in domain_details.get('current_dns', {}).get('ns', {}).get('values', [])]),
"Organization": domain_details.get('name', None),
"Subdomains": ",".join(domain_subdomains.get('subdomains', [])),
"WHOIS": {
"DomainStatus": domain_whois.get('status'),
"NameServers": ",".join(domain_whois.get('nameServers', [])) if domain_whois.get('nameServers', []) else None,
"CreationDate": domain_whois.get('createdDate'),
"UpdatedDate": domain_whois.get('updatedDate'),
"ExpirationDate": domain_whois.get('expiresData'),
"Registrant": {
"Name": registrant_contact[0].get('Name', None) if registrant_contact else None,
"Email": registrant_contact[0].get('Email', None) if registrant_contact else None,
"Phone": registrant_contact[0].get('Phone', None) if registrant_contact else None
},
"Registrar": {
"Name": registrar_contact[0].get('Name', None) if registrar_contact else None,
"Email": registrar_contact[0].get('Email', None) if registrar_contact else None,
"Phone": registrar_contact[0].get('Phone', None) if registrar_contact else None
},
"Admin": {
"Name": admin_contact[0].get('Name', None) if admin_contact else None,
"Email": admin_contact[0].get('Email', None) if admin_contact else None,
"Phone": admin_contact[0].get('Phone', None) if admin_contact else None
}
},
"Tags": ",".join([x for x in domain_tags]),
"Admin": {
"Country": admin_contact[0].get('Country', None) if admin_contact else None,
"Name": admin_contact[0].get('Name', None) if admin_contact else None,
"Email": admin_contact[0].get('Email', None) if admin_contact else None,
"Phone": admin_contact[0].get('Phone', None) if admin_contact else None
},
"Registrant": {
"Country": registrant_contact[0].get('Country', None) if registrant_contact else None,
"Name": registrant_contact[0].get('Name', None) if registrant_contact else None,
"Email": registrant_contact[0].get('Email', None) if registrant_contact else None,
"Phone": registrant_contact[0].get('Phone', None) if registrant_contact else None
}
}
md = tableToMarkdown(f"Domain {domain}:", domain_data)
command_results = CommandResults(
outputs_prefix="Domain",
outputs_key_field="Name",
outputs=domain_data,
readable_output=md
)
return_results(command_results)
|
25,991 | def validate_scale_unit_ranges(namespace):
unit_num = namespace.scale_units
err_msg = "The number of --scale-units should in range [2, 50]."
if unit_num and (unit_num < 2 or unit_num > 50):
raise InvalidArgumentValueError(err_msg)
| def validate_scale_unit_ranges(namespace):
unit_num = namespace.scale_units
err_msg = "The number of --scale-units should in range [2, 50]."
if unit_num is not None and (unit_num < 2 or unit_num > 50):
raise InvalidArgumentValueError(err_msg)
|
12,425 | def handle(name, cfg, cloud, log, _args):
if "autoinstall" not in cfg:
LOG.debug(
"Skipping module named %s, no 'autoinstall' key in configuration",
name,
)
return
snap_list, _ = subp(["snap", "list"])
installer_present = None
for snap_name in LIVE_INSTALLER_SNAPS:
if re.search(snap_name, snap_list):
installer_present = snap_name
if not installer_present:
LOG.warning(
"Skipping autoinstall module. Expected one of the Ubuntu"
" installer snap packages to be present: %s",
", ".join(LIVE_INSTALLER_SNAPS),
)
return
validate_config_schema(cfg["autoinstall"])
LOG.debug(
"Valid autoinstall schema. Config will be processed by %s",
installer_present,
)
| def handle(name, cfg, cloud, log, _args):
if "autoinstall" not in cfg:
LOG.debug(
"Skipping module named %s, no 'autoinstall' key in configuration",
name,
)
return
snap_list, _ = subp(["snap", "list"])
installer_present = None
for snap_name in LIVE_INSTALLER_SNAPS:
if re.search(snap_name, snap_list):
installer_present = snap_name
if not installer_present:
LOG.warning(
"Skipping autoinstall module. Expected one of the Ubuntu"
" installer snap packages to be present: %s",
", ".join(LIVE_INSTALLER_SNAPS),
)
return
validate_config_schema(cfg)
LOG.debug(
"Valid autoinstall schema. Config will be processed by %s",
installer_present,
)
|
48,468 | def main():
# initialize
module = AnsibleModule(
argument_spec=dict(
name=dict(type='str', aliases=['service', 'unit']),
state=dict(type='str', choices=['reloaded', 'restarted', 'started', 'stopped']),
enabled=dict(type='bool'),
force=dict(type='bool'),
masked=dict(type='bool'),
daemon_reload=dict(type='bool', default=False, aliases=['daemon-reload']),
daemon_reexec=dict(type='bool', default=False, aliases=['daemon-reexec']),
scope=dict(type='str', default='system', choices=['system', 'user', 'global']),
no_block=dict(type='bool', default=False),
),
supports_check_mode=True,
required_one_of=[['state', 'enabled', 'masked', 'daemon_reload', 'daemon_reexec']],
required_by=dict(
state=('name', ),
enabled=('name', ),
masked=('name', ),
),
)
unit = module.params['name']
if unit is not None:
for globpattern in (r"*", r"?", r"["):
if globpattern in unit:
module.fail_json(msg="This module does not currently support using glob patterns, found '%s' in service name: %s" % (globpattern, unit))
systemctl = module.get_bin_path('systemctl', True)
if os.getenv('XDG_RUNTIME_DIR') is None:
os.environ['XDG_RUNTIME_DIR'] = '/run/user/%s' % os.geteuid()
''' Set CLI options depending on params '''
# if scope is 'system' or None, we can ignore as there is no extra switch.
# The other choices match the corresponding switch
if module.params['scope'] != 'system':
systemctl += " --%s" % module.params['scope']
if module.params['no_block']:
systemctl += " --no-block"
if module.params['force']:
systemctl += " --force"
rc = 0
out = err = ''
result = dict(
name=unit,
changed=False,
status=dict(),
)
# Run daemon-reload first, if requested
if module.params['daemon_reload'] and not module.check_mode:
(rc, out, err) = module.run_command("%s daemon-reload" % (systemctl))
if rc != 0:
module.fail_json(msg='failure %d during daemon-reload: %s' % (rc, err))
# Run daemon-reexec
if module.params['daemon_reexec'] and not module.check_mode:
(rc, out, err) = module.run_command("%s daemon-reexec" % (systemctl))
if rc != 0:
module.fail_json(msg='failure %d during daemon-reexec: %s' % (rc, err))
if unit:
found = False
is_initd = sysv_exists(unit)
is_systemd = False
# check service data, cannot error out on rc as it changes across versions, assume not found
(rc, out, err) = module.run_command("%s show '%s'" % (systemctl, unit))
if rc == 0 and not (request_was_ignored(out) or request_was_ignored(err)):
# load return of systemctl show into dictionary for easy access and return
if out:
result['status'] = parse_systemctl_show(to_native(out).split('\n'))
is_systemd = 'LoadState' in result['status'] and result['status']['LoadState'] != 'not-found'
is_masked = 'LoadState' in result['status'] and result['status']['LoadState'] == 'masked'
# Check for loading error
if is_systemd and not is_masked and 'LoadError' in result['status']:
module.fail_json(msg="Error loading unit file '%s': %s" % (unit, result['status']['LoadError']))
# Workaround for https://github.com/ansible/ansible/issues/71528
elif err and rc == 1 and 'Failed to parse bus message' in err:
result['status'] = parse_systemctl_show(to_native(out).split('\n'))
unit_base, sep, suffix = unit.partition('@')
unit_search = '{unit_base}{sep}'.format(unit_base=unit_base, sep=sep)
(rc, out, err) = module.run_command("{systemctl} list-unit-files '{unit_search}*'".format(systemctl=systemctl, unit_search=unit_search))
is_systemd = unit_search in out
(rc, out, err) = module.run_command("{systemctl} is-active '{unit}'".format(systemctl=systemctl, unit=unit))
result['status']['ActiveState'] = out.rstrip('\n')
else:
# list taken from man systemctl(1) for systemd 244
valid_enabled_states = [
"enabled",
"enabled-runtime",
"linked",
"linked-runtime",
"masked",
"masked-runtime",
"static",
"indirect",
"disabled",
"generated",
"transient"]
(rc, out, err) = module.run_command("%s is-enabled '%s'" % (systemctl, unit))
if out.strip() in valid_enabled_states:
is_systemd = True
else:
# fallback list-unit-files as show does not work on some systems (chroot)
# not used as primary as it skips some services (like those using init.d) and requires .service/etc notation
(rc, out, err) = module.run_command("%s list-unit-files '%s'" % (systemctl, unit))
if rc == 0:
is_systemd = True
else:
# Check for systemctl command
module.run_command(systemctl, check_rc=True)
# Does service exist?
found = is_systemd or is_initd
if is_initd and not is_systemd:
module.warn('The service (%s) is actually an init script but the system is managed by systemd' % unit)
# mask/unmask the service, if requested, can operate on services before they are installed
if module.params['masked'] is not None:
# state is not masked unless systemd affirms otherwise
(rc, out, err) = module.run_command("%s is-enabled '%s'" % (systemctl, unit))
masked = out.strip() == "masked"
if masked != module.params['masked']:
result['changed'] = True
if module.params['masked']:
action = 'mask'
else:
action = 'unmask'
if not module.check_mode:
(rc, out, err) = module.run_command("%s %s '%s'" % (systemctl, action, unit))
if rc != 0:
# some versions of system CAN mask/unmask non existing services, we only fail on missing if they don't
fail_if_missing(module, found, unit, msg='host')
# Enable/disable service startup at boot if requested
if module.params['enabled'] is not None:
if module.params['enabled']:
action = 'enable'
else:
action = 'disable'
fail_if_missing(module, found, unit, msg='host')
# do we need to enable the service?
enabled = False
(rc, out, err) = module.run_command("%s is-enabled '%s'" % (systemctl, unit))
# check systemctl result or if it is a init script
if rc == 0:
if out.strip() in ('enabled-runtime',):
# enable-runtime is 'enable for only this boot of the system'
enabled = False
else:
enabled = True
elif rc == 1:
# if not a user or global user service and both init script and unit file exist stdout should have enabled/disabled, otherwise use rc entries
if module.params['scope'] == 'system' and \
is_initd and \
not out.strip().endswith('disabled') and \
sysv_is_enabled(unit):
enabled = True
# default to current state
result['enabled'] = enabled
# Change enable/disable if needed
if enabled != module.params['enabled']:
result['changed'] = True
if not module.check_mode:
(rc, out, err) = module.run_command("%s %s '%s'" % (systemctl, action, unit))
if rc != 0:
module.fail_json(msg="Unable to %s service %s: %s" % (action, unit, out + err))
result['enabled'] = not enabled
# set service state if requested
if module.params['state'] is not None:
fail_if_missing(module, found, unit, msg="host")
# default to desired state
result['state'] = module.params['state']
# What is current service state?
if 'ActiveState' in result['status']:
action = None
if module.params['state'] == 'started':
if not is_running_service(result['status']):
action = 'start'
elif module.params['state'] == 'stopped':
if is_running_service(result['status']) or is_deactivating_service(result['status']):
action = 'stop'
else:
if not is_running_service(result['status']):
action = 'start'
else:
action = module.params['state'][:-2] # remove 'ed' from restarted/reloaded
result['state'] = 'started'
if action:
result['changed'] = True
if not module.check_mode:
(rc, out, err) = module.run_command("%s %s '%s'" % (systemctl, action, unit))
if rc != 0:
module.fail_json(msg="Unable to %s service %s: %s" % (action, unit, err))
# check for chroot
elif is_chroot(module) or os.environ.get('SYSTEMD_OFFLINE') == '1':
module.warn("Target is a chroot or systemd is offline. This can lead to false positives or prevent the init system tools from working.")
else:
# this should not happen?
module.fail_json(msg="Service is in unknown state", status=result['status'])
module.exit_json(**result)
| def main():
# initialize
module = AnsibleModule(
argument_spec=dict(
name=dict(type='str', aliases=['service', 'unit']),
state=dict(type='str', choices=['reloaded', 'restarted', 'started', 'stopped']),
enabled=dict(type='bool'),
force=dict(type='bool'),
masked=dict(type='bool'),
daemon_reload=dict(type='bool', default=False, aliases=['daemon-reload']),
daemon_reexec=dict(type='bool', default=False, aliases=['daemon-reexec']),
scope=dict(type='str', default='system', choices=['system', 'user', 'global']),
no_block=dict(type='bool', default=False),
),
supports_check_mode=True,
required_one_of=[['state', 'enabled', 'masked', 'daemon_reload', 'daemon_reexec']],
required_by=dict(
state=('name', ),
enabled=('name', ),
masked=('name', ),
),
)
unit = module.params['name']
if unit is not None:
for globpattern in (r"*", r"?", r"["):
if globpattern in unit:
module.fail_json(msg="This module does not currently support using glob patterns, found '%s' in service name: %s" % (globpattern, unit))
systemctl = module.get_bin_path('systemctl', True)
if os.getenv('XDG_RUNTIME_DIR') is None:
os.environ['XDG_RUNTIME_DIR'] = '/run/user/%s' % os.geteuid()
''' Set CLI options depending on params '''
# if scope is 'system' or None, we can ignore as there is no extra switch.
# The other choices match the corresponding switch
if module.params['scope'] != 'system':
systemctl += " --%s" % module.params['scope']
if module.params['no_block']:
systemctl += " --no-block"
if module.params['force']:
systemctl += " --force"
rc = 0
out = err = ''
result = dict(
name=unit,
changed=False,
status=dict(),
)
# Run daemon-reload first, if requested
if module.params['daemon_reload'] and not module.check_mode:
(rc, out, err) = module.run_command("%s daemon-reload" % (systemctl))
if rc != 0:
module.fail_json(msg='failure %d during daemon-reload: %s' % (rc, err))
# Run daemon-reexec
if module.params['daemon_reexec'] and not module.check_mode:
(rc, out, err) = module.run_command("%s daemon-reexec" % (systemctl))
if rc != 0:
module.fail_json(msg='failure %d during daemon-reexec: %s' % (rc, err))
if unit:
found = False
is_initd = sysv_exists(unit)
is_systemd = False
# check service data, cannot error out on rc as it changes across versions, assume not found
(rc, out, err) = module.run_command("%s show '%s'" % (systemctl, unit))
if rc == 0 and not (request_was_ignored(out) or request_was_ignored(err)):
# load return of systemctl show into dictionary for easy access and return
if out:
result['status'] = parse_systemctl_show(to_native(out).split('\n'))
is_systemd = 'LoadState' in result['status'] and result['status']['LoadState'] != 'not-found'
is_masked = 'LoadState' in result['status'] and result['status']['LoadState'] == 'masked'
# Check for loading error
if is_systemd and not is_masked and 'LoadError' in result['status']:
module.fail_json(msg="Error loading unit file '%s': %s" % (unit, result['status']['LoadError']))
# Workaround for https://github.com/ansible/ansible/issues/71528
elif err and rc == 1 and 'Failed to parse bus message' in err:
result['status'] = parse_systemctl_show(to_native(out).split('\n'))
unit_base, sep, suffix = unit.partition('@')
unit_search = '{unit_base}{sep}'.format(unit_base=unit_base, sep=sep)
(rc, out, err) = module.run_command("{systemctl} list-unit-files '{unit_search}*'".format(systemctl=systemctl, unit_search=unit_search))
is_systemd = unit_search in out
(rc, out, err) = module.run_command("{systemctl} is-active '{unit}'".format(systemctl=systemctl, unit=unit))
result['status']['ActiveState'] = out.rstrip('\n')
else:
# list taken from man systemctl(1) for systemd 244
valid_enabled_states = [
"enabled",
"enabled-runtime",
"linked",
"linked-runtime",
"masked",
"masked-runtime",
"static",
"indirect",
"disabled",
"generated",
"transient"]
(rc, out, err) = module.run_command("%s is-enabled '%s'" % (systemctl, unit))
if out.strip() in valid_enabled_states:
is_systemd = True
else:
# fallback list-unit-files as show does not work on some systems (chroot)
# not used as primary as it skips some services (like those using init.d) and requires .service/etc notation
(rc, out, err) = module.run_command("%s list-unit-files '%s'" % (systemctl, unit))
if rc == 0:
is_systemd = True
else:
# Check for systemctl command
module.run_command(systemctl, check_rc=True)
# Does service exist?
found = is_systemd or is_initd
if is_initd and not is_systemd:
module.warn('The service (%s) is actually an init script but the system is managed by systemd' % unit)
# mask/unmask the service, if requested, can operate on services before they are installed
if module.params['masked'] is not None:
# state is not masked unless systemd affirms otherwise
(rc, out, err) = module.run_command("%s is-enabled '%s'" % (systemctl, unit))
masked = out.strip() == "masked"
if masked != module.params['masked']:
result['changed'] = True
if module.params['masked']:
action = 'mask'
else:
action = 'unmask'
if not module.check_mode:
(rc, out, err) = module.run_command("%s %s '%s'" % (systemctl, action, unit))
if rc != 0:
# some versions of system CAN mask/unmask non existing services, we only fail on missing if they don't
fail_if_missing(module, found, unit, msg='host')
# Enable/disable service startup at boot if requested
if module.params['enabled'] is not None:
if module.params['enabled']:
action = 'enable'
else:
action = 'disable'
fail_if_missing(module, found, unit, msg='host')
# do we need to enable the service?
enabled = False
(rc, out, err) = module.run_command("%s is-enabled '%s'" % (systemctl, unit))
# check systemctl result or if it is a init script
if rc == 0:
# enable-runtime is 'enable for only this boot of the system'
enabled = out.strip() != 'enabled-runtime'
elif rc == 1:
# if not a user or global user service and both init script and unit file exist stdout should have enabled/disabled, otherwise use rc entries
if module.params['scope'] == 'system' and \
is_initd and \
not out.strip().endswith('disabled') and \
sysv_is_enabled(unit):
enabled = True
# default to current state
result['enabled'] = enabled
# Change enable/disable if needed
if enabled != module.params['enabled']:
result['changed'] = True
if not module.check_mode:
(rc, out, err) = module.run_command("%s %s '%s'" % (systemctl, action, unit))
if rc != 0:
module.fail_json(msg="Unable to %s service %s: %s" % (action, unit, out + err))
result['enabled'] = not enabled
# set service state if requested
if module.params['state'] is not None:
fail_if_missing(module, found, unit, msg="host")
# default to desired state
result['state'] = module.params['state']
# What is current service state?
if 'ActiveState' in result['status']:
action = None
if module.params['state'] == 'started':
if not is_running_service(result['status']):
action = 'start'
elif module.params['state'] == 'stopped':
if is_running_service(result['status']) or is_deactivating_service(result['status']):
action = 'stop'
else:
if not is_running_service(result['status']):
action = 'start'
else:
action = module.params['state'][:-2] # remove 'ed' from restarted/reloaded
result['state'] = 'started'
if action:
result['changed'] = True
if not module.check_mode:
(rc, out, err) = module.run_command("%s %s '%s'" % (systemctl, action, unit))
if rc != 0:
module.fail_json(msg="Unable to %s service %s: %s" % (action, unit, err))
# check for chroot
elif is_chroot(module) or os.environ.get('SYSTEMD_OFFLINE') == '1':
module.warn("Target is a chroot or systemd is offline. This can lead to false positives or prevent the init system tools from working.")
else:
# this should not happen?
module.fail_json(msg="Service is in unknown state", status=result['status'])
module.exit_json(**result)
|
22,289 | def stream_to_open_named_file(stream, fd, filename, source_encoding=None, source_error='strict', target_encoding=None, target_error='strict'):
"""Writes a stream to the provided file descriptor, returns the file name. Closes file descriptor"""
# signature and behavor is somewhat odd, due to backwards compatibility, but this can/should be done better
CHUNK_SIZE = 1048576
try:
codecs.lookup(target_encoding)
except Exception:
target_encoding = util.DEFAULT_ENCODING # utf-8
try_source_encoding = True
while True:
chunk = stream.read(CHUNK_SIZE)
if not chunk:
break
if source_encoding is not None and try_source_encoding:
# If a source encoding is given we use it to convert to the target encoding
try:
if not isinstance(chunk, text_type):
chunk = chunk.decode(source_encoding, source_error)
os.write(fd, chunk.encode(target_encoding, target_error))
except UnicodeDecodeError:
try_source_encoding = False
os.write(fd, chunk)
else:
# Compressed files must be encoded after they are uncompressed in the upload utility,
# while binary files should not be encoded at all.
if isinstance(chunk, text_type):
chunk = chunk.encode(target_encoding, target_error)
os.write(fd, chunk)
os.close(fd)
return filename
| def stream_to_open_named_file(stream, fd, filename, source_encoding=None, source_error='strict', target_encoding=None, target_error='strict'):
"""Writes a stream to the provided file descriptor, returns the file name. Closes file descriptor"""
# signature and behavor is somewhat odd, due to backwards compatibility, but this can/should be done better
CHUNK_SIZE = 1048576
try:
codecs.lookup(target_encoding)
except Exception:
target_encoding = util.DEFAULT_ENCODING # utf-8
try_source_encoding = True
while True:
chunk = stream.read(CHUNK_SIZE)
if not chunk:
break
if use_source_encoding:
# If a source encoding is given we use it to convert to the target encoding
try:
if not isinstance(chunk, text_type):
chunk = chunk.decode(source_encoding, source_error)
os.write(fd, chunk.encode(target_encoding, target_error))
except UnicodeDecodeError:
try_source_encoding = False
os.write(fd, chunk)
else:
# Compressed files must be encoded after they are uncompressed in the upload utility,
# while binary files should not be encoded at all.
if isinstance(chunk, text_type):
chunk = chunk.encode(target_encoding, target_error)
os.write(fd, chunk)
os.close(fd)
return filename
|
25,175 | def infer_typing_newtype(node, context_itton=None):
"""Infer a typing.TypeVar(...) or typing.NewType(...) call"""
try:
func = next(node.func.infer(context=context_itton))
except (InferenceError, StopIteration) as exc:
raise UseInferenceDefault from exc
if func.qname() != "typing.NewType":
raise UseInferenceDefault
if not node.args:
raise UseInferenceDefault
derived = node.args[0].as_string().strip("'")
base = node.args[1].as_string().strip("'")
node = extract_node(TYPING_NEWTYPE_TEMPLATE.format(derived=derived, base=base))
return node.infer(context=context_itton)
| def infer_typing_newtype(node: nodes.Call, context_itton: Optional[InferenceContext] =None):
"""Infer a typing.TypeVar(...) or typing.NewType(...) call"""
try:
func = next(node.func.infer(context=context_itton))
except (InferenceError, StopIteration) as exc:
raise UseInferenceDefault from exc
if func.qname() != "typing.NewType":
raise UseInferenceDefault
if not node.args:
raise UseInferenceDefault
derived = node.args[0].as_string().strip("'")
base = node.args[1].as_string().strip("'")
node = extract_node(TYPING_NEWTYPE_TEMPLATE.format(derived=derived, base=base))
return node.infer(context=context_itton)
|
32,204 | def fetch_incidents(client: Client, max_results: int, last_run: Dict[str, str],
first_fetch_time: str, query: Optional[str], mirror_direction: str,
mirror_tag: List[str]) -> Tuple[Dict[str, str], List[dict]]:
"""This function retrieves new incidents every interval (default is 1 minute).
:type client: ``Client``
:param client: XSOAR client to use
:type max_results: ``int``
:param max_results: Maximum numbers of incidents per fetch
:type last_run: ``Optional[Dict[str, str]]``
:param last_run:
A dict with a key containing the latest incident created time we got
from last fetch
:type first_fetch_time: ``Optional[str]``
:param first_fetch_time:
If last_run is None (first time we are fetching), it contains
the timestamp in milliseconds on when to start fetching incidents
:type query: ``Optional[str]``
:param query:
query to fetch the relevant incidents
:type mirror_direction: ``str``
:param mirror_direction:
Mirror direction for the fetched incidents
:type mirror_tag: ``List[str]``
:param mirror_tag:
The tags that you will mirror out of the incident.
:return:
A tuple containing two elements:
next_run (``Dict[str, int]``): Contains the timestamp that will be
used in ``last_run`` on the next fetch.
incidents (``List[dict]``): List of incidents that will be created in XSOAR
:rtype: ``Tuple[Dict[str, int], List[dict]]``
"""
last_fetch = last_run.get('last_fetch')
if not last_fetch:
last_fetch = first_fetch_time # type: ignore
latest_created_time = dateparser.parse(last_fetch)
incidents_result: List[Dict[str, Any]] = []
if query:
query += f' and created:>="{last_fetch}"'
else:
query = f'created:>="{last_fetch}"'
demisto.debug(f'Fetching incidents since last fetch: {last_fetch}')
incidents = client.search_incidents(
query=query,
max_results=max_results,
start_time=last_fetch
)
for incident in incidents:
incident_result: Dict[str, Any] = dict()
incident_result['dbotMirrorDirection'] = MIRROR_DIRECTION[mirror_direction] # type: ignore
incident['dbotMirrorInstance'] = demisto.integrationInstance()
incident_result['dbotMirrorTags'] = mirror_tag if mirror_tag else None # type: ignore
incident_result['dbotMirrorId'] = incident['id']
for key, value in incident.items():
if key in FIELDS_TO_COPY_FROM_REMOTE_INCIDENT:
incident_result[key] = value
incident_result['rawJSON'] = json.dumps(incident)
file_attachments = []
if incident.get('attachment') and len(incident.get('attachment', [])) > 0 and incident.get('investigationId'):
entries = client.get_incident_entries(
incident_id=incident['investigationId'], # type: ignore
from_date=0,
max_results=10,
categories=['attachments'],
tags=None,
tags_and_operator=False
)
for entry in entries:
if 'file' in entry and entry.get('file'):
file_entry_content = client.get_file_entry(entry.get('id')) # type: ignore
file_result = fileResult(entry['file'], file_entry_content)
if any(attachment.get('name') == entry['file'] for attachment in incident.get('attachment', [])):
if file_result['Type'] == EntryType.ERROR:
raise Exception(f"Error getting attachment: {str(file_result.get('Contents', ''))}")
file_attachments.append({
'path': file_result.get('FileID', ''),
'name': file_result.get('File', '')
})
incident_result['attachment'] = file_attachments
incidents_result.append(incident_result)
incident_created_time = dateparser.parse(incident.get('created')) # type: ignore
# Update last run and add incident if the incident is newer than last fetch
if incident_created_time > latest_created_time: # type: ignore
latest_created_time = incident_created_time
# Save the next_run as a dict with the last_fetch key to be stored
next_run = {'last_fetch': (latest_created_time + timedelta(microseconds=1)).strftime(XSOAR_DATE_FORMAT)} # type: ignore
return next_run, incidents_result
| def fetch_incidents(client: Client, max_results: int, last_run: Dict[str, str],
first_fetch_time: str, query: Optional[str], mirror_direction: str,
mirror_tag: List[str]) -> Tuple[Dict[str, str], List[dict]]:
"""This function retrieves new incidents every interval (default is 1 minute).
:type client: ``Client``
:param client: XSOAR client to use
:type max_results: ``int``
:param max_results: Maximum numbers of incidents per fetch
:type last_run: ``Optional[Dict[str, str]]``
:param last_run:
A dict with a key containing the latest incident created time we got
from last fetch
:type first_fetch_time: ``Optional[str]``
:param first_fetch_time:
If last_run is None (first time we are fetching), it contains
the timestamp in milliseconds on when to start fetching incidents
:type query: ``Optional[str]``
:param query:
query to fetch the relevant incidents
:type mirror_direction: ``str``
:param mirror_direction:
Mirror direction for the fetched incidents
:type mirror_tag: ``List[str]``
:param mirror_tag:
The tags that you will mirror out of the incident.
:return:
A tuple containing two elements:
next_run (``Dict[str, int]``): Contains the timestamp that will be
used in ``last_run`` on the next fetch.
incidents (``List[dict]``): List of incidents that will be created in XSOAR
:rtype: ``Tuple[Dict[str, int], List[dict]]``
"""
last_fetch = last_run.get('last_fetch')
if not last_fetch:
last_fetch = first_fetch_time # type: ignore
latest_created_time = dateparser.parse(last_fetch)
incidents_result: List[Dict[str, Any]] = []
if query:
query += f' and created:>="{last_fetch}"'
else:
query = f'created:>="{last_fetch}"'
demisto.debug(f'Fetching incidents since last fetch: {last_fetch}')
incidents = client.search_incidents(
query=query,
max_results=max_results,
start_time=last_fetch
)
for incident in incidents:
incident_result: Dict[str, Any] = dict()
incident_result['dbotMirrorDirection'] = MIRROR_DIRECTION[mirror_direction] # type: ignore
incident['dbotMirrorInstance'] = demisto.integrationInstance()
incident_result['dbotMirrorTags'] = mirror_tag if mirror_tag else None # type: ignore
incident_result['dbotMirrorId'] = incident['id']
for key, value in incident.items():
if key in FIELDS_TO_COPY_FROM_REMOTE_INCIDENT:
incident_result[key] = value
incident_result['rawJSON'] = json.dumps(incident)
file_attachments = []
if incident.get('attachment') and len(incident.get('attachment', [])) > 0 and incident.get('investigationId'):
entries = client.get_incident_entries(
incident_id=incident['investigationId'], # type: ignore
from_date=0,
max_results=10,
categories=['attachments'],
tags=None,
tags_and_operator=False
)
for entry in entries:
if 'file' in entry and entry.get('file'):
file_entry_content = client.get_file_entry(entry.get('id')) # type: ignore
file_result = fileResult(entry['file'], file_entry_content)
if any(attachment.get('name') == entry['file'] for attachment in incident.get('attachment', [])):
if file_result['Type'] == EntryType.ERROR:
raise Exception(f"Error getting attachment: {str(file_result.get('Contents', ''))}")
file_attachments.append({
'path': file_result.get('FileID', ''),
'name': file_result.get('File', '')
})
incident_result['attachment'] = file_attachments
incidents_result.append(incident_result)
incident_created_time = dateparser.parse(incident.get('created')) # type: ignore
# Update last run and add incident if the incident is newer than last fetch
if incident_created_time > latest_created_time: # type: ignore[operator]
latest_created_time = incident_created_time
# Save the next_run as a dict with the last_fetch key to be stored
next_run = {'last_fetch': (latest_created_time + timedelta(microseconds=1)).strftime(XSOAR_DATE_FORMAT)} # type: ignore
return next_run, incidents_result
|
17,393 | def broadcast_dimension_size(
variables: List[Variable],
) -> Dict[Hashable, int]:
"""Extract dimension sizes from a dictionary of variables.
Raises ValueError if any dimensions have different sizes.
"""
dims: Dict[Hashable, int] = {}
for var in variables:
for dim, size in zip(var.dims, var.shape):
if dim in dims and size != dims[dim]:
raise ValueError("index %r not aligned" % dim)
dims[dim] = size
return dims
| def broadcast_dimension_size(
variables: List[Variable]
) -> Dict[Hashable, int]:
"""Extract dimension sizes from a dictionary of variables.
Raises ValueError if any dimensions have different sizes.
"""
dims: Dict[Hashable, int] = {}
for var in variables:
for dim, size in zip(var.dims, var.shape):
if dim in dims and size != dims[dim]:
raise ValueError("index %r not aligned" % dim)
dims[dim] = size
return dims
|
2,965 | def read_gbq(
query,
project_id=None,
index_col=None,
col_order=None,
reauth=False,
auth_local_webserver=False,
dialect=None,
location=None,
configuration=None,
credentials=None,
use_bqstorage_api=None,
private_key=None,
verbose=None,
progress_bar_type="tqdm",
):
"""
Load data from Google BigQuery.
This function requires the `pandas-gbq package
<https://pandas-gbq.readthedocs.io>`__.
See the `How to authenticate with Google BigQuery
<https://pandas-gbq.readthedocs.io/en/latest/howto/authentication.html>`__
guide for authentication instructions.
Parameters
----------
query : str
SQL-Like Query to return data values.
project_id : str, optional
Google BigQuery Account project ID. Optional when available from
the environment.
index_col : str, optional
Name of result column to use for index in results DataFrame.
col_order : list(str), optional
List of BigQuery column names in the desired order for results
DataFrame.
reauth : boolean, default False
Force Google BigQuery to re-authenticate the user. This is useful
if multiple accounts are used.
auth_local_webserver : boolean, default False
Use the `local webserver flow`_ instead of the `console flow`_
when getting user credentials.
.. _local webserver flow:
http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server
.. _console flow:
http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console
*New in version 0.2.0 of pandas-gbq*.
dialect : str, default 'legacy'
Note: The default value is changing to 'standard' in a future verion.
SQL syntax dialect to use. Value can be one of:
``'legacy'``
Use BigQuery's legacy SQL dialect. For more information see
`BigQuery Legacy SQL Reference
<https://cloud.google.com/bigquery/docs/reference/legacy-sql>`__.
``'standard'``
Use BigQuery's standard SQL, which is
compliant with the SQL 2011 standard. For more information
see `BigQuery Standard SQL Reference
<https://cloud.google.com/bigquery/docs/reference/standard-sql/>`__.
.. versionchanged:: 0.24.0
location : str, optional
Location where the query job should run. See the `BigQuery locations
documentation
<https://cloud.google.com/bigquery/docs/dataset-locations>`__ for a
list of available locations. The location must match that of any
datasets used in the query.
*New in version 0.5.0 of pandas-gbq*.
configuration : dict, optional
Query config parameters for job processing.
For example:
configuration = {'query': {'useQueryCache': False}}
For more information see `BigQuery REST API Reference
<https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query>`__.
credentials : google.auth.credentials.Credentials, optional
Credentials for accessing Google APIs. Use this parameter to override
default credentials, such as to use Compute Engine
:class:`google.auth.compute_engine.Credentials` or Service Account
:class:`google.oauth2.service_account.Credentials` directly.
*New in version 0.8.0 of pandas-gbq*.
.. versionadded:: 0.24.0
use_bqstorage_api : bool, default False
Use the `BigQuery Storage API
<https://cloud.google.com/bigquery/docs/reference/storage/>`__ to
download query results quickly, but at an increased cost. To use this
API, first `enable it in the Cloud Console
<https://console.cloud.google.com/apis/library/bigquerystorage.googleapis.com>`__.
You must also have the `bigquery.readsessions.create
<https://cloud.google.com/bigquery/docs/access-control#roles>`__
permission on the project you are billing queries to.
This feature requires version 0.10.0 or later of the ``pandas-gbq``
package. It also requires the ``google-cloud-bigquery-storage`` and
``fastavro`` packages.
.. versionadded:: 0.25.0
private_key : str, deprecated
Deprecated in pandas-gbq version 0.8.0. Use the ``credentials``
parameter and
:func:`google.oauth2.service_account.Credentials.from_service_account_info`
or
:func:`google.oauth2.service_account.Credentials.from_service_account_file`
instead.
Service account private key in JSON format. Can be file path
or string contents. This is useful for remote server
authentication (eg. Jupyter/IPython notebook on remote host).
verbose : None, deprecated
Deprecated in pandas-gbq version 0.4.0. Use the `logging module to
adjust verbosity instead
<https://pandas-gbq.readthedocs.io/en/latest/intro.html#logging>`__.
progress_bar_type : Optional, str
If set, use the `tqdm <https://tqdm.github.io/>`__ library to
display a progress bar while the data downloads. Install the
``tqdm`` package to use this feature.
Possible values of ``progress_bar_type`` include:
``None``
No progress bar.
``'tqdm'``
Use the :func:`tqdm.tqdm` function to print a progress bar
to :data:`sys.stderr`.
``'tqdm_notebook'``
Use the :func:`tqdm.tqdm_notebook` function to display a
progress bar as a Jupyter notebook widget.
``'tqdm_gui'``
Use the :func:`tqdm.tqdm_gui` function to display a
progress bar as a graphical dialog box.
Note that his feature requires version 0.12.0 or later of the
``pandas-gbq`` package. And it requires the ``tqdm`` package.
.. versionadded:: 1.0.0
Returns
-------
df: DataFrame
DataFrame representing results of query.
See Also
--------
pandas_gbq.read_gbq : This function in the pandas-gbq library.
DataFrame.to_gbq : Write a DataFrame to Google BigQuery.
"""
pandas_gbq = _try_import()
kwargs = {}
# START: new kwargs. Don't populate unless explicitly set.
if use_bqstorage_api is not None:
kwargs["use_bqstorage_api"] = use_bqstorage_api
kwargs["progress_bar_type"] = progress_bar_type
# END: new kwargs
# START: deprecated kwargs. Don't populate unless explicitly set.
if verbose is not None:
kwargs["verbose"] = verbose
if private_key is not None:
kwargs["private_key"] = private_key
# END: deprecated kwargs
return pandas_gbq.read_gbq(
query,
project_id=project_id,
index_col=index_col,
col_order=col_order,
reauth=reauth,
auth_local_webserver=auth_local_webserver,
dialect=dialect,
location=location,
configuration=configuration,
credentials=credentials,
**kwargs,
)
| def read_gbq(
query,
project_id=None,
index_col=None,
col_order=None,
reauth=False,
auth_local_webserver=False,
dialect=None,
location=None,
configuration=None,
credentials=None,
use_bqstorage_api=None,
private_key=None,
verbose=None,
progress_bar_type=None,
):
"""
Load data from Google BigQuery.
This function requires the `pandas-gbq package
<https://pandas-gbq.readthedocs.io>`__.
See the `How to authenticate with Google BigQuery
<https://pandas-gbq.readthedocs.io/en/latest/howto/authentication.html>`__
guide for authentication instructions.
Parameters
----------
query : str
SQL-Like Query to return data values.
project_id : str, optional
Google BigQuery Account project ID. Optional when available from
the environment.
index_col : str, optional
Name of result column to use for index in results DataFrame.
col_order : list(str), optional
List of BigQuery column names in the desired order for results
DataFrame.
reauth : boolean, default False
Force Google BigQuery to re-authenticate the user. This is useful
if multiple accounts are used.
auth_local_webserver : boolean, default False
Use the `local webserver flow`_ instead of the `console flow`_
when getting user credentials.
.. _local webserver flow:
http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server
.. _console flow:
http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console
*New in version 0.2.0 of pandas-gbq*.
dialect : str, default 'legacy'
Note: The default value is changing to 'standard' in a future verion.
SQL syntax dialect to use. Value can be one of:
``'legacy'``
Use BigQuery's legacy SQL dialect. For more information see
`BigQuery Legacy SQL Reference
<https://cloud.google.com/bigquery/docs/reference/legacy-sql>`__.
``'standard'``
Use BigQuery's standard SQL, which is
compliant with the SQL 2011 standard. For more information
see `BigQuery Standard SQL Reference
<https://cloud.google.com/bigquery/docs/reference/standard-sql/>`__.
.. versionchanged:: 0.24.0
location : str, optional
Location where the query job should run. See the `BigQuery locations
documentation
<https://cloud.google.com/bigquery/docs/dataset-locations>`__ for a
list of available locations. The location must match that of any
datasets used in the query.
*New in version 0.5.0 of pandas-gbq*.
configuration : dict, optional
Query config parameters for job processing.
For example:
configuration = {'query': {'useQueryCache': False}}
For more information see `BigQuery REST API Reference
<https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query>`__.
credentials : google.auth.credentials.Credentials, optional
Credentials for accessing Google APIs. Use this parameter to override
default credentials, such as to use Compute Engine
:class:`google.auth.compute_engine.Credentials` or Service Account
:class:`google.oauth2.service_account.Credentials` directly.
*New in version 0.8.0 of pandas-gbq*.
.. versionadded:: 0.24.0
use_bqstorage_api : bool, default False
Use the `BigQuery Storage API
<https://cloud.google.com/bigquery/docs/reference/storage/>`__ to
download query results quickly, but at an increased cost. To use this
API, first `enable it in the Cloud Console
<https://console.cloud.google.com/apis/library/bigquerystorage.googleapis.com>`__.
You must also have the `bigquery.readsessions.create
<https://cloud.google.com/bigquery/docs/access-control#roles>`__
permission on the project you are billing queries to.
This feature requires version 0.10.0 or later of the ``pandas-gbq``
package. It also requires the ``google-cloud-bigquery-storage`` and
``fastavro`` packages.
.. versionadded:: 0.25.0
private_key : str, deprecated
Deprecated in pandas-gbq version 0.8.0. Use the ``credentials``
parameter and
:func:`google.oauth2.service_account.Credentials.from_service_account_info`
or
:func:`google.oauth2.service_account.Credentials.from_service_account_file`
instead.
Service account private key in JSON format. Can be file path
or string contents. This is useful for remote server
authentication (eg. Jupyter/IPython notebook on remote host).
verbose : None, deprecated
Deprecated in pandas-gbq version 0.4.0. Use the `logging module to
adjust verbosity instead
<https://pandas-gbq.readthedocs.io/en/latest/intro.html#logging>`__.
progress_bar_type : Optional, str
If set, use the `tqdm <https://tqdm.github.io/>`__ library to
display a progress bar while the data downloads. Install the
``tqdm`` package to use this feature.
Possible values of ``progress_bar_type`` include:
``None``
No progress bar.
``'tqdm'``
Use the :func:`tqdm.tqdm` function to print a progress bar
to :data:`sys.stderr`.
``'tqdm_notebook'``
Use the :func:`tqdm.tqdm_notebook` function to display a
progress bar as a Jupyter notebook widget.
``'tqdm_gui'``
Use the :func:`tqdm.tqdm_gui` function to display a
progress bar as a graphical dialog box.
Note that his feature requires version 0.12.0 or later of the
``pandas-gbq`` package. And it requires the ``tqdm`` package.
.. versionadded:: 1.0.0
Returns
-------
df: DataFrame
DataFrame representing results of query.
See Also
--------
pandas_gbq.read_gbq : This function in the pandas-gbq library.
DataFrame.to_gbq : Write a DataFrame to Google BigQuery.
"""
pandas_gbq = _try_import()
kwargs = {}
# START: new kwargs. Don't populate unless explicitly set.
if use_bqstorage_api is not None:
kwargs["use_bqstorage_api"] = use_bqstorage_api
kwargs["progress_bar_type"] = progress_bar_type
# END: new kwargs
# START: deprecated kwargs. Don't populate unless explicitly set.
if verbose is not None:
kwargs["verbose"] = verbose
if private_key is not None:
kwargs["private_key"] = private_key
# END: deprecated kwargs
return pandas_gbq.read_gbq(
query,
project_id=project_id,
index_col=index_col,
col_order=col_order,
reauth=reauth,
auth_local_webserver=auth_local_webserver,
dialect=dialect,
location=location,
configuration=configuration,
credentials=credentials,
**kwargs,
)
|
46,294 | def is_diagonal(matrix, tol=1e-8):
"""Determine whether affine is a diagonal matrix.
Parameters
----------
matrix : 2-D array
The matrix to test.
tol : float, optional
Consider any entries with magnitude < `tol` as 0.
Returns
-------
is_diag : bool
Boolean indicating whether affine is diagonal.
"""
if matrix.ndim != 2 or matrix.shape[0] != matrix.shape[1]:
raise ValueError("matrix must be square")
non_diag = matrix[~np.eye(matrix.shape[0], dtype=bool)]
if tol == 0:
return np.count_nonzero(non_diag) == 0
else:
return np.max(np.abs(non_diag)) <= tol
| def is_diagonal(matrix, tol=1e-8):
"""Determine whether affine is a diagonal matrix.
Parameters
----------
matrix : 2-D array
The matrix to test.
tol : float, optional
Consider any entries with magnitude < `tol` as 0.
Returns
-------
is_diag : bool
True if matrix is diagonal, False otherwise.
"""
if matrix.ndim != 2 or matrix.shape[0] != matrix.shape[1]:
raise ValueError("matrix must be square")
non_diag = matrix[~np.eye(matrix.shape[0], dtype=bool)]
if tol == 0:
return np.count_nonzero(non_diag) == 0
else:
return np.max(np.abs(non_diag)) <= tol
|
19,202 | def patchelf_root_spec():
"""Return the root spec used to bootstrap patchelf"""
# TODO: patchelf is restricted to v0.13 since earlier versions have
# TODO: bugs that we don't to deal with, while v0.14 requires a C++17
# TODO: which may not be available on all platforms.
return _root_spec('[email protected]:0.13.1')
| def patchelf_root_spec():
"""Return the root spec used to bootstrap patchelf"""
# TODO: patchelf is restricted to v0.13 since earlier versions have
# TODO: bugs that we don't to deal with, while v0.14 requires a C++17
# TODO: which may not be available on all platforms.
return _root_spec('[email protected]:0.13')
|
34,225 | def configure_file_logging(logger_obj: logging.Logger, log_file: Optional[Text]):
if not log_file:
return
formatter = logging.Formatter("%(asctime)s [%(levelname)-5.5s] %(message)s")
file_handler = logging.FileHandler(log_file, encoding='utf-8')
file_handler.setLevel(logger_obj.level)
file_handler.setFormatter(formatter)
logger_obj.addHandler(file_handler)
| def configure_file_logging(logger_obj: logging.Logger, log_file: Optional[Text]):
if not log_file:
return
formatter = logging.Formatter("%(asctime)s [%(levelname)-5.5s] %(message)s")
file_handler = logging.FileHandler(log_file, encoding="utf-8")
file_handler.setLevel(logger_obj.level)
file_handler.setFormatter(formatter)
logger_obj.addHandler(file_handler)
|
36,568 | def make_archive(base_name, format, root_dir=None, base_dir=None, verbose=0,
dry_run=0, owner=None, group=None, logger=None):
"""Create an archive file (eg. zip or tar).
'base_name' is the name of the file to create, minus any format-specific
extension; 'format' is the archive format: one of "zip", "tar", "gztar",
"bztar", or "xztar". Or any other registered format.
'root_dir' is a directory that will be the root directory of the
archive; ie. we typically chdir into 'root_dir' before creating the
archive. 'base_dir' is the directory where we start archiving from;
ie. 'base_dir' will be the common prefix of all files and
directories in the archive. 'root_dir' and 'base_dir' both default
to the current directory. Returns the name of the archive file.
'owner' and 'group' are used when creating a tar archive. By default,
uses the current owner and group.
"""
sys.audit("shutil.make_archive", base_name, format, root_dir, base_dir)
base_name = os.fsdecode(base_dir)
save_cwd = os.getcwd()
if root_dir is not None:
root_dir = os.fsdecode(root_dir)
if logger is not None:
logger.debug("changing into '%s'", root_dir)
base_name = os.path.abspath(base_name)
if not dry_run:
os.chdir(root_dir)
if base_dir is None:
base_dir = os.curdir
base_dir = os.fsdecode(base_dir)
kwargs = {'dry_run': dry_run, 'logger': logger}
try:
format_info = _ARCHIVE_FORMATS[format]
except KeyError:
raise ValueError("unknown archive format '%s'" % format) from None
func = format_info[0]
for arg, val in format_info[1]:
kwargs[arg] = val
if format != 'zip':
kwargs['owner'] = owner
kwargs['group'] = group
try:
filename = func(base_name, base_dir, **kwargs)
finally:
if root_dir is not None:
if logger is not None:
logger.debug("changing back to '%s'", save_cwd)
os.chdir(save_cwd)
return filename
| def make_archive(base_name, format, root_dir=None, base_dir=None, verbose=0,
dry_run=0, owner=None, group=None, logger=None):
"""Create an archive file (eg. zip or tar).
'base_name' is the name of the file to create, minus any format-specific
extension; 'format' is the archive format: one of "zip", "tar", "gztar",
"bztar", or "xztar". Or any other registered format.
'root_dir' is a directory that will be the root directory of the
archive; ie. we typically chdir into 'root_dir' before creating the
archive. 'base_dir' is the directory where we start archiving from;
ie. 'base_dir' will be the common prefix of all files and
directories in the archive. 'root_dir' and 'base_dir' both default
to the current directory. Returns the name of the archive file.
'owner' and 'group' are used when creating a tar archive. By default,
uses the current owner and group.
"""
sys.audit("shutil.make_archive", base_name, format, root_dir, base_dir)
base_name = os.fsdecode(base_name)
save_cwd = os.getcwd()
if root_dir is not None:
root_dir = os.fsdecode(root_dir)
if logger is not None:
logger.debug("changing into '%s'", root_dir)
base_name = os.path.abspath(base_name)
if not dry_run:
os.chdir(root_dir)
if base_dir is None:
base_dir = os.curdir
base_dir = os.fsdecode(base_dir)
kwargs = {'dry_run': dry_run, 'logger': logger}
try:
format_info = _ARCHIVE_FORMATS[format]
except KeyError:
raise ValueError("unknown archive format '%s'" % format) from None
func = format_info[0]
for arg, val in format_info[1]:
kwargs[arg] = val
if format != 'zip':
kwargs['owner'] = owner
kwargs['group'] = group
try:
filename = func(base_name, base_dir, **kwargs)
finally:
if root_dir is not None:
if logger is not None:
logger.debug("changing back to '%s'", save_cwd)
os.chdir(save_cwd)
return filename
|
35,871 | def main():
parser = get_sendpayment_parser()
(options, args) = parser.parse_args()
load_program_config(config_path=options.datadir)
if options.schedule == '':
if ((len(args) < 2) or
(btc.is_bip21_uri(args[1]) and len(args) != 2) or
(not btc.is_bip21_uri(args[1]) and len(args) != 3)):
parser.error("Joinmarket sendpayment (coinjoin) needs arguments:"
" wallet, amount, destination address or wallet, bitcoin_uri.")
sys.exit(EXIT_ARGERROR)
#without schedule file option, use the arguments to create a schedule
#of a single transaction
sweeping = False
bip78url = None
if options.schedule == '':
if btc.is_bip21_uri(args[1]):
parsed = btc.decode_bip21_uri(args[1])
try:
amount = parsed['amount']
except KeyError:
parser.error("Given BIP21 URI does not contain amount.")
sys.exit(EXIT_ARGERROR)
destaddr = parsed['address']
if "pj" in parsed:
# note that this is a URL; its validity
# checking is deferred to twisted.web.client.Agent
bip78url = parsed["pj"]
# setting makercount only for fee sanity check.
# note we ignore any user setting and enforce N=0,
# as this is a flag in the code for a non-JM coinjoin;
# for the fee sanity check, note that BIP78 currently
# will only allow small fee changes, so N=0 won't
# be very inaccurate.
jmprint("Attempting to pay via payjoin.", "info")
options.makercount = 0
else:
amount = btc.amount_to_sat(args[1])
if amount == 0:
sweeping = True
destaddr = args[2]
mixdepth = options.mixdepth
addr_valid, errormsg = validate_address(destaddr)
command_to_burn = (is_burn_destination(destaddr) and sweeping and
options.makercount == 0)
if not addr_valid and not command_to_burn:
jmprint('ERROR: Address invalid. ' + errormsg, "error")
if is_burn_destination(destaddr):
jmprint("The required options for burning coins are zero makers"
+ " (-N 0), sweeping (amount = 0) and not using BIP78 Payjoin", "info")
sys.exit(EXIT_ARGERROR)
if sweeping == False and amount < DUST_THRESHOLD:
jmprint('ERROR: Amount ' + btc.amount_to_str(amount) +
' is below dust threshold ' +
btc.amount_to_str(DUST_THRESHOLD) + '.', "error")
sys.exit(EXIT_ARGERROR)
if (options.makercount != 0 and
options.makercount < jm_single().config.getint(
"POLICY", "minimum_makers")):
jmprint('ERROR: Maker count ' + str(options.makercount) +
' below minimum_makers (' + str(jm_single().config.getint(
"POLICY", "minimum_makers")) + ') in joinmarket.cfg.',
"error")
sys.exit(EXIT_ARGERROR)
schedule = [[options.mixdepth, amount, options.makercount,
destaddr, 0.0, NO_ROUNDING, 0]]
else:
if btc.is_bip21_uri(args[1]):
parser.error("Schedule files are not compatible with bip21 uris.")
sys.exit(EXIT_ARGERROR)
result, schedule = get_schedule(options.schedule)
if not result:
log.error("Failed to load schedule file, quitting. Check the syntax.")
log.error("Error was: " + str(schedule))
sys.exit(EXIT_FAILURE)
mixdepth = 0
for s in schedule:
if s[1] == 0:
sweeping = True
#only used for checking the maximum mixdepth required
mixdepth = max([mixdepth, s[0]])
wallet_name = args[0]
check_regtest()
if options.pickorders:
chooseOrdersFunc = pick_order
if sweeping:
jmprint('WARNING: You may have to pick offers multiple times', "warning")
jmprint('WARNING: due to manual offer picking while sweeping', "warning")
else:
chooseOrdersFunc = options.order_choose_fn
# If tx_fees are set manually by CLI argument, override joinmarket.cfg:
if int(options.txfee) > 0:
jm_single().config.set("POLICY", "tx_fees", str(options.txfee))
maxcjfee = (1, float('inf'))
if not options.pickorders and options.makercount != 0:
maxcjfee = get_max_cj_fee_values(jm_single().config, options)
log.info("Using maximum coinjoin fee limits per maker of {:.4%}, {} "
"".format(maxcjfee[0], btc.amount_to_str(maxcjfee[1])))
log.info('starting sendpayment')
max_mix_depth = max([mixdepth, options.amtmixdepths - 1])
wallet_path = get_wallet_path(wallet_name, None)
wallet = open_test_wallet_maybe(
wallet_path, wallet_name, max_mix_depth,
wallet_password_stdin=options.wallet_password_stdin,
gap_limit=options.gaplimit)
wallet_service = WalletService(wallet)
if wallet_service.rpc_error:
sys.exit(EXIT_FAILURE)
# in this script, we need the wallet synced before
# logic processing for some paths, so do it now:
while not wallet_service.synced:
wallet_service.sync_wallet(fast=not options.recoversync)
# the sync call here will now be a no-op:
wallet_service.startService()
# Dynamically estimate a realistic fee, for coinjoins.
# At this point we do not know even the number of our own inputs, so
# we guess conservatively with 2 inputs and 2 outputs each.
if options.makercount != 0:
fee_per_cp_guess = estimate_tx_fee(2, 2,
txtype=wallet_service.get_txtype())
log.debug("Estimated miner/tx fee for each cj participant: " + str(
fee_per_cp_guess))
# From the estimated tx fees, check if the expected amount is a
# significant value compared the the cj amount; currently enabled
# only for single join (the predominant, non-advanced case)
if options.schedule == '' and options.makercount != 0:
total_cj_amount = amount
if total_cj_amount == 0:
total_cj_amount = wallet_service.get_balance_by_mixdepth()[options.mixdepth]
if total_cj_amount == 0:
raise ValueError("No confirmed coins in the selected mixdepth. Quitting")
exp_tx_fees_ratio = ((1 + options.makercount) * fee_per_cp_guess) / total_cj_amount
if exp_tx_fees_ratio > 0.05:
jmprint('WARNING: Expected bitcoin network miner fees for this coinjoin'
' amount are roughly {:.1%}'.format(exp_tx_fees_ratio), "warning")
if input('You might want to modify your tx_fee'
' settings in joinmarket.cfg. Still continue? (y/n):')[0] != 'y':
sys.exit('Aborted by user.')
else:
log.info("Estimated miner/tx fees for this coinjoin amount: {:.1%}"
.format(exp_tx_fees_ratio))
if options.makercount == 0 and not bip78url:
tx = direct_send(wallet_service, amount, mixdepth, destaddr,
options.answeryes, with_final_psbt=options.with_psbt)
if options.with_psbt:
log.info("This PSBT is fully signed and can be sent externally for "
"broadcasting:")
log.info(tx.to_base64())
return
if wallet.get_txtype() == 'p2pkh':
jmprint("Only direct sends (use -N 0) are supported for "
"legacy (non-segwit) wallets.", "error")
sys.exit(EXIT_ARGERROR)
def filter_orders_callback(orders_fees, cjamount):
orders, total_cj_fee = orders_fees
log.info("Chose these orders: " +pprint.pformat(orders))
log.info('total cj fee = ' + str(total_cj_fee))
total_fee_pc = 1.0 * total_cj_fee / cjamount
log.info('total coinjoin fee = ' + str(float('%.3g' % (
100.0 * total_fee_pc))) + '%')
WARNING_THRESHOLD = 0.02 # 2%
if total_fee_pc > WARNING_THRESHOLD:
log.info('\n'.join(['=' * 60] * 3))
log.info('WARNING ' * 6)
log.info('\n'.join(['=' * 60] * 1))
log.info('OFFERED COINJOIN FEE IS UNUSUALLY HIGH. DOUBLE/TRIPLE CHECK.')
log.info('\n'.join(['=' * 60] * 1))
log.info('WARNING ' * 6)
log.info('\n'.join(['=' * 60] * 3))
if not options.answeryes:
if input('send with these orders? (y/n):')[0] != 'y':
return False
return True
def taker_finished(res, fromtx=False, waittime=0.0, txdetails=None):
if fromtx == "unconfirmed":
#If final entry, stop *here*, don't wait for confirmation
if taker.schedule_index + 1 == len(taker.schedule):
reactor.stop()
return
if fromtx:
if res:
txd, txid = txdetails
reactor.callLater(waittime*60,
clientfactory.getClient().clientStart)
else:
#a transaction failed; we'll try to repeat without the
#troublemakers.
#If this error condition is reached from Phase 1 processing,
#and there are less than minimum_makers honest responses, we
#just give up (note that in tumbler we tweak and retry, but
#for sendpayment the user is "online" and so can manually
#try again).
#However if the error is in Phase 2 and we have minimum_makers
#or more responses, we do try to restart with the honest set, here.
if taker.latest_tx is None:
#can only happen with < minimum_makers; see above.
log.info("A transaction failed but there are insufficient "
"honest respondants to continue; giving up.")
reactor.stop()
return
#This is Phase 2; do we have enough to try again?
taker.add_honest_makers(list(set(
taker.maker_utxo_data.keys()).symmetric_difference(
set(taker.nonrespondants))))
if len(taker.honest_makers) < jm_single().config.getint(
"POLICY", "minimum_makers"):
log.info("Too few makers responded honestly; "
"giving up this attempt.")
reactor.stop()
return
jmprint("We failed to complete the transaction. The following "
"makers responded honestly: " + str(taker.honest_makers) +\
", so we will retry with them.", "warning")
#Now we have to set the specific group we want to use, and hopefully
#they will respond again as they showed honesty last time.
#we must reset the number of counterparties, as well as fix who they
#are; this is because the number is used to e.g. calculate fees.
#cleanest way is to reset the number in the schedule before restart.
taker.schedule[taker.schedule_index][2] = len(taker.honest_makers)
log.info("Retrying with: " + str(taker.schedule[
taker.schedule_index][2]) + " counterparties.")
#rewind to try again (index is incremented in Taker.initialize())
taker.schedule_index -= 1
taker.set_honest_only(True)
reactor.callLater(5.0, clientfactory.getClient().clientStart)
else:
if not res:
log.info("Did not complete successfully, shutting down")
#Should usually be unreachable, unless conf received out of order;
#because we should stop on 'unconfirmed' for last (see above)
else:
log.info("All transactions completed correctly")
reactor.stop()
if bip78url:
# TODO sanity check wallet type is segwit
manager = parse_payjoin_setup(args[1], wallet_service, options.mixdepth)
reactor.callWhenRunning(send_payjoin, manager)
reactor.run()
return
else:
taker = Taker(wallet_service,
schedule,
order_chooser=chooseOrdersFunc,
max_cj_fee=maxcjfee,
callbacks=(filter_orders_callback, None, taker_finished))
clientfactory = JMClientProtocolFactory(taker)
nodaemon = jm_single().config.getint("DAEMON", "no_daemon")
daemon = True if nodaemon == 1 else False
if jm_single().config.get("BLOCKCHAIN", "network") == "regtest":
startLogging(sys.stdout)
start_reactor(jm_single().config.get("DAEMON", "daemon_host"),
jm_single().config.getint("DAEMON", "daemon_port"),
clientfactory, daemon=daemon)
| def main():
parser = get_sendpayment_parser()
(options, args) = parser.parse_args()
load_program_config(config_path=options.datadir)
if options.schedule == '':
if ((len(args) < 2) or
(btc.is_bip21_uri(args[1]) and len(args) != 2) or
(not btc.is_bip21_uri(args[1]) and len(args) != 3)):
parser.error("Joinmarket sendpayment (coinjoin) needs arguments:"
" wallet, amount, destination address or wallet, bitcoin_uri.")
sys.exit(EXIT_ARGERROR)
#without schedule file option, use the arguments to create a schedule
#of a single transaction
sweeping = False
bip78url = None
if options.schedule == '':
if btc.is_bip21_uri(args[1]):
parsed = btc.decode_bip21_uri(args[1])
try:
amount = parsed['amount']
except KeyError:
parser.error("Given BIP21 URI does not contain amount.")
sys.exit(EXIT_ARGERROR)
destaddr = parsed['address']
if "pj" in parsed:
# note that this is a URL; its validity
# checking is deferred to twisted.web.client.Agent
bip78url = parsed["pj"]
# setting makercount only for fee sanity check.
# note we ignore any user setting and enforce N=0,
# as this is a flag in the code for a non-JM coinjoin;
# for the fee sanity check, note that BIP78 currently
# will only allow small fee changes, so N=0 won't
# be very inaccurate.
jmprint("Attempting to pay via payjoin.", "info")
options.makercount = 0
else:
amount = btc.amount_to_sat(args[1])
if amount == 0:
sweeping = True
destaddr = args[2]
mixdepth = options.mixdepth
addr_valid, errormsg = validate_address(destaddr)
command_to_burn = (is_burn_destination(destaddr) and sweeping and
options.makercount == 0)
if not addr_valid and not command_to_burn:
jmprint('ERROR: Address invalid. ' + errormsg, "error")
if is_burn_destination(destaddr):
jmprint("The required options for burning coins are zero makers"
+ " (-N 0), sweeping (amount = 0) and not using BIP78 Payjoin", "info")
sys.exit(EXIT_ARGERROR)
if sweeping == False and amount < DUST_THRESHOLD:
jmprint('ERROR: Amount ' + btc.amount_to_str(amount) +
' is below dust threshold ' +
btc.amount_to_str(DUST_THRESHOLD) + '.', "error")
sys.exit(EXIT_ARGERROR)
if (options.makercount != 0 and
options.makercount < jm_single().config.getint(
"POLICY", "minimum_makers")):
jmprint('ERROR: Maker count ' + str(options.makercount) +
' below minimum_makers (' + str(jm_single().config.getint(
"POLICY", "minimum_makers")) + ') in joinmarket.cfg.',
"error")
sys.exit(EXIT_ARGERROR)
schedule = [[options.mixdepth, amount, options.makercount,
destaddr, 0.0, NO_ROUNDING, 0]]
else:
if btc.is_bip21_uri(args[1]):
parser.error("Schedule files are not compatible with bip21 uris.")
sys.exit(EXIT_ARGERROR)
result, schedule = get_schedule(options.schedule)
if not result:
log.error("Failed to load schedule file, quitting. Check the syntax.")
log.error("Error was: " + str(schedule))
sys.exit(EXIT_FAILURE)
mixdepth = 0
for s in schedule:
if s[1] == 0:
sweeping = True
#only used for checking the maximum mixdepth required
mixdepth = max([mixdepth, s[0]])
wallet_name = args[0]
check_regtest()
if options.pickorders:
chooseOrdersFunc = pick_order
if sweeping:
jmprint('WARNING: You may have to pick offers multiple times', "warning")
jmprint('WARNING: due to manual offer picking while sweeping', "warning")
else:
chooseOrdersFunc = options.order_choose_fn
# If tx_fees are set manually by CLI argument, override joinmarket.cfg:
if int(options.txfee) > 0:
jm_single().config.set("POLICY", "tx_fees", str(options.txfee))
maxcjfee = (1, float('inf'))
if not options.pickorders and options.makercount != 0:
maxcjfee = get_max_cj_fee_values(jm_single().config, options)
log.info("Using maximum coinjoin fee limits per maker of {:.4%}, {} "
"".format(maxcjfee[0], btc.amount_to_str(maxcjfee[1])))
log.info('starting sendpayment')
max_mix_depth = max([mixdepth, options.amtmixdepths - 1])
wallet_path = get_wallet_path(wallet_name, None)
wallet = open_test_wallet_maybe(
wallet_path, wallet_name, max_mix_depth,
wallet_password_stdin=options.wallet_password_stdin,
gap_limit=options.gaplimit)
wallet_service = WalletService(wallet)
if wallet_service.rpc_error:
sys.exit(EXIT_FAILURE)
# in this script, we need the wallet synced before
# logic processing for some paths, so do it now:
while not wallet_service.synced:
wallet_service.sync_wallet(fast=not options.recoversync)
# the sync call here will now be a no-op:
wallet_service.startService()
# Dynamically estimate a realistic fee, for coinjoins.
# At this point we do not know even the number of our own inputs, so
# we guess conservatively with 2 inputs and 2 outputs each.
if options.makercount != 0:
fee_per_cp_guess = estimate_tx_fee(2, 2,
txtype=wallet_service.get_txtype())
log.debug("Estimated miner/tx fee for each cj participant: " + btc.amount_to_str(
fee_per_cp_guess))
# From the estimated tx fees, check if the expected amount is a
# significant value compared the the cj amount; currently enabled
# only for single join (the predominant, non-advanced case)
if options.schedule == '' and options.makercount != 0:
total_cj_amount = amount
if total_cj_amount == 0:
total_cj_amount = wallet_service.get_balance_by_mixdepth()[options.mixdepth]
if total_cj_amount == 0:
raise ValueError("No confirmed coins in the selected mixdepth. Quitting")
exp_tx_fees_ratio = ((1 + options.makercount) * fee_per_cp_guess) / total_cj_amount
if exp_tx_fees_ratio > 0.05:
jmprint('WARNING: Expected bitcoin network miner fees for this coinjoin'
' amount are roughly {:.1%}'.format(exp_tx_fees_ratio), "warning")
if input('You might want to modify your tx_fee'
' settings in joinmarket.cfg. Still continue? (y/n):')[0] != 'y':
sys.exit('Aborted by user.')
else:
log.info("Estimated miner/tx fees for this coinjoin amount: {:.1%}"
.format(exp_tx_fees_ratio))
if options.makercount == 0 and not bip78url:
tx = direct_send(wallet_service, amount, mixdepth, destaddr,
options.answeryes, with_final_psbt=options.with_psbt)
if options.with_psbt:
log.info("This PSBT is fully signed and can be sent externally for "
"broadcasting:")
log.info(tx.to_base64())
return
if wallet.get_txtype() == 'p2pkh':
jmprint("Only direct sends (use -N 0) are supported for "
"legacy (non-segwit) wallets.", "error")
sys.exit(EXIT_ARGERROR)
def filter_orders_callback(orders_fees, cjamount):
orders, total_cj_fee = orders_fees
log.info("Chose these orders: " +pprint.pformat(orders))
log.info('total cj fee = ' + str(total_cj_fee))
total_fee_pc = 1.0 * total_cj_fee / cjamount
log.info('total coinjoin fee = ' + str(float('%.3g' % (
100.0 * total_fee_pc))) + '%')
WARNING_THRESHOLD = 0.02 # 2%
if total_fee_pc > WARNING_THRESHOLD:
log.info('\n'.join(['=' * 60] * 3))
log.info('WARNING ' * 6)
log.info('\n'.join(['=' * 60] * 1))
log.info('OFFERED COINJOIN FEE IS UNUSUALLY HIGH. DOUBLE/TRIPLE CHECK.')
log.info('\n'.join(['=' * 60] * 1))
log.info('WARNING ' * 6)
log.info('\n'.join(['=' * 60] * 3))
if not options.answeryes:
if input('send with these orders? (y/n):')[0] != 'y':
return False
return True
def taker_finished(res, fromtx=False, waittime=0.0, txdetails=None):
if fromtx == "unconfirmed":
#If final entry, stop *here*, don't wait for confirmation
if taker.schedule_index + 1 == len(taker.schedule):
reactor.stop()
return
if fromtx:
if res:
txd, txid = txdetails
reactor.callLater(waittime*60,
clientfactory.getClient().clientStart)
else:
#a transaction failed; we'll try to repeat without the
#troublemakers.
#If this error condition is reached from Phase 1 processing,
#and there are less than minimum_makers honest responses, we
#just give up (note that in tumbler we tweak and retry, but
#for sendpayment the user is "online" and so can manually
#try again).
#However if the error is in Phase 2 and we have minimum_makers
#or more responses, we do try to restart with the honest set, here.
if taker.latest_tx is None:
#can only happen with < minimum_makers; see above.
log.info("A transaction failed but there are insufficient "
"honest respondants to continue; giving up.")
reactor.stop()
return
#This is Phase 2; do we have enough to try again?
taker.add_honest_makers(list(set(
taker.maker_utxo_data.keys()).symmetric_difference(
set(taker.nonrespondants))))
if len(taker.honest_makers) < jm_single().config.getint(
"POLICY", "minimum_makers"):
log.info("Too few makers responded honestly; "
"giving up this attempt.")
reactor.stop()
return
jmprint("We failed to complete the transaction. The following "
"makers responded honestly: " + str(taker.honest_makers) +\
", so we will retry with them.", "warning")
#Now we have to set the specific group we want to use, and hopefully
#they will respond again as they showed honesty last time.
#we must reset the number of counterparties, as well as fix who they
#are; this is because the number is used to e.g. calculate fees.
#cleanest way is to reset the number in the schedule before restart.
taker.schedule[taker.schedule_index][2] = len(taker.honest_makers)
log.info("Retrying with: " + str(taker.schedule[
taker.schedule_index][2]) + " counterparties.")
#rewind to try again (index is incremented in Taker.initialize())
taker.schedule_index -= 1
taker.set_honest_only(True)
reactor.callLater(5.0, clientfactory.getClient().clientStart)
else:
if not res:
log.info("Did not complete successfully, shutting down")
#Should usually be unreachable, unless conf received out of order;
#because we should stop on 'unconfirmed' for last (see above)
else:
log.info("All transactions completed correctly")
reactor.stop()
if bip78url:
# TODO sanity check wallet type is segwit
manager = parse_payjoin_setup(args[1], wallet_service, options.mixdepth)
reactor.callWhenRunning(send_payjoin, manager)
reactor.run()
return
else:
taker = Taker(wallet_service,
schedule,
order_chooser=chooseOrdersFunc,
max_cj_fee=maxcjfee,
callbacks=(filter_orders_callback, None, taker_finished))
clientfactory = JMClientProtocolFactory(taker)
nodaemon = jm_single().config.getint("DAEMON", "no_daemon")
daemon = True if nodaemon == 1 else False
if jm_single().config.get("BLOCKCHAIN", "network") == "regtest":
startLogging(sys.stdout)
start_reactor(jm_single().config.get("DAEMON", "daemon_host"),
jm_single().config.getint("DAEMON", "daemon_port"),
clientfactory, daemon=daemon)
|
32,266 | def get_modified_remote_data_command(client: Client, args: Dict[str, Any], params: Dict) -> GetModifiedRemoteDataResponse:
remote_args = GetModifiedRemoteDataArgs(args)
query_date = dateparser.parse(remote_args.last_update,
settings={'TIMEZONE': 'UTC'}).strftime('%Y-%m-%dT%H:%M:%SZ') # type: ignore
assert query_date is not None
demisto.debug(f'Running get-modified-remote-data command. Last update is: {query_date}')
topdesk_incidents = get_incidents_with_pagination(client=client,
max_fetch=int(params.get('max_fetch', 20)),
query=f"modificationDate=gt={query_date}")
modified_records_ids = []
if topdesk_incidents:
modified_records_ids = [topdesk_incident['id'] for topdesk_incident in topdesk_incidents if 'id' in topdesk_incident]
return GetModifiedRemoteDataResponse(modified_records_ids)
| def get_modified_remote_data_command(client: Client, args: Dict[str, Any], params: Dict) -> GetModifiedRemoteDataResponse:
remote_args = GetModifiedRemoteDataArgs(args)
query_date = dateparser.parse(remote_args.last_update,
settings={'TIMEZONE': 'UTC'}).strftime(DATE_FORMAT) # type: ignore
assert query_date is not None
demisto.debug(f'Running get-modified-remote-data command. Last update is: {query_date}')
topdesk_incidents = get_incidents_with_pagination(client=client,
max_fetch=int(params.get('max_fetch', 20)),
query=f"modificationDate=gt={query_date}")
modified_records_ids = []
if topdesk_incidents:
modified_records_ids = [topdesk_incident['id'] for topdesk_incident in topdesk_incidents if 'id' in topdesk_incident]
return GetModifiedRemoteDataResponse(modified_records_ids)
|
10,201 | def main():
argument_spec = RabbitClient.rabbitmq_argument_spec()
argument_spec.update(
exchange=dict(type='str', default=''),
routing_key=dict(type='str', required=False),
body=dict(type='str', required=False),
src=dict(aliases=['file'], type='path', required=False),
content_type=dict(default="text/plain", type='str'),
durable=dict(default=False, type='bool'),
exclusive=dict(default=False, type='bool'),
auto_delete=dict(default=False, type='bool'),
headers=dict(default={}, type='dict'),
cafile=dict(type='str', required=False),
certfile=dict(type='str', required=False),
keyfile=dict(type='str', required=False)
)
module = AnsibleModule(
argument_spec=argument_spec,
mutually_exclusive=[['body', 'src']],
required_together=[['cafile', 'certfile', 'keyfile']],
supports_check_mode=False
)
rabbitmq = RabbitClient(module)
if rabbitmq.basic_publish():
rabbitmq.close_connection()
module.exit_json(changed=True, result={"msg": "Successfully published to queue %s" % rabbitmq.queue,
"queue": rabbitmq.queue,
"content_type": rabbitmq.content_type})
else:
rabbitmq.close_connection()
module.fail_json(changed=False, msg="Unsuccessful publishing to queue %s" % rabbitmq.queue)
| def main():
argument_spec = RabbitClient.rabbitmq_argument_spec()
argument_spec.update(
exchange=dict(type='str', default=''),
routing_key=dict(type='str', required=False),
body=dict(type='str', required=False),
src=dict(aliases=['file'], type='path', required=False),
content_type=dict(default="text/plain", type='str'),
durable=dict(default=False, type='bool'),
exclusive=dict(default=False, type='bool'),
auto_delete=dict(default=False, type='bool'),
headers=dict(default={}, type='dict'),
cafile=dict(type='str', required=False),
certfile=dict(type='str', required=False),
keyfile=dict(type='str', required=False),
)
module = AnsibleModule(
argument_spec=argument_spec,
mutually_exclusive=[['body', 'src']],
required_together=[['cafile', 'certfile', 'keyfile']],
supports_check_mode=False
)
rabbitmq = RabbitClient(module)
if rabbitmq.basic_publish():
rabbitmq.close_connection()
module.exit_json(changed=True, result={"msg": "Successfully published to queue %s" % rabbitmq.queue,
"queue": rabbitmq.queue,
"content_type": rabbitmq.content_type})
else:
rabbitmq.close_connection()
module.fail_json(changed=False, msg="Unsuccessful publishing to queue %s" % rabbitmq.queue)
|
5,740 | def van_der_corput(
n: IntNumber,
base: IntNumber = 2,
*,
start_index: IntNumber = 0,
scramble: bool = False,
seed: SeedType = None) -> np.ndarray:
"""Van der Corput sequence.
Pseudo-random number generator based on a b-adic expansion.
Scrambling uses permutations of the remainders (see [1]_). Multiple
permutations are applied to construct a point. The sequence of
permutations has to be the same for all points of the sequence.
Parameters
----------
n : int
Number of element of the sequence.
base : int, optional
Base of the sequence. Default is 2.
start_index : int, optional
Index to start the sequence from. Default is 0.
scramble : bool, optional
If True, use Owen scrambling. Otherwise no scrambling is done.
Default is True.
seed : {None, int, `numpy.random.Generator`}, optional
If `seed` is None the `numpy.random.Generator` singleton is used.
If `seed` is an int, a new ``Generator`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` instance then that instance is
used.
Returns
-------
sequence : list (n,)
Sequence of Van der Corput.
References
----------
.. [1] A. B. Owen. "A randomized Halton algorithm in R",
arXiv:1706.02808, 2017.
"""
if base < 2:
raise ValueError('base must be at least 2')
if scramble:
rng = check_random_state(seed)
# Create a set of permutation of np.arange(base) for each positive
# integer k such that base**-k > 2**-54. The number of permutations is
# equal to the number of iterations in the while loop in Algorithm 1 of
# Owen 2017.
count = math.ceil(54 / math.log2(base)) - 1
permutations = np.repeat(np.arange(base)[None], count, axis=0)
for perm in permutations:
rng.shuffle(perm)
return _cy_van_der_corput_scrambled(n, base, start_index, permutations)
else:
return _cy_van_der_corput(n, base, start_index)
| def van_der_corput(
n: IntNumber,
base: IntNumber = 2,
*,
start_index: IntNumber = 0,
scramble: bool = False,
seed: SeedType = None) -> np.ndarray:
"""Van der Corput sequence.
Pseudo-random number generator based on a b-adic expansion.
Scrambling uses permutations of the remainders (see [1]_). Multiple
permutations are applied to construct a point. The sequence of
permutations has to be the same for all points of the sequence.
Parameters
----------
n : int
Number of element of the sequence.
base : int, optional
Base of the sequence. Default is 2.
start_index : int, optional
Index to start the sequence from. Default is 0.
scramble : bool, optional
If True, use Owen scrambling. Otherwise no scrambling is done.
Default is True.
seed : {None, int, `numpy.random.Generator`}, optional
If `seed` is None the `numpy.random.Generator` singleton is used.
If `seed` is an int, a new ``Generator`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` instance then that instance is
used.
Returns
-------
sequence : list (n,)
Sequence of Van der Corput.
References
----------
.. [1] A. B. Owen. "A randomized Halton algorithm in R",
arXiv:1706.02808, 2017.
"""
if base < 2:
raise ValueError("'base' must be at least 2")
if scramble:
rng = check_random_state(seed)
# Create a set of permutation of np.arange(base) for each positive
# integer k such that base**-k > 2**-54. The number of permutations is
# equal to the number of iterations in the while loop in Algorithm 1 of
# Owen 2017.
count = math.ceil(54 / math.log2(base)) - 1
permutations = np.repeat(np.arange(base)[None], count, axis=0)
for perm in permutations:
rng.shuffle(perm)
return _cy_van_der_corput_scrambled(n, base, start_index, permutations)
else:
return _cy_van_der_corput(n, base, start_index)
|
33,022 | def pois_from_address(address, distance, tags=None, return_query=False, **kwargs):
"""
Get OSM points of Interests within some distance north, south, east, and west of
an address.
Parameters
----------
address : string
the address to geocode to a lat-long point
distance : numeric
distance in meters
tags : dict
Dictionary of tag keys and values that will be used for finding POIs in the selected area.
Keys may be strings or lists of strings.
Values make be string, lists of strings, or None, if all values should be returned for a given key.
By default, all POIs with an 'amenity' key of any value will be be returned.
Keyword Arguments
-----------------
amenities : list
If provided, will override tags with the dictionary {'amenities':amenities}.
Accommodates the depricatd amenities parameter.
Returns
-------
GeoDataFrame
"""
if 'amenities' in locals():
tags = {'amenity':amenities}
# geocode the address string to a (lat, lon) point
point = geocode(query=address)
# get buildings within distance of this point
return pois_from_point(point=point, distance=distance, tags=tags, return_query=return_query)
| def pois_from_address(address, distance, tags=None, return_query=False, **kwargs):
"""
Get OSM points of Interests within some distance north, south, east, and west of
an address.
Parameters
----------
address : string
the address to geocode to a lat-long point
distance : numeric
distance in meters
tags : dict
Dictionary of tag keys and values that will be used for finding POIs in the selected area.
Keys may be strings or lists of strings.
Values make be string, lists of strings, or None, if all values should be returned for a given key.
By default, all POIs with an 'amenity' key of any value will be be returned.
Keyword Arguments
-----------------
amenities : list
If provided, will override tags with the dictionary {'amenities':amenities}.
Accommodates the deprecated amenities parameter.
Returns
-------
GeoDataFrame
"""
if 'amenities' in locals():
tags = {'amenity':amenities}
# geocode the address string to a (lat, lon) point
point = geocode(query=address)
# get buildings within distance of this point
return pois_from_point(point=point, distance=distance, tags=tags, return_query=return_query)
|
32,291 | def check_spyware_profiles(
topology: Topology,
device_filter_string: str = None,
minimum_block_severities: str = "critical,high",
minimum_alert_severities: str = "medium,low"
) -> ConfigurationHygieneCheckResult:
"""
Checks the configured Anti-spyware profiles to ensure at least one meets best practices.
:param topology: `Topology` instance !no-auto-argument
:param device_filter_string: String to filter to only check given device
:param minimum_block_severities: csv list of severities that must be in drop/reset/block-ip mode.
:param minimum_alert_severities: csv list of severities that must be in alert/default or higher mode.
"""
return HygieneLookups.check_spyware_profiles(
topology,
device_filter_str=device_filter_string,
minimum_block_severities=minimum_block_severities.split(","),
minimum_alert_severities=minimum_alert_severities.split(",")
)
| def check_spyware_profiles(
topology: Topology,
device_filter_string: Optional[str] = None,
minimum_block_severities: str = "critical,high",
minimum_alert_severities: str = "medium,low"
) -> ConfigurationHygieneCheckResult:
"""
Checks the configured Anti-spyware profiles to ensure at least one meets best practices.
:param topology: `Topology` instance !no-auto-argument
:param device_filter_string: String to filter to only check given device
:param minimum_block_severities: csv list of severities that must be in drop/reset/block-ip mode.
:param minimum_alert_severities: csv list of severities that must be in alert/default or higher mode.
"""
return HygieneLookups.check_spyware_profiles(
topology,
device_filter_str=device_filter_string,
minimum_block_severities=minimum_block_severities.split(","),
minimum_alert_severities=minimum_alert_severities.split(",")
)
|
43,667 | def excitations_to_wires(singles, doubles, wires=None):
r"""Map the indices representing the single and double excitations
generated with the function :func:`~.excitations` to the wires that
the Unitary Coupled-Cluster (UCCSD) template will act on.
Args:
singles (list[list[int]]): List with the indices ``r``, ``p`` of the two qubits
representing the single excitation
:math:`\vert r, p \rangle = \hat{c}_p^\dagger \hat{c}_r \vert \mathrm{HF}\rangle`.
doubles (list[list[int]]): list with the indices ``s``, ``r``, ``q``, ``p`` of the four
qubits representing the double excitation
:math:`\vert s, r, q, p \rangle = \hat{c}_p^\dagger \hat{c}_q^\dagger
\hat{c}_r \hat{c}_s \vert \mathrm{HF}\rangle`.
wires (Iterable[Any]): Wires of the quantum device. If None, will use consecutive wires.
The indices :math:`r, s` and :math:`p, q` in these lists correspond, respectively, to the
occupied and virtual orbitals involved in the generated single and double excitations.
Returns:
tuple(list[list[Any]], list[list[list[Any]]]): lists with the sequence of wires,
resulting from the single and double excitations, that the Unitary Coupled-Cluster
(UCCSD) template will act on.
**Example**
>>> singles = [[0, 2], [1, 3]]
>>> doubles = [[0, 1, 2, 3]]
>>> singles_wires, doubles_wires = excitations_to_wires(singles, doubles)
>>> print(single_wires)
[[0, 1, 2], [1, 2, 3]]
>>> print(doubles_wires)
[[[0, 1], [2, 3]]]
>>> wires=['a0', 'b1', 'c2', 'd3']
>>> singles_wires, doubles_wires = excitations_to_wires(singles, doubles, wires=wires)
>>> print(singles_wires)
[['a0', 'b1', 'c2'], ['b1', 'c2', 'd3']]
>>> print(doubles_wires)
[[['a0', 'b1'], ['c2', 'd3']]]
"""
if (not singles) and (not doubles):
raise ValueError(
"'singles' and 'doubles' lists can not be both empty;\
got singles = {}, doubles = {}".format(
singles, doubles
)
)
expected_shape = (2,)
for single_ in singles:
if np.array(single_).shape != expected_shape:
raise ValueError(
"Expected entries of 'singles' to be of shape (2,); got {}".format(
np.array(single_).shape
)
)
expected_shape = (4,)
for double_ in doubles:
if np.array(double_).shape != expected_shape:
raise ValueError(
"Expected entries of 'doubles' to be of shape (4,); got {}".format(
np.array(double_).shape
)
)
max_idx = 0
if singles:
max_idx = np.max(singles)
if doubles:
max_idx = max(np.max(doubles), max_idx)
if wires is None:
wires = range(max_idx + 1)
elif len(wires) != max_idx + 1:
raise ValueError("Expected number of wires is {}; got {}".format(max_idx + 1, len(wires)))
singles_wires = []
for r, p in singles:
s_wires = [wires[i] for i in range(r, p + 1)]
singles_wires.append(s_wires)
doubles_wires = []
for s, r, q, p in doubles:
d1_wires = [wires[i] for i in range(s, r + 1)]
d2_wires = [wires[i] for i in range(q, p + 1)]
doubles_wires.append([d1_wires, d2_wires])
return singles_wires, doubles_wires
| def excitations_to_wires(singles, doubles, wires=None):
r"""Map the indices representing the single and double excitations
generated with the function :func:`~.excitations` to the wires that
the Unitary Coupled-Cluster (UCCSD) template will act on.
Args:
singles (list[list[int]]): List with the indices ``r``, ``p`` of the two qubits
representing the single excitation
:math:`\vert r, p \rangle = \hat{c}_p^\dagger \hat{c}_r \vert \mathrm{HF}\rangle`.
doubles (list[list[int]]): list with the indices ``s``, ``r``, ``q``, ``p`` of the four
qubits representing the double excitation
:math:`\vert s, r, q, p \rangle = \hat{c}_p^\dagger \hat{c}_q^\dagger
\hat{c}_r \hat{c}_s \vert \mathrm{HF}\rangle`.
wires (Iterable[Any]): Wires of the quantum device. If None, will use consecutive wires.
The indices :math:`r, s` and :math:`p, q` in these lists correspond, respectively, to the
occupied and virtual orbitals involved in the generated single and double excitations.
Returns:
tuple[list[list[Any]], list[list[list[Any]]]]: lists with the sequence of wires,
resulting from the single and double excitations, that the Unitary Coupled-Cluster
(UCCSD) template will act on.
**Example**
>>> singles = [[0, 2], [1, 3]]
>>> doubles = [[0, 1, 2, 3]]
>>> singles_wires, doubles_wires = excitations_to_wires(singles, doubles)
>>> print(single_wires)
[[0, 1, 2], [1, 2, 3]]
>>> print(doubles_wires)
[[[0, 1], [2, 3]]]
>>> wires=['a0', 'b1', 'c2', 'd3']
>>> singles_wires, doubles_wires = excitations_to_wires(singles, doubles, wires=wires)
>>> print(singles_wires)
[['a0', 'b1', 'c2'], ['b1', 'c2', 'd3']]
>>> print(doubles_wires)
[[['a0', 'b1'], ['c2', 'd3']]]
"""
if (not singles) and (not doubles):
raise ValueError(
"'singles' and 'doubles' lists can not be both empty;\
got singles = {}, doubles = {}".format(
singles, doubles
)
)
expected_shape = (2,)
for single_ in singles:
if np.array(single_).shape != expected_shape:
raise ValueError(
"Expected entries of 'singles' to be of shape (2,); got {}".format(
np.array(single_).shape
)
)
expected_shape = (4,)
for double_ in doubles:
if np.array(double_).shape != expected_shape:
raise ValueError(
"Expected entries of 'doubles' to be of shape (4,); got {}".format(
np.array(double_).shape
)
)
max_idx = 0
if singles:
max_idx = np.max(singles)
if doubles:
max_idx = max(np.max(doubles), max_idx)
if wires is None:
wires = range(max_idx + 1)
elif len(wires) != max_idx + 1:
raise ValueError("Expected number of wires is {}; got {}".format(max_idx + 1, len(wires)))
singles_wires = []
for r, p in singles:
s_wires = [wires[i] for i in range(r, p + 1)]
singles_wires.append(s_wires)
doubles_wires = []
for s, r, q, p in doubles:
d1_wires = [wires[i] for i in range(s, r + 1)]
d2_wires = [wires[i] for i in range(q, p + 1)]
doubles_wires.append([d1_wires, d2_wires])
return singles_wires, doubles_wires
|
57,819 | def create_process_command(
credentials: Dict, sensor_id: int,
command_string: str,
wait_timeout: int = 30,
wait_for_output: bool = True,
wait_for_completion: bool = True,
**additional_params):
# additional_param may include: remote_output_file_name: str, working_directory: str
api = CBCloudAPI(**credentials)
session = api.select(endpoint_standard.Device, sensor_id).lr_session()
res = session.create_process(
command_string=command_string,
wait_timeout=int(wait_timeout),
wait_for_output=str(wait_for_output) == 'True',
wait_for_completion=str(wait_for_completion) == 'True',
**additional_params)
if res:
return res
return f'Command: {command_string} was executed'
| def create_process_command(
credentials: Dict, sensor_id: int,
command_string: str,
wait_timeout: int = 30,
wait_for_output: bool = True,
wait_for_completion: bool = True,
**additional_params):
# additional_param may include: remote_output_file_name: str, working_directory: str
api = CBCloudAPI(**credentials)
session = api.select(endpoint_standard.Device, sensor_id).lr_session()
res = session.create_process(
command_string=command_string,
wait_timeout=int(wait_timeout),
wait_for_output=str(wait_for_output) == 'True',
wait_for_completion=str(wait_for_completion) == 'True',
**additional_params,
)
if res:
return res
return f'Command: {command_string} was executed'
|
34,173 | def add_subparser(
subparsers: argparse._SubParsersAction, parents: List[argparse.ArgumentParser]
):
import rasa.cli.arguments.train as core_cli
train_parser = subparsers.add_parser(
"train",
help="Train a Rasa model using your NLU data and stories.",
parents=parents,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
arguments.train.set_train_arguments(train_parser)
train_subparsers = train_parser.add_subparsers()
train_core_parser = train_subparsers.add_parser(
"core",
parents=parents,
conflict_handler="resolve",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
help="Train a Rasa Core model using your stories.",
)
train_core_parser.set_defaults(func=train_core)
train_nlu_parser = train_subparsers.add_parser(
"nlu",
parents=parents,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
help="Train a Rasa NLU model using your NLU data.",
)
train_nlu_parser.set_defaults(func=train_nlu)
train_parser.set_defaults(func=train)
arguments.train.set_train_core_arguments(train_core_parser)
arguments.train.set_train_nlu_arguments(train_nlu_parser)
| def add_subparser(
subparsers: argparse._SubParsersAction, parents: List[argparse.ArgumentParser]
):
import rasa.cli.arguments.train as core_cli
train_parser = subparsers.add_parser(
"train",
help="Trains a Rasa model using your NLU data and stories.",
parents=parents,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
arguments.train.set_train_arguments(train_parser)
train_subparsers = train_parser.add_subparsers()
train_core_parser = train_subparsers.add_parser(
"core",
parents=parents,
conflict_handler="resolve",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
help="Train a Rasa Core model using your stories.",
)
train_core_parser.set_defaults(func=train_core)
train_nlu_parser = train_subparsers.add_parser(
"nlu",
parents=parents,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
help="Train a Rasa NLU model using your NLU data.",
)
train_nlu_parser.set_defaults(func=train_nlu)
train_parser.set_defaults(func=train)
arguments.train.set_train_core_arguments(train_core_parser)
arguments.train.set_train_nlu_arguments(train_nlu_parser)
|
34,970 | def conv3d_ndhwc(
Input,
Filter,
stride,
padding,
dilation,
out_dtype="float32",
auto_scheduler_rewritten_layout="",
):
"""Convolution operator in NDHWC layout.
Parameters
----------
Input : tvm.te.Tensor
5-D with shape [batch, in_depth, in_height, in_width, in_channel]
Filter : tvm.te.Tensor
5-D with shape [filter_depth, filter_height, filter_width, in_channel, num_filter]
stride : int or a list/tuple of three ints
Stride size, or [stride_depth, stride_height, stride_width]
padding : int or str
Padding size, or ['VALID', 'SAME']
dilation: int or a list/tuple of three ints
dilation size, or [dilation_depth, dilation_height, dilation_width]
out_dtype: str = "float32",
The type of output tensor
auto_scheduler_rewritten_layout: str = ""
The layout after auto-scheduler's layout rewrite pass.
Returns
-------
Output : tvm.te.Tensor
5-D with shape [batch, out_depth, out_height, out_width, out_channel]
"""
assert isinstance(stride, int) or len(stride) == 3
assert isinstance(dilation, int) or len(dilation) == 3
if isinstance(stride, int):
stride_d = stride_h = stride_w = stride
else:
stride_d, stride_h, stride_w = stride
if isinstance(dilation, int):
dilation_d = dilation_h = dilation_w = dilation
else:
dilation_d, dilation_h, dilation_w = dilation
batch, in_depth, in_height, in_width, in_channel = Input.shape
if auto_scheduler_rewritten_layout:
# Infer shape for the rewritten layout
(
kernel_d,
kernel_h,
kernel_w,
channel,
num_filter,
) = auto_scheduler.get_shape_from_rewritten_layout(
auto_scheduler_rewritten_layout, ["rd", "rh", "rw", "rc", "cc"]
)
auto_scheduler.remove_index_check(Filter)
else:
kernel_d, kernel_h, kernel_w, channel, num_filter = Filter.shape
# compute the output shape
dilated_kernel_d = (kernel_d - 1) * dilation_d + 1
dilated_kernel_h = (kernel_h - 1) * dilation_h + 1
dilated_kernel_w = (kernel_w - 1) * dilation_w + 1
pad_front, pad_top, pad_left, pad_back, pad_down, pad_right = get_pad_tuple3d(
padding, (dilated_kernel_d, dilated_kernel_h, dilated_kernel_w)
)
out_channel = num_filter
out_depth = simplify((in_depth - dilated_kernel_d + pad_front + pad_back) // stride_d + 1)
out_height = simplify((in_height - dilated_kernel_h + pad_top + pad_down) // stride_h + 1)
out_width = simplify((in_width - dilated_kernel_w + pad_left + pad_right) // stride_w + 1)
pad_before = [0, pad_front, pad_top, pad_left, 0]
pad_after = [0, pad_back, pad_down, pad_right, 0]
PaddedInput = pad(Input, pad_before, pad_after, name="PaddedInput")
rd = te.reduce_axis((0, kernel_d), name="rd")
rh = te.reduce_axis((0, kernel_h), name="rh")
rw = te.reduce_axis((0, kernel_w), name="rw")
rc = te.reduce_axis((0, in_channel), name="rc")
Output = te.compute(
(batch, out_depth, out_height, out_width, out_channel),
lambda nn, dd, hh, ww, cc: te.sum(
PaddedInput[
nn,
dd * stride_d + rd * dilation_d,
hh * stride_h + rh * dilation_h,
ww * stride_w + rw * dilation_w,
rc,
].astype(out_dtype)
* Filter[rd, rh, rw, rc, cc].astype(out_dtype),
axis=[rd, rh, rw, rc],
),
name="Conv3dOutput",
tag="conv3d_ndhwc",
attrs={"layout_free_placeholders": [Filter]},
)
if auto_scheduler_rewritten_layout:
Output = auto_scheduler.rewrite_compute_body(Output, auto_scheduler_rewritten_layout)
return Output
| def conv3d_ndhwc(
Input,
Filter,
stride,
padding,
dilation,
out_dtype="float32",
auto_scheduler_rewritten_layout="",
):
"""Convolution operator in NDHWC layout.
Parameters
----------
Input : tvm.te.Tensor
5-D with shape [batch, in_depth, in_height, in_width, in_channel]
Filter : tvm.te.Tensor
5-D with shape [filter_depth, filter_height, filter_width, in_channel, num_filter]
stride : int or a list/tuple of three ints
Stride size, or [stride_depth, stride_height, stride_width]
padding : int or str
Padding size, or ['VALID', 'SAME']
dilation: int or a list/tuple of three ints
dilation size, or [dilation_depth, dilation_height, dilation_width]
out_dtype: str = "float32",
The type of output tensor
auto_scheduler_rewritten_layout: str = ""
The layout after auto-scheduler's layout rewrite pass.
Returns
-------
Output : tvm.te.Tensor
5-D with shape [batch, out_depth, out_height, out_width, out_channel]
"""
assert isinstance(stride, int) or len(stride) == 3
assert isinstance(dilation, int) or len(dilation) == 3
if isinstance(stride, int):
stride_d = stride_h = stride_w = stride
else:
stride_d, stride_h, stride_w = stride
if isinstance(dilation, int):
dilation_d = dilation_h = dilation_w = dilation
else:
dilation_d, dilation_h, dilation_w = dilation
batch, in_depth, in_height, in_width, in_channel = Input.shape
if auto_scheduler_rewritten_layout:
# Infer shape for the rewritten layout
(
kernel_d,
kernel_h,
kernel_w,
channel,
num_filter,
) = auto_scheduler.get_shape_from_rewritten_layout(
auto_scheduler_rewritten_layout, ["rd", "rh", "rw", "rc", "cc"]
)
auto_scheduler.remove_index_check(Filter)
else:
kernel_d, kernel_h, kernel_w, channel, num_filter = Filter.shape
# compute the output shape
dilated_kernel_d = (kernel_d - 1) * dilation_d + 1
dilated_kernel_h = (kernel_h - 1) * dilation_h + 1
dilated_kernel_w = (kernel_w - 1) * dilation_w + 1
pad_front, pad_top, pad_left, pad_back, pad_down, pad_right = get_pad_tuple3d(
padding, (dilated_kernel_d, dilated_kernel_h, dilated_kernel_w)
)
out_channel = num_filter
out_depth = simplify((in_depth - dilated_kernel_d + pad_front + pad_back) // stride_d + 1)
out_height = simplify((in_height - dilated_kernel_h + pad_top + pad_down) // stride_h + 1)
out_width = simplify((in_width - dilated_kernel_w + pad_left + pad_right) // stride_w + 1)
pad_before = [0, pad_front, pad_top, pad_left, 0]
pad_after = [0, pad_back, pad_down, pad_right, 0]
PaddedInput = pad(Input, pad_before, pad_after, name="PaddedInput")
rd = te.reduce_axis((0, kernel_d), name="rd")
rh = te.reduce_axis((0, kernel_h), name="rh")
rw = te.reduce_axis((0, kernel_w), name="rw")
rc = te.reduce_axis((0, in_channel), name="rc")
Output = te.compute(
(batch, out_depth, out_height, out_width, out_channel),
lambda nn, dd, hh, ww, cc: te.sum(
PaddedInput[
nn,
dd * stride_d + rd * dilation_d,
hh * stride_h + rh * dilation_h,
ww * stride_w + rw * dilation_w,
rc,
].astype(out_dtype)
* Filter[rd, rh, rw, rc, cc].astype(out_dtype),
axis=[rd, rh, rw, rc],
),
name="Conv3dOutput",
tag="conv3d_ndhwc",
attrs={"layout_free_placeholders": [Filter]},
)
return auto_scheduler.rewrite_compute_body(Output, auto_scheduler_rewritten_layout)
|
34,629 | def add_subparser(
subparsers: SubParsersAction, parents: List[argparse.ArgumentParser]
) -> None:
"""Add all parsers for training in chunks.
Args:
subparsers: subparser we are going to attach to
parents: Parent parsers, needed to ensure tree structure in argparse
"""
train_parser = subparsers.add_parser(
"train-in-chunks",
help="Trains a Rasa model in smaller chunks using your NLU data and stories.",
parents=parents,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
train_subparsers = train_parser.add_subparsers()
train_core_parser = train_subparsers.add_parser(
"core",
parents=parents,
conflict_handler="resolve",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
help="Trains a Rasa Core model in smaller chunks using your stories.",
)
train_core_parser.set_defaults(func=train_chunks_core)
train_nlu_parser = train_subparsers.add_parser(
"nlu",
parents=parents,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
help="Trains a Rasa NLU model in smaller chunks using your NLU data.",
)
train_nlu_parser.set_defaults(func=train_chunks_nlu)
train_arguments.set_train_in_chunks_core_arguments(train_core_parser)
train_arguments.set_train_in_chunks_nlu_arguments(train_nlu_parser)
train_arguments.set_train_in_chunks_arguments(train_parser)
train_parser.set_defaults(func=train_chunks)
| def add_subparser(
subparsers: SubParsersAction, parents: List[argparse.ArgumentParser]
) -> None:
"""Add all parsers for training in chunks.
Args:
subparsers: subparser we are going to attach to
parents: Parent parsers, needed to ensure tree structure in argparse
"""
train_parser = subparsers.add_parser(
"train-in-chunks",
help="Trains a Rasa model in smaller chunks using your NLU data and stories. Use to reduce the memory consumption during training.",
parents=parents,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
train_subparsers = train_parser.add_subparsers()
train_core_parser = train_subparsers.add_parser(
"core",
parents=parents,
conflict_handler="resolve",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
help="Trains a Rasa Core model in smaller chunks using your stories.",
)
train_core_parser.set_defaults(func=train_chunks_core)
train_nlu_parser = train_subparsers.add_parser(
"nlu",
parents=parents,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
help="Trains a Rasa NLU model in smaller chunks using your NLU data.",
)
train_nlu_parser.set_defaults(func=train_chunks_nlu)
train_arguments.set_train_in_chunks_core_arguments(train_core_parser)
train_arguments.set_train_in_chunks_nlu_arguments(train_nlu_parser)
train_arguments.set_train_in_chunks_arguments(train_parser)
train_parser.set_defaults(func=train_chunks)
|
35,487 | def extract_casync_image(target_slot_number: int, partition: dict, cloudlog):
path = get_partition_path(target_slot_number, partition)
seed_path = path[:-1] + ('b' if path[-1] == 'a' else 'a')
target = casync.parse_caibx(partition['casync_caibx'])
sources = []
# First source is the current partition. Index file for current version is provided in the manifest
# TODO: build url based on agnos version or parition hash instead?
if 'casync_seed_caibx' in partition:
sources += [('seed', functools.partial(casync.read_chunk_local_file, f=open(seed_path, 'rb')), casync.build_chunk_dict(casync.parse_caibx(partition['casync_seed_caibx'])))]
# Second source is the target partition, this allows for resuming
sources += [('target', functools.partial(casync.read_chunk_local_file, f=open(path, 'rb')), casync.build_chunk_dict(target))]
# Finally we add the remote source to download any missing chunks
sources += [('remote', functools.partial(casync.read_chunk_remote_store, store_path=partition['casync_store']), casync.build_chunk_dict(target))]
last_p = 0
def progress(cur):
nonlocal last_p
p = int(cur / partition['size'] * 100)
if p != last_p:
last_p = p
print(f"Installing {partition['name']}: {p}", flush=True)
stats = casync.extract(target, sources, path, progress)
cloudlog.error(f'casync done {json.dumps(stats)}')
os.sync()
if not verify_partition(target_slot_number, partition, force_full_check=True):
raise Exception(f"Raw hash mismatch '{partition['hash_raw'].lower()}'")
| def extract_casync_image(target_slot_number: int, partition: dict, cloudlog):
path = get_partition_path(target_slot_number, partition)
seed_path = path[:-1] + ('b' if path[-1] == 'a' else 'a')
target = casync.parse_caibx(partition['casync_caibx'])
sources = []
# First source is the current partition. Index file for current version is provided in the manifest
# TODO: build url based on agnos version or parition hash instead?
if 'casync_seed_caibx' in partition:
sources += [('seed', functools.partial(casync.read_chunk_local_file, f=open(seed_path, 'rb')), casync.build_chunk_dict(casync.parse_caibx(partition['casync_seed_caibx'])))]
# Second source is the target partition, this allows for resuming
sources += [('target', functools.partial(casync.read_chunk_local_file, f=open(path, 'rb')), casync.build_chunk_dict(target))]
# Finally we add the remote source to download any missing chunks
sources += [('remote', functools.partial(casync.read_chunk_remote_store, store_path=partition['casync_store']), casync.build_chunk_dict(target))]
last_p = 0
def progress(cur):
nonlocal last_p
p = int(cur / partition['size'] * 100)
if p != last_p:
last_p = p
print(f"Installing {partition['name']}: {p}", flush=True)
cloudlog.event(f'casync done', stats=stats, error=True)
cloudlog.error(f'casync done {json.dumps(stats)}')
os.sync()
if not verify_partition(target_slot_number, partition, force_full_check=True):
raise Exception(f"Raw hash mismatch '{partition['hash_raw'].lower()}'")
|
38,900 | def model_schema(model: Type['BaseModel'], by_alias: bool = True, ref_prefix: Optional[str] = None) -> Dict[str, Any]:
"""
Generate a JSON Schema for one model. With all the sub-models defined in the ``definitions`` top-level
JSON key.
:param model: a Pydantic model (a class that inherits from BaseModel)
:param by_alias: generate the schemas using the aliases defined, if any
:param ref_prefix: the JSON Pointer prefix for schema references with ``$ref``, if None, will be set to the
default of ``#/definitions/``. Update it if you want the schemas to reference the definitions somewhere
else, e.g. for OpenAPI use ``#/components/schemas/``. The resulting generated schemas will still be at the
top-level key ``definitions``, so you can extract them from there. But all the references will have the set
prefix.
:return: dict with the JSON Schema for the passed ``model``
"""
ref_prefix = ref_prefix or default_prefix
flat_models = get_flat_models_from_model(model)
model_name_map = get_model_name_map(flat_models)
model_name = model_name_map[model]
m_schema, m_definitions = model_process_schema(
model, by_alias=by_alias, model_name_map=model_name_map, ref_prefix=ref_prefix
)
if model_name in m_definitions:
# m_definitions[model_name] is None, it has circular references
m_definitions[model_name] = m_schema
m_schema = {'$ref': f'{ref_prefix}{model_name}'}
if m_definitions:
m_schema.update({'definitions': m_definitions})
return m_schema
| def model_schema(model: Type['BaseModel'], by_alias: bool = True, ref_prefix: Optional[str] = None) -> Dict[str, Any]:
"""
Generate a JSON Schema for one model. With all the sub-models defined in the ``definitions`` top-level
JSON key.
:param model: a Pydantic model (a class that inherits from BaseModel)
:param by_alias: generate the schemas using the aliases defined, if any
:param ref_prefix: the JSON Pointer prefix for schema references with ``$ref``, if None, will be set to the
default of ``#/definitions/``. Update it if you want the schemas to reference the definitions somewhere
else, e.g. for OpenAPI use ``#/components/schemas/``. The resulting generated schemas will still be at the
top-level key ``definitions``, so you can extract them from there. But all the references will have the set
prefix.
:return: dict with the JSON Schema for the passed ``model``
"""
ref_prefix = ref_prefix or default_prefix
flat_models = get_flat_models_from_model(model)
model_name_map = get_model_name_map(flat_models)
model_name = model_name_map[model]
m_schema, m_definitions = model_process_schema(
model, by_alias=by_alias, model_name_map=model_name_map, ref_prefix=ref_prefix
)
if model_name in m_definitions:
# m_definitions[model_name] is None, it has circular references
m_definitions[model_name] = m_schema
m_schema = {'$ref': ref_prefix + model_name}
if m_definitions:
m_schema.update({'definitions': m_definitions})
return m_schema
|
16,666 | def setup_platform(
hass: HomeAssistant,
config: ConfigType,
add_entities: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
"""Find and return switches controlled by telnet commands."""
devices: dict[str, Any] = config.get(CONF_SWITCHES, {})
switches = []
for object_id, device_config in devices.items():
value_template: Template = device_config.get(CONF_VALUE_TEMPLATE)
if value_template is not None:
value_template.hass = hass
switches.append(
TelnetSwitch(
hass,
object_id,
device_config.get(CONF_RESOURCE),
device_config.get(CONF_PORT),
device_config.get(CONF_NAME, object_id),
device_config.get(CONF_COMMAND_ON),
device_config.get(CONF_COMMAND_OFF),
device_config.get(CONF_COMMAND_STATE),
value_template,
device_config.get(CONF_TIMEOUT),
)
)
if not switches:
_LOGGER.error("No switches added")
return
add_entities(switches)
| def setup_platform(
hass: HomeAssistant,
config: ConfigType,
add_entities: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
"""Find and return switches controlled by telnet commands."""
devices: dict[str, Any] = config.get(CONF_SWITCHES, {})
switches = []
for object_id, device_config in devices.items():
value_template: Template | None = device_config.get(CONF_VALUE_TEMPLATE)
if value_template is not None:
value_template.hass = hass
switches.append(
TelnetSwitch(
hass,
object_id,
device_config.get(CONF_RESOURCE),
device_config.get(CONF_PORT),
device_config.get(CONF_NAME, object_id),
device_config.get(CONF_COMMAND_ON),
device_config.get(CONF_COMMAND_OFF),
device_config.get(CONF_COMMAND_STATE),
value_template,
device_config.get(CONF_TIMEOUT),
)
)
if not switches:
_LOGGER.error("No switches added")
return
add_entities(switches)
|
30,945 | def get_remote_data_command(id: str, lastUpdate: str) -> GetRemoteDataResponse:
""" Mirror-in data to incident from Jira into demisto 'jira issue' incident.
Notes:
1. Documentation on mirroring - https://xsoar.pan.dev/docs/integrations/mirroring_integration
Args:
id: Remote incident id.
lastUpdate: Server last sync time with remote server.
Returns:
GetRemoteDataResponse: Structured incident response.
"""
incident_update = {}
# Get raw response on issue ID
_, _, issue_raw_response = get_issue(issue_id=id)
# Timestamp - Issue last modified in jira server side
jira_modified_date: datetime = parse_date_string(dict_safe_get(issue_raw_response,
['fields', 'updated'], "", str))
# Timestamp - Issue last sync in demisto server side
incident_modified_date: datetime = parse_date_string(lastUpdate)
# Update incidents only if issue modified in jira server side after last sync
if jira_modified_date > incident_modified_date:
incident_update = create_update_incident_from_ticket(issue_raw_response) # Getting labels to be updated in incident
demisto.debug(f"\nUpdate incident:\n\tIncident name: Jira issue {issue_raw_response.get('id')}\n\t"
f"Reason: Issue modified in remote.\n\tIncident Last update time: {incident_modified_date}"
f"\n\tRemote last updated time: {jira_modified_date}\n")
return GetRemoteDataResponse(incident_update, [])
| def get_remote_data_command(id: str, lastUpdate: str) -> GetRemoteDataResponse:
""" Mirror-in data to incident from Jira into demisto 'jira issue' incident.
Notes:
1. Documentation on mirroring - https://xsoar.pan.dev/docs/integrations/mirroring_integration
Args:
id: Remote incident id.
lastUpdate: Server last sync time with remote server.
Returns:
GetRemoteDataResponse: Structured incident response.
"""
incident_update = {}
# Get raw response on issue ID
_, _, issue_raw_response = get_issue(issue_id=id)
# Timestamp - Issue last modified in jira server side
jira_modified_date: datetime = parse_date_string(dict_safe_get(issue_raw_response,
['fields', 'updated'], "", str))
# Timestamp - Issue last sync in demisto server side
incident_modified_date: datetime = parse_date_string(lastUpdate)
# Update incident only if issue modified in Jira server-side after the last sync
if jira_modified_date > incident_modified_date:
incident_update = create_update_incident_from_ticket(issue_raw_response) # Getting labels to be updated in incident
demisto.debug(f"\nUpdate incident:\n\tIncident name: Jira issue {issue_raw_response.get('id')}\n\t"
f"Reason: Issue modified in remote.\n\tIncident Last update time: {incident_modified_date}"
f"\n\tRemote last updated time: {jira_modified_date}\n")
return GetRemoteDataResponse(incident_update, [])
|
57,858 | def get_group_command(client, args):
scim = verify_and_load_scim_data(args.get('scim'))
group_id = scim.get('id')
group_name = scim.get('displayName')
if not (group_id or group_name):
return_error("You must supply either 'id' or 'displayName' in the scim data")
if not group_id:
res = client.search_group(group_name)
res_json = res.json()
if res.status_code == 200:
if res_json.get('totalResults') < 1:
generic_iam_context = OutputContext(success=False, displayName=group_name, errorCode=404,
errorMessage="Group Not Found", details=res_json)
return CommandResults(
raw_response=generic_iam_context.data,
outputs_prefix=generic_iam_context.command,
outputs_key_field='id',
outputs=generic_iam_context.data,
readable_output=tableToMarkdown('Slack Get Group:', generic_iam_context.data, removeNull=True)
)
else:
group_id = res_json['Resources'][0].get('id')
else:
generic_iam_context = OutputContext(success=False, displayName=group_name, id=group_id,
errorCode=res_json['Errors']['code'],
errorMessage=res_json['Errors']['description'], details=res_json)
return CommandResults(
raw_response=generic_iam_context.data,
outputs_prefix=generic_iam_context.command,
outputs_key_field='id',
outputs=generic_iam_context.data,
readable_output=tableToMarkdown('Slack Get Group:', generic_iam_context.data, removeNull=True)
)
res = client.get_group_by_id(group_id)
res_json = res.json()
if res.status_code == 200:
include_members = args.get('includeMembers')
if include_members.lower() == 'false' and 'members' in res_json:
del res_json['members']
generic_iam_context = OutputContext(success=True, id=res_json.get('id'),
displayName=res_json.get('displayName'),
members=res_json.get('members'))
elif res.status_code == 404:
generic_iam_context = OutputContext(success=False, displayName=group_name, id=group_id, errorCode=404,
errorMessage="Group Not Found", details=res_json)
else:
generic_iam_context = OutputContext(success=False, displayName=group_name, id=group_id,
errorCode=res_json['Errors']['code'],
errorMessage=res_json['Errors']['description'], details=res_json)
return CommandResults(
raw_response=generic_iam_context.data,
outputs_prefix=generic_iam_context.command,
outputs_key_field='id',
outputs=generic_iam_context.data,
readable_output=tableToMarkdown('Slack Get Group:', generic_iam_context.data, removeNull=True)
)
| def get_group_command(client, args):
scim = verify_and_load_scim_data(args.get('scim'))
group_id = scim.get('id')
group_name = scim.get('displayName')
if not (group_id or group_name):
return_error("You must supply either 'id' or 'displayName' in the scim data")
if not group_id:
res = client.search_group(group_name)
res_json = res.json()
if res.status_code == 200:
if res_json.get('totalResults') < 1:
generic_iam_context = OutputContext(success=False, displayName=group_name, errorCode=404,
errorMessage="Group Not Found", details=res_json)
return CommandResults(
raw_response=generic_iam_context.data,
outputs_prefix=generic_iam_context.command,
outputs_key_field='id',
outputs=generic_iam_context.data,
readable_output=tableToMarkdown('Slack Get Group:', generic_iam_context.data, removeNull=True)
)
else:
group_id = res_json['Resources'][0].get('id')
else:
generic_iam_context = OutputContext(success=False, displayName=group_name, id=group_id,
errorCode=res_json['Errors']['code'],
errorMessage=res_json['Errors']['description'], details=res_json)
return CommandResults(
raw_response=generic_iam_context.data,
outputs_prefix=generic_iam_context.command,
outputs_key_field='id',
outputs=generic_iam_context.data,
readable_output=tableToMarkdown('Slack Get Group:', generic_iam_context.data, removeNull=True)
)
# TODO: initialize generic_iam_context...
if not group_id: # try getting the group by name
generic_iam_context = get_group_by_name(group_name)
group_id = generic_iam_context.id
if not generic_iam_context.data and success is not False: # try getting the group data by id, unless we didn't find it by name
res = client.get_group_by_id(group_id)
res_json = res.json()
if res.status_code == 200:
include_members = args.get('includeMembers')
if include_members.lower() == 'false' and 'members' in res_json:
del res_json['members']
generic_iam_context = OutputContext(success=True, id=res_json.get('id'),
displayName=res_json.get('displayName'),
members=res_json.get('members'))
elif res.status_code == 404:
generic_iam_context = OutputContext(success=False, displayName=group_name, id=group_id, errorCode=404,
errorMessage="Group Not Found", details=res_json)
else:
generic_iam_context = OutputContext(success=False, displayName=group_name, id=group_id,
errorCode=res_json['Errors']['code'],
errorMessage=res_json['Errors']['description'], details=res_json)
return CommandResults(
raw_response=generic_iam_context.data,
outputs_prefix=generic_iam_context.command,
outputs_key_field='id',
outputs=generic_iam_context.data,
readable_output=tableToMarkdown('Slack Get Group:', generic_iam_context.data, removeNull=True)
)
|
7,519 | def convert_numpy(numpy_type):
"""Return a tuple containing a function which converts a list into a numpy
array and the type produced by the converter function.
Parameters
----------
numpy_type : numpy data-type
The numpy type required of an array returned by ``converter``. Must be a
valid `numpy type <https://docs.scipy.org/doc/numpy/user/basics.types.html>`_,
e.g. int, numpy.uint, numpy.int8, numpy.int64, numpy.float,
numpy.float64, numpy.str.
Returns
-------
(converter, converter_type) : (function, generic data-type)
``converter`` is a function which accepts a list and converts it to a
numpy array of type ``numpy_type``.
``converter_type`` tracks the generic data type produced by the converter
function.
Raises
------
ValueError
Raised by ``converter`` if the list elements could not be converted to
the required type.
"""
# Infer converter type from an instance of numpy_type.
type_name = numpy.array([], dtype=numpy_type).dtype.name
if 'int' in type_name:
converter_type = IntType
elif 'float' in type_name:
converter_type = FloatType
elif 'bool' in type_name:
converter_type = BoolType
elif 'str' in type_name:
converter_type = StrType
else:
converter_type = AllType
def bool_converter(vals):
"""
Convert values "False" and "True" to bools. Raise an exception
for any other string values.
"""
if len(vals) == 0:
return numpy.array([], dtype=bool)
# Try a smaller subset first for a long array
if len(vals) > 10000:
svals = numpy.asarray(vals[:1000])
if not numpy.all((svals == 'False') | (svals == 'True')):
raise ValueError('bool input strings must be only False or True')
vals = numpy.asarray(vals)
trues = vals == 'True'
falses = vals == 'False'
if not numpy.all(trues | falses):
raise ValueError('bool input strings must be only False or True')
return trues
def generic_converter(vals):
return numpy.array(vals, numpy_type)
converter = bool_converter if converter_type is BoolType else generic_converter
return converter, converter_type
| def convert_numpy(numpy_type):
"""Return a tuple containing a function which converts a list into a numpy
array and the type produced by the converter function.
Parameters
----------
numpy_type : numpy data-type
The numpy type required of an array returned by ``converter``. Must be a
valid `numpy type <https://docs.scipy.org/doc/numpy/user/basics.types.html>`_,
e.g. int, numpy.uint, numpy.int8, numpy.int64, numpy.float,
numpy.float64, str.
Returns
-------
(converter, converter_type) : (function, generic data-type)
``converter`` is a function which accepts a list and converts it to a
numpy array of type ``numpy_type``.
``converter_type`` tracks the generic data type produced by the converter
function.
Raises
------
ValueError
Raised by ``converter`` if the list elements could not be converted to
the required type.
"""
# Infer converter type from an instance of numpy_type.
type_name = numpy.array([], dtype=numpy_type).dtype.name
if 'int' in type_name:
converter_type = IntType
elif 'float' in type_name:
converter_type = FloatType
elif 'bool' in type_name:
converter_type = BoolType
elif 'str' in type_name:
converter_type = StrType
else:
converter_type = AllType
def bool_converter(vals):
"""
Convert values "False" and "True" to bools. Raise an exception
for any other string values.
"""
if len(vals) == 0:
return numpy.array([], dtype=bool)
# Try a smaller subset first for a long array
if len(vals) > 10000:
svals = numpy.asarray(vals[:1000])
if not numpy.all((svals == 'False') | (svals == 'True')):
raise ValueError('bool input strings must be only False or True')
vals = numpy.asarray(vals)
trues = vals == 'True'
falses = vals == 'False'
if not numpy.all(trues | falses):
raise ValueError('bool input strings must be only False or True')
return trues
def generic_converter(vals):
return numpy.array(vals, numpy_type)
converter = bool_converter if converter_type is BoolType else generic_converter
return converter, converter_type
|
45,924 | def _draw_pixel(
image: torch.Tensor,
x: int,
y: int,
color: torch.Tensor,
):
r"""Draws a pixel into an image.
Args:
image: the input image to where to draw the lines with shape (C,H,W).
x: the x coordinate of the pixel.
y: the y coordinate of the pixel.
color: the color of the pixel with shape (3).
Return:
Nothing is returned
"""
image[:, y, x] = color
| def _draw_pixel(
image: torch.Tensor,
x: int,
y: int,
color: torch.Tensor,
):
r"""Draws a pixel into an image.
Args:
image: the input image to where to draw the lines with shape :math:`(C,H,W)`.
x: the x coordinate of the pixel.
y: the y coordinate of the pixel.
color: the color of the pixel with shape (3).
Return:
Nothing is returned
"""
image[:, y, x] = color
|
37,095 | def _to_tuple(values):
"""
Return the input, sorted, and as a tuple.
Args:
values: An integer, a list of ints, or a tuple of ints.
Returns:
tuple: The input values as a sorted tuple.
"""
try:
return tuple(sorted(values))
except TypeError:
return (values,)
| def _to_tuple(values):
"""
Return the input, sorted, and as a tuple.
Args:
values: An integer, a list of ints, or a tuple of ints.
Returns:
tuple: The input values as a sorted tuple.
"""
try:
return tuple(values)
except TypeError:
return (values,)
|
22,364 | def make_same_length(list1, list2):
# If either list is 1 item, we'll append to it until its length is the same as the other.
if len(list1) == 1:
for _i in range(1, len(list2)):
list1.append(list1[0])
elif len(list2) == 1:
for _i in range(1, len(list1)):
list2.append(list2[0])
return list1, list2
| def make_same_length(list1, list2):
# If either list is 1 item, we'll append to it until its length is the same as the other.
if len(list1) == 1:
for _i in range(1, len(list2)):
list1.append(list1[0])
elif len(list2) == 1:
for _ in range(1, len(list1)):
list2.append(list2[0])
return list1, list2
|
31,093 | def create_incident_from_saved_data(field_mapping, incident_result=False):
created_incident = {}
demisto_incident = demisto.incident()
custom_fields = demisto_incident.get('CustomFields', {})
if incident_result: # need to return the incident with extra fields
fields_to_create = ['xdralerts', 'xdrfileartifacts', 'xdrnetworkartifacts']
for field in fields_to_create:
created_incident[field] = custom_fields.get(field)
return created_incident
for field in XDR_INCIDENT_FIELDS:
field_in_xdr = field_mapping.get(field)
if field_in_xdr:
if field_in_xdr == 'xdrmodificationtime':
modification_time_in_utc_format = custom_fields.get(field_in_xdr)
date = dateparser.parse(modification_time_in_utc_format, settings={'TIMEZONE': 'UTC'})
if date is None:
# if date is None it means dateparser failed to parse it
raise ValueError(f'The modification date of the incident is invalid: '
f'{modification_time_in_utc_format}')
created_incident[field] = int(date.timestamp() * 1000)
elif field == 'severity':
created_incident[field] = demisto_incident.get(field) # severity fields is in the incident root
else:
created_incident[field] = custom_fields.get(field_in_xdr)
created_incident['incident_id'] = custom_fields.get('xdrincidentid')
return created_incident
| def create_incident_from_saved_data(field_mapping, include_extra_data=False):
created_incident = {}
demisto_incident = demisto.incident()
custom_fields = demisto_incident.get('CustomFields', {})
if incident_result: # need to return the incident with extra fields
fields_to_create = ['xdralerts', 'xdrfileartifacts', 'xdrnetworkartifacts']
for field in fields_to_create:
created_incident[field] = custom_fields.get(field)
return created_incident
for field in XDR_INCIDENT_FIELDS:
field_in_xdr = field_mapping.get(field)
if field_in_xdr:
if field_in_xdr == 'xdrmodificationtime':
modification_time_in_utc_format = custom_fields.get(field_in_xdr)
date = dateparser.parse(modification_time_in_utc_format, settings={'TIMEZONE': 'UTC'})
if date is None:
# if date is None it means dateparser failed to parse it
raise ValueError(f'The modification date of the incident is invalid: '
f'{modification_time_in_utc_format}')
created_incident[field] = int(date.timestamp() * 1000)
elif field == 'severity':
created_incident[field] = demisto_incident.get(field) # severity fields is in the incident root
else:
created_incident[field] = custom_fields.get(field_in_xdr)
created_incident['incident_id'] = custom_fields.get('xdrincidentid')
return created_incident
|
1,715 | def load_files(container_path, description=None, categories=None,
load_content=True, shuffle=True, encoding=None,
decode_error='strict', random_state=0):
"""Load text files with categories as subfolder names.
Individual samples are assumed to be files stored a two levels folder
structure such as the following:
container_folder/
category_1_folder/
file_1.txt
file_2.txt
...
file_42.txt
category_2_folder/
file_43.txt
file_44.txt
...
The folder names are used as supervised signal label names. The individual
file names are not important.
This function does not try to extract features into a numpy array or scipy
sparse matrix. In addition, if load_content is false it does not try to
load the files in memory.
To use text files in a scikit-learn classification or clustering algorithm,
you will need to use the :mod`~sklearn.feature_extraction.text` module to
build a feature extraction transformer that suits your problem.
If you set load_content=True, you should also specify the encoding of the
text using the 'encoding' parameter. For many modern text files, 'utf-8'
will be the correct encoding. If you leave encoding equal to None, then the
content will be made of bytes instead of Unicode, and you will not be able
to use most functions in :mod:`~sklearn.feature_extraction.text`.
Similar feature extractors should be built for other kind of unstructured
data input such as images, audio, video, ...
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
container_path : string or unicode
Path to the main folder holding one subfolder per category
description : string or unicode, optional (default=None)
A paragraph describing the characteristic of the dataset: its source,
reference, etc.
categories : A collection of strings or None, optional (default=None)
If None (default), load all the categories. If not None, list of
category names to load (other categories ignored).
load_content : bool, optional (default=True)
Whether to load or not the content of the different files. If true a
'data' attribute containing the text information is present in the data
structure returned. If not, a filenames attribute gives the path to the
files.
shuffle : bool, optional (default=True)
Whether or not to shuffle the data: might be important for models that
make the assumption that the samples are independent and identically
distributed (i.i.d.), such as stochastic gradient descent.
encoding : string or None (default is None)
If None, do not try to decode the content of the files (e.g. for images
or other non-text content). If not None, encoding to use to decode text
files to Unicode if load_content is True.
decode_error : {'strict', 'ignore', 'replace'}, optional
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. Passed as keyword
argument 'errors' to bytes.decode.
random_state : int, RandomState instance or None, default=0
Determines random number generation for dataset shuffling. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
data : Bunch
Dictionary-like object, with attributes.
For more information, see :class:`sklearn.utils.Bunch`:
data : list of str
Only present when `load_content=True`.
The raw text data to learn.
target : ndarray
The target labels (integer index).
target_names : list
The names of target classes.
DESCR : str
The full description of the dataset.
filenames:
The filenames holding the dataset.
"""
target = []
target_names = []
filenames = []
folders = [f for f in sorted(listdir(container_path))
if isdir(join(container_path, f))]
if categories is not None:
folders = [f for f in folders if f in categories]
for label, folder in enumerate(folders):
target_names.append(folder)
folder_path = join(container_path, folder)
documents = [join(folder_path, d)
for d in sorted(listdir(folder_path))]
target.extend(len(documents) * [label])
filenames.extend(documents)
# convert to array for fancy indexing
filenames = np.array(filenames)
target = np.array(target)
if shuffle:
random_state = check_random_state(random_state)
indices = np.arange(filenames.shape[0])
random_state.shuffle(indices)
filenames = filenames[indices]
target = target[indices]
if load_content:
data = []
for filename in filenames:
with open(filename, 'rb') as f:
data.append(f.read())
if encoding is not None:
data = [d.decode(encoding, decode_error) for d in data]
return Bunch(data=data,
filenames=filenames,
target_names=target_names,
target=target,
DESCR=description)
return Bunch(filenames=filenames,
target_names=target_names,
target=target,
DESCR=description)
| def load_files(container_path, description=None, categories=None,
load_content=True, shuffle=True, encoding=None,
decode_error='strict', random_state=0):
"""Load text files with categories as subfolder names.
Individual samples are assumed to be files stored a two levels folder
structure such as the following:
container_folder/
category_1_folder/
file_1.txt
file_2.txt
...
file_42.txt
category_2_folder/
file_43.txt
file_44.txt
...
The folder names are used as supervised signal label names. The individual
file names are not important.
This function does not try to extract features into a numpy array or scipy
sparse matrix. In addition, if load_content is false it does not try to
load the files in memory.
To use text files in a scikit-learn classification or clustering algorithm,
you will need to use the :mod`~sklearn.feature_extraction.text` module to
build a feature extraction transformer that suits your problem.
If you set load_content=True, you should also specify the encoding of the
text using the 'encoding' parameter. For many modern text files, 'utf-8'
will be the correct encoding. If you leave encoding equal to None, then the
content will be made of bytes instead of Unicode, and you will not be able
to use most functions in :mod:`~sklearn.feature_extraction.text`.
Similar feature extractors should be built for other kind of unstructured
data input such as images, audio, video, ...
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
container_path : string or unicode
Path to the main folder holding one subfolder per category
description : string or unicode, optional (default=None)
A paragraph describing the characteristic of the dataset: its source,
reference, etc.
categories : A collection of strings or None, optional (default=None)
If None (default), load all the categories. If not None, list of
category names to load (other categories ignored).
load_content : bool, optional (default=True)
Whether to load or not the content of the different files. If true a
'data' attribute containing the text information is present in the data
structure returned. If not, a filenames attribute gives the path to the
files.
shuffle : bool, optional (default=True)
Whether or not to shuffle the data: might be important for models that
make the assumption that the samples are independent and identically
distributed (i.i.d.), such as stochastic gradient descent.
encoding : string or None (default is None)
If None, do not try to decode the content of the files (e.g. for images
or other non-text content). If not None, encoding to use to decode text
files to Unicode if load_content is True.
decode_error : {'strict', 'ignore', 'replace'}, optional
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. Passed as keyword
argument 'errors' to bytes.decode.
random_state : int, RandomState instance or None, default=0
Determines random number generation for dataset shuffling. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
data : Bunch
Dictionary-like object, with the following attributes.
For more information, see :class:`sklearn.utils.Bunch`:
data : list of str
Only present when `load_content=True`.
The raw text data to learn.
target : ndarray
The target labels (integer index).
target_names : list
The names of target classes.
DESCR : str
The full description of the dataset.
filenames:
The filenames holding the dataset.
"""
target = []
target_names = []
filenames = []
folders = [f for f in sorted(listdir(container_path))
if isdir(join(container_path, f))]
if categories is not None:
folders = [f for f in folders if f in categories]
for label, folder in enumerate(folders):
target_names.append(folder)
folder_path = join(container_path, folder)
documents = [join(folder_path, d)
for d in sorted(listdir(folder_path))]
target.extend(len(documents) * [label])
filenames.extend(documents)
# convert to array for fancy indexing
filenames = np.array(filenames)
target = np.array(target)
if shuffle:
random_state = check_random_state(random_state)
indices = np.arange(filenames.shape[0])
random_state.shuffle(indices)
filenames = filenames[indices]
target = target[indices]
if load_content:
data = []
for filename in filenames:
with open(filename, 'rb') as f:
data.append(f.read())
if encoding is not None:
data = [d.decode(encoding, decode_error) for d in data]
return Bunch(data=data,
filenames=filenames,
target_names=target_names,
target=target,
DESCR=description)
return Bunch(filenames=filenames,
target_names=target_names,
target=target,
DESCR=description)
|
4,603 | def test_invalid_filetype(tmp_path):
"""Invalid file types/associated files for load method."""
bad_nii, bad_conf = create_tmp_filepath(tmp_path, copy_confounds=True,
old_deriveative_suffix=False)
conf, _ = load_confounds(bad_nii)
# more than one legal filename for confounds
add_conf = "test_desc-confounds_regressors.tsv"
leagal_confounds, _ = get_leagal_confound()
leagal_confounds.to_csv(tmp_path / add_conf, sep="\t", index=False)
with pytest.raises(ValueError) as info:
load_confounds(bad_nii)
assert "more than one" in str(info.value)
(tmp_path / add_conf).unlink() # Remove for the rest of the tests to run
# invalid fmriprep version: confound file with no header (<1.0)
fake_confounds = np.random.rand(30, 20)
np.savetxt(bad_conf, fake_confounds, delimiter="\t")
with pytest.raises(ValueError) as error_log:
load_confounds(bad_nii)
assert "The confound file contains no header." in str(error_log.value)
# invalid fmriprep version: old camel case header (<1.2)
leagal_confounds, _ = get_leagal_confound()
camel_confounds = leagal_confounds.copy()
camel_confounds.columns = [
_to_camel_case(col_name) for col_name in leagal_confounds.columns
]
camel_confounds.to_csv(bad_conf, sep="\t", index=False)
with pytest.raises(ValueError) as error_log:
load_confounds(bad_nii)
assert "contains header in camel case." in str(error_log.value)
# create a empty nifti file with no associated confound file
# We only need the path to check this
no_conf = "no_confound_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz"
no_confound = tmp_path / no_conf
no_confound.touch()
with pytest.raises(ValueError):
load_confounds(bad_nii)
| def test_invalid_filetype(tmp_path):
"""Invalid file types/associated files for load method."""
bad_nii, bad_conf = create_tmp_filepath(tmp_path, copy_confounds=True,
old_derivative_suffix=False)
conf, _ = load_confounds(bad_nii)
# more than one legal filename for confounds
add_conf = "test_desc-confounds_regressors.tsv"
leagal_confounds, _ = get_leagal_confound()
leagal_confounds.to_csv(tmp_path / add_conf, sep="\t", index=False)
with pytest.raises(ValueError) as info:
load_confounds(bad_nii)
assert "more than one" in str(info.value)
(tmp_path / add_conf).unlink() # Remove for the rest of the tests to run
# invalid fmriprep version: confound file with no header (<1.0)
fake_confounds = np.random.rand(30, 20)
np.savetxt(bad_conf, fake_confounds, delimiter="\t")
with pytest.raises(ValueError) as error_log:
load_confounds(bad_nii)
assert "The confound file contains no header." in str(error_log.value)
# invalid fmriprep version: old camel case header (<1.2)
leagal_confounds, _ = get_leagal_confound()
camel_confounds = leagal_confounds.copy()
camel_confounds.columns = [
_to_camel_case(col_name) for col_name in leagal_confounds.columns
]
camel_confounds.to_csv(bad_conf, sep="\t", index=False)
with pytest.raises(ValueError) as error_log:
load_confounds(bad_nii)
assert "contains header in camel case." in str(error_log.value)
# create a empty nifti file with no associated confound file
# We only need the path to check this
no_conf = "no_confound_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz"
no_confound = tmp_path / no_conf
no_confound.touch()
with pytest.raises(ValueError):
load_confounds(bad_nii)
|
32,871 | def _attempt_patch_module(module):
# type: (str) -> bool
"""_patch_module will attempt to monkey patch the module.
Returns if the module got patched.
Can also raise errors if it fails.
"""
path = "ddtrace.contrib.%s" % module
with _LOCK:
if module in _PATCHED_MODULES and module not in _PATCH_ON_IMPORT:
log.debug("already patched: %s", path)
return False
try:
imported_module = importlib.import_module(path)
except ImportError:
# if the import fails, the integration is not available
raise ModuleNotFoundException(
"integration '%s' not available in ddtrace.contrib, module will not have traces available" % path
)
else:
# if patch() is not available in the module, it means
# that the library is not installed in the environment
if not hasattr(imported_module, "patch"):
raise AttributeError(
"%s.patch is not found. '%s' is not configured for this environment" % (imported_module, module)
)
imported_module.patch() # type: ignore
_PATCHED_MODULES.add(module)
return True
| def _attempt_patch_module(module):
# type: (str) -> bool
"""_patch_module will attempt to monkey patch the module.
Returns if the module got patched.
Can also raise errors if it fails.
"""
path = "ddtrace.contrib.%s" % module
with _LOCK:
if module in _PATCHED_MODULES and module not in _PATCH_ON_IMPORT:
log.debug("already patched: %s", path)
return False
try:
imported_module = importlib.import_module(path)
except ImportError:
# if the import fails, the integration is not available
raise ModuleNotFoundException(
"integration '%s' not available in ddtrace.contrib, module will not have traces available" % path
)
else:
# if patch() is not available in the module, it means
# that the library is not installed in the environment
if not hasattr(imported_module, "patch"):
raise AttributeError(
"%s.patch is not found. '%s' is not configured for this environment" % (path, module)
)
imported_module.patch() # type: ignore
_PATCHED_MODULES.add(module)
return True
|
36,523 | def get_gdb_version():
try:
cmd = ["gdb", "-nx", "--version"]
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True)
with proc:
version, stderr = proc.communicate()
if proc.returncode:
raise Exception(f"Command {' '.join(cmd)!r} failed "
f"with exit code {proc.returncode}: "
f"stdout={version!r} stderr={stderr!r}")
except OSError:
# This is what "no gdb" looks like. There may, however, be other
# errors that manifest this way too.
raise unittest.SkipTest("Couldn't find gdb on the path")
# Regex to parse:
# 'GNU gdb (GDB; SUSE Linux Enterprise 12) 7.7\n' -> 7.7
# 'GNU gdb (GDB) Fedora 7.9.1-17.fc22\n' -> 7.9
# 'GNU gdb 6.1.1 [FreeBSD]\n' -> 6.1
# 'GNU gdb (GDB) Fedora (7.5.1-37.fc18)\n' -> 7.5
match = re.search(r"^GNU gdb.*?\b(\d+)\.(\d+)", version)
if match is None:
raise Exception("unable to parse GDB version: %r" % version)
return (version, int(match.group(1)), int(match.group(2)))
| def get_gdb_version():
try:
cmd = ["gdb", "-nx", "--version"]
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True)
with proc:
version, stderr = proc.communicate()
if proc.returncode != 0:
raise Exception(f"Command {' '.join(cmd)!r} failed "
f"with exit code {proc.returncode}: "
f"stdout={version!r} stderr={stderr!r}")
except OSError:
# This is what "no gdb" looks like. There may, however, be other
# errors that manifest this way too.
raise unittest.SkipTest("Couldn't find gdb on the path")
# Regex to parse:
# 'GNU gdb (GDB; SUSE Linux Enterprise 12) 7.7\n' -> 7.7
# 'GNU gdb (GDB) Fedora 7.9.1-17.fc22\n' -> 7.9
# 'GNU gdb 6.1.1 [FreeBSD]\n' -> 6.1
# 'GNU gdb (GDB) Fedora (7.5.1-37.fc18)\n' -> 7.5
match = re.search(r"^GNU gdb.*?\b(\d+)\.(\d+)", version)
if match is None:
raise Exception("unable to parse GDB version: %r" % version)
return (version, int(match.group(1)), int(match.group(2)))
|
52,488 | def service(service_name, service_option):
if os.path.basename(which('systemctl') or '') == 'systemctl' and is_running_systemd():
status = exec_cmd(f"sudo systemctl {service_option} {service_name}", _raise=False)
if service_option == "status":
return status == 0
if service_option == "reload":
if status == 0:
exec_cmd(f"sudo systemctl {service_option} {service_name}")
else:
exec_cmd(f"sudo systemctl start {service_name}")
elif os.path.basename(which('service') or '') == 'service':
status = exec_cmd(f"sudo service {service_name} {service_option}", _raise=False)
if service_option == "status":
return status == 0
if service_option == "reload":
if status == 0:
exec_cmd(f"sudo service {service_name} {service_option}")
else:
exec_cmd(f"sudo service start {service_option}")
else:
# look for 'service_manager' and 'service_manager_command' in environment
service_manager = os.environ.get("BENCH_SERVICE_MANAGER")
if service_manager:
service_manager_command = (
os.environ.get("BENCH_SERVICE_MANAGER_COMMAND")
or f"{service_manager} {service_option} {service}"
)
exec_cmd(service_manager_command)
else:
log(f"No service manager found: '{service_name} {service_option}' failed to execute", level=2)
| def service(service_name, service_option):
if os.path.basename(which('systemctl') or '') == 'systemctl' and is_running_systemd():
status = exec_cmd(f"sudo systemctl {service_option} {service_name}", _raise=False)
if service_option == "status":
return status == 0
if service_option == "reload":
if status != 0:
exec_cmd(f"sudo systemctl start {service_name}")
elif os.path.basename(which('service') or '') == 'service':
status = exec_cmd(f"sudo service {service_name} {service_option}", _raise=False)
if service_option == "status":
return status == 0
if service_option == "reload":
if status == 0:
exec_cmd(f"sudo service {service_name} {service_option}")
else:
exec_cmd(f"sudo service start {service_option}")
else:
# look for 'service_manager' and 'service_manager_command' in environment
service_manager = os.environ.get("BENCH_SERVICE_MANAGER")
if service_manager:
service_manager_command = (
os.environ.get("BENCH_SERVICE_MANAGER_COMMAND")
or f"{service_manager} {service_option} {service}"
)
exec_cmd(service_manager_command)
else:
log(f"No service manager found: '{service_name} {service_option}' failed to execute", level=2)
|
14,287 | def deprecated(
msg: str, category: Type[Warning] = DeprecationWarning
) -> Callable[[AnyCallableT], AnyCallableT]:
"""Emits a DeprecationWarning when the decorated function is called.
This decorator works on normal functions, methods, and properties.
Usage on properties requires the ``@property`` decorator to appear outside the
``@deprecated`` decorator.
Concrete classes can be deprecated by decorating their ``__init__`` or ``__new__``
method.
Args
msg: the deprecation message
category: the warning class to use
"""
def decorator(f: AnyCallableT) -> AnyCallableT:
@functools.wraps(f)
def wrapper(*args: Any, **kwargs: Any) -> Any:
warnings.warn(msg, category=category, stacklevel=2)
return f(*args, **kwargs)
return wrapper
return decorator
| def deprecated(
msg: str, category: Type[Warning] = DeprecationWarning
) -> Callable[[AnyCallableT], AnyCallableT]:
"""Emits a DeprecationWarning when the decorated function is called.
This decorator works on normal functions, methods, and properties.
Usage on properties requires the ``@property`` decorator to appear outside the
``@deprecated`` decorator.
Concrete classes can be deprecated by decorating their ``__init__`` or ``__new__``
method.
Args:
msg: the deprecation message
category: the warning class to use
"""
def decorator(f: AnyCallableT) -> AnyCallableT:
@functools.wraps(f)
def wrapper(*args: Any, **kwargs: Any) -> Any:
warnings.warn(msg, category=category, stacklevel=2)
return f(*args, **kwargs)
return wrapper
return decorator
|
12,049 | def _generate_cubes(
header, column_headings, coords, data_arrays, cell_methods=None
):
"""
Yield :class:`iris.cube.Cube` instances given
the headers, column headings, coords and data_arrays extracted
from a NAME file.
"""
for i, data_array in enumerate(data_arrays):
# Turn the dictionary of column headings with a list of header
# information for each field into a dictionary of headings for
# just this field.
field_headings = {k: v[i] for k, v in column_headings.items()}
# Make a cube.
cube = iris.cube.Cube(data_array)
# Determine the name and units.
name = "{} {}".format(
field_headings["Species"], field_headings["Quantity"]
)
name = name.upper().replace(" ", "_")
cube.rename(name)
# Some units are not in SI units, are missing spaces or typed
# in the wrong case. _parse_units returns units that are
# recognised by Iris.
cube.units = _parse_units(field_headings["Units"])
# Define and add the singular coordinates of the field (flight
# level, time etc.)
if "Z" in field_headings:
(upper_bound,) = [
field_headings["... to [Z]"]
if "... to [Z]" in field_headings
else None
]
(lower_bound,) = [
field_headings["... from [Z]"]
if "... from [Z]" in field_headings
else None
]
z_coord = _cf_height_from_name(
field_headings["Z"],
upper_bound=upper_bound,
lower_bound=lower_bound,
)
cube.add_aux_coord(z_coord)
# Define the time unit and use it to serialise the datetime for
# the time coordinate.
time_unit = cf_units.Unit(
"hours since epoch", calendar=cf_units.CALENDAR_STANDARD
)
# Build time, height, latitude and longitude coordinates.
for coord in coords:
pts = coord.values
coord_sys = None
if coord.name == "latitude" or coord.name == "longitude":
coord_units = "degrees"
coord_sys = iris.coord_systems.GeogCS(EARTH_RADIUS)
if (
coord.name == "projection_x_coordinate"
or coord.name == "projection_y_coordinate"
):
coord_units = "m"
coord_sys = iris.coord_systems.OSGB()
if coord.name == "height":
coord_units = "m"
long_name = "height above ground level"
pts = coord.values
if coord.name == "altitude":
coord_units = "m"
long_name = "altitude above sea level"
pts = coord.values
if coord.name == "air_pressure":
coord_units = "Pa"
pts = coord.values
if coord.name == "flight_level":
pts = coord.values
long_name = "flight_level"
coord_units = _parse_units("FL")
if coord.name == "time":
coord_units = time_unit
pts = time_unit.date2num(coord.values).astype(float)
if coord.dimension is not None:
if coord.name == "longitude":
circular = iris.util._is_circular(pts, 360.0)
else:
circular = False
if coord.name == "flight_level":
icoord = DimCoord(
points=pts, units=coord_units, long_name=long_name
)
else:
icoord = DimCoord(
points=pts,
standard_name=coord.name,
units=coord_units,
coord_system=coord_sys,
circular=circular,
)
if coord.name == "height" or coord.name == "altitude":
icoord.long_name = long_name
if (
coord.name == "time"
and "Av or Int period" in field_headings
):
dt = coord.values - field_headings["Av or Int period"]
bnds = time_unit.date2num(np.vstack((dt, coord.values)).T)
icoord.bounds = bnds.astype(float)
else:
icoord.guess_bounds()
cube.add_dim_coord(icoord, coord.dimension)
else:
icoord = AuxCoord(
points=pts[i],
standard_name=coord.name,
coord_system=coord_sys,
units=coord_units,
)
if (
coord.name == "time"
and "Av or Int period" in field_headings
):
dt = coord.values - field_headings["Av or Int period"]
bnds = time_unit.date2num(np.vstack((dt, coord.values)).T)
icoord.bounds = bnds[i, :].astype(float)
cube.add_aux_coord(icoord)
# Headings/column headings which are encoded elsewhere.
headings = [
"X",
"Y",
"Z",
"Time",
"T",
"Units",
"Av or Int period",
"... from [Z]",
"... to [Z]",
"X grid origin",
"Y grid origin",
"X grid size",
"Y grid size",
"X grid resolution",
"Y grid resolution",
"Number of field cols",
"Number of preliminary cols",
"Number of fields",
"Number of series",
"Output format",
]
# Add the Main Headings as attributes.
for key, value in header.items():
if value is not None and value != "" and key not in headings:
cube.attributes[key] = value
# Add the Column Headings as attributes
for key, value in field_headings.items():
if value is not None and value != "" and key not in headings:
cube.attributes[key] = value
if cell_methods is not None:
cell_method = cell_methods[i]
if cell_method is not None:
cube.add_cell_method(cell_method)
yield cube
| def _generate_cubes(
header, column_headings, coords, data_arrays, cell_methods=None
):
"""
Yield :class:`iris.cube.Cube` instances given
the headers, column headings, coords and data_arrays extracted
from a NAME file.
"""
for i, data_array in enumerate(data_arrays):
# Turn the dictionary of column headings with a list of header
# information for each field into a dictionary of headings for
# just this field.
field_headings = {k: v[i] for k, v in column_headings.items()}
# Make a cube.
cube = iris.cube.Cube(data_array)
# Determine the name and units.
name = "{} {}".format(
field_headings["Species"], field_headings["Quantity"]
)
name = name.upper().replace(" ", "_")
cube.rename(name)
# Some units are not in SI units, are missing spaces or typed
# in the wrong case. _parse_units returns units that are
# recognised by Iris.
cube.units = _parse_units(field_headings["Units"])
# Define and add the singular coordinates of the field (flight
# level, time etc.)
if "Z" in field_headings:
(upper_bound,) = [
field_headings["... to [Z]"]
if "... to [Z]" in field_headings
else None
]
(lower_bound,) = [
field_headings["... from [Z]"]
if "... from [Z]" in field_headings
else None
]
z_coord = _cf_height_from_name(
field_headings["Z"],
upper_bound=upper_bound,
lower_bound=lower_bound,
)
cube.add_aux_coord(z_coord)
# Define the time unit and use it to serialise the datetime for
# the time coordinate.
time_unit = cf_units.Unit(
"hours since epoch", calendar=cf_units.CALENDAR_STANDARD
)
# Build time, height, latitude and longitude coordinates.
for coord in coords:
pts = coord.values
coord_sys = None
if coord.name == "latitude" or coord.name == "longitude":
coord_units = "degrees"
coord_sys = iris.coord_systems.GeogCS(EARTH_RADIUS)
if (
coord.name == "projection_x_coordinate"
or coord.name == "projection_y_coordinate"
):
coord_units = "m"
coord_sys = iris.coord_systems.OSGB()
if coord.name == "height":
coord_units = "m"
long_name = "height above ground level"
pts = coord.values
if coord.name == "altitude":
coord_units = "m"
long_name = "altitude above sea level"
pts = coord.values
if coord.name == "air_pressure":
coord_units = "Pa"
pts = coord.values
if coord.name == "flight_level":
pts = coord.values
long_name = "flight_level"
coord_units = _parse_units("FL")
if coord.name == "time":
coord_units = time_unit
pts = time_unit.date2num(coord.values).astype(float)
if coord.dimension is not None:
if coord.name == "longitude":
circular = iris.util._is_circular(pts, 360.0)
else:
circular = False
if coord.name == "flight_level":
icoord = DimCoord(
points=pts, units=coord_units, long_name=long_name
)
else:
icoord = DimCoord(
points=pts,
standard_name=coord.name,
units=coord_units,
coord_system=coord_sys,
circular=circular,
)
if coord.name == "height" or coord.name == "altitude":
icoord.long_name = long_name
if (
coord.name == "time"
and "Av or Int period" in field_headings
):
dt = coord.values - field_headings["Av or Int period"]
bnds = time_unit.date2num(np.vstack((dt, coord.values)).T)
icoord.bounds = bnds.astype(float)
else:
icoord.guess_bounds()
cube.add_dim_coord(icoord, coord.dimension)
else:
icoord = AuxCoord(
points=pts[i],
standard_name=coord.name,
coord_system=coord_sys,
units=coord_units,
)
if (
coord.name == "time"
and "Av or Int period" in field_headings
):
dt = coord.values - field_headings["Av or Int period"]
bnds = time_unit.date2num(np.vstack((dt, coord.values)).T)
icoord.bounds = bnds[i, :].astype(float)
cube.add_aux_coord(icoord)
# Headings/column headings which are encoded elsewhere.
headings = [
"X",
"Y",
"Z",
"Time",
"T",
"Units",
"Av or Int period",
"... from [Z]",
"... to [Z]",
"X grid origin",
"Y grid origin",
"X grid size",
"Y grid size",
"X grid resolution",
"Y grid resolution",
"Number of field cols",
"Number of preliminary cols",
"Number of fields",
"Number of series",
"Output format",
]
# Add the Main Headings as attributes.
for key, value in header.items():
if value is not None and value != "" and key not in headings:
cube.attributes[key] = value
# Add the Column Headings as attributes
for key, value in field_headings.items():
if value is not None and value != "" and key not in headings:
cube.attributes[key] = value
if cell_methods is not None:
cell_method, = cell_methods
if cell_method is not None:
cube.add_cell_method(cell_method)
yield cube
|
8,919 | def _join(bot, channel, key=None, save=True):
if not channel:
return
if not key:
bot.join(channel)
else:
bot.join(channel, key)
if save:
channels = dict(_get_config_channels(bot.config.core.channels))
# save only if channel is new or key has been changed
if channel not in channels or channels[channel] != key:
channels[channel] = key
_set_config_channels(bot, channels)
LOGGER.info('Updated core.channels with "%s".', channel)
| def _join(bot, channel, key=None, save=True):
if not channel:
return
if not key:
bot.join(channel)
else:
bot.join(channel, key)
if save:
channels = dict(_get_config_channels(bot.config.core.channels))
# save only if channel is new or key has been changed
if channel not in channels or channels[channel] != key:
channels[channel] = key
_set_config_channels(bot, channels)
LOGGER.info('Added "%s" to core.channels.', channel)
|
31,023 | def panorama_route_lookup(dest_ip: str, virtual_router=None):
"""
Given the provided ip address, looks up the outgoing interface and zone on the firewall.
"""
if not VSYS:
raise Exception("The 'panorama-route-lookup' command is only relevant for a Firewall instance.")
response = panorama_get_routes(virtual_router)
if 'entry' not in response['response']['result']:
raise Exception("No routes returned from the Firewall.")
else:
routes = response['response']['result']['entry']
ip_addr = ipaddress.ip_address(dest_ip)
current_match = None
matched_route = None
for route in routes:
subnet_raw = route['destination']
subnet = ipaddress.ip_network(subnet_raw)
# If the given IP address is in the subnet
if ip_addr in subnet:
# IF we haven't matched yet
if not current_match:
current_match = subnet
matched_route = route
# If this is a greater subnet
elif subnet.prefixlen > current_match.prefixlen:
current_match = subnet
matched_route = route
if matched_route:
return matched_route
else:
raise Exception("Route not found.")
| def panorama_route_lookup(dest_ip: str, virtual_router=None):
"""
Given the provided ip address, looks up the outgoing interface and zone on the firewall.
"""
if not VSYS:
raise Exception("The 'panorama-route-lookup' command is only relevant for a Firewall instance.")
response = panorama_get_routes(virtual_router)
if 'entry' not in response['response']['result']:
raise Exception("No routes returned from the Firewall.")
else:
routes = response['response']['result']['entry']
ip_addr = ipaddress.ip_address(dest_ip)
current_match = None
matched_route = None
for route in routes:
subnet_raw = route.get('destination')
subnet = ipaddress.ip_network(subnet_raw)
# If the given IP address is in the subnet
if ip_addr in subnet:
# IF we haven't matched yet
if not current_match:
current_match = subnet
matched_route = route
# If this is a greater subnet
elif subnet.prefixlen > current_match.prefixlen:
current_match = subnet
matched_route = route
if matched_route:
return matched_route
else:
raise Exception("Route not found.")
|
2,589 | def test_lof_performance(global_dtype):
# Generate train/test data
rng = check_random_state(2)
X = 0.3 * rng.randn(120, 2).astype(global_dtype)
X_train = X[:100]
# Generate some abnormal novel observations
X_outliers = rng.uniform(low=-4, high=4, size=(20, 2))
X_test = np.r_[X[100:], X_outliers]
y_test = np.array([0] * 20 + [1] * 20)
# fit the model for novelty detection
clf = neighbors.LocalOutlierFactor(novelty=True).fit(X_train)
# predict scores (the lower, the more normal)
y_pred = -clf.decision_function(X_test)
# check that roc_auc is good
assert roc_auc_score(y_test, y_pred) > 0.99
| def test_lof_performance(global_dtype):
# Generate train/test data
rng = check_random_state(2)
X = 0.3 * rng.randn(120, 2).astype(global_dtype, copy=False)
X_train = X[:100]
# Generate some abnormal novel observations
X_outliers = rng.uniform(low=-4, high=4, size=(20, 2))
X_test = np.r_[X[100:], X_outliers]
y_test = np.array([0] * 20 + [1] * 20)
# fit the model for novelty detection
clf = neighbors.LocalOutlierFactor(novelty=True).fit(X_train)
# predict scores (the lower, the more normal)
y_pred = -clf.decision_function(X_test)
# check that roc_auc is good
assert roc_auc_score(y_test, y_pred) > 0.99
|
42,084 | def _get_optimization_history_plot(
studies: List[Study],
target: Optional[Callable[[FrozenTrial], float]],
target_name: str,
error_bar: bool,
) -> "go.Figure":
layout = go.Layout(
title="Optimization History Plot",
xaxis={"title": "trial number"},
yaxis={"title": target_name},
)
if len(studies) == 0:
_logger.warning("There are no studies.")
return go.Figure(data=[], layout=layout)
all_trials = list(
itertools.chain.from_iterable(
(
trial
for trial in study.get_trials(deepcopy=False)
if trial.state == TrialState.COMPLETE
)
for study in studies
)
)
if len(all_trials) == 0:
_logger.warning("There are no complete trials.")
return go.Figure(data=[], layout=layout)
if error_bar:
return _get_optimization_histories_with_error_bar(studies, target, target_name, layout)
else:
return _get_optimization_histories(studies, target, target_name, layout)
| def _get_optimization_history_plot(
studies: List[Study],
target: Optional[Callable[[FrozenTrial], float]],
target_name: str,
error_bar: bool,
) -> "go.Figure":
layout = go.Layout(
title="Optimization History Plot",
xaxis={"title": "Trial Number"},
yaxis={"title": target_name},
)
if len(studies) == 0:
_logger.warning("There are no studies.")
return go.Figure(data=[], layout=layout)
all_trials = list(
itertools.chain.from_iterable(
(
trial
for trial in study.get_trials(deepcopy=False)
if trial.state == TrialState.COMPLETE
)
for study in studies
)
)
if len(all_trials) == 0:
_logger.warning("There are no complete trials.")
return go.Figure(data=[], layout=layout)
if error_bar:
return _get_optimization_histories_with_error_bar(studies, target, target_name, layout)
else:
return _get_optimization_histories(studies, target, target_name, layout)
|
43,270 | def get_seed(seed):
"""
Convenience function to use the global seed by default if the provided seed is None.
Args:
seed (int, optional): seed value
Returns:
seed if not None, otherwise the global seed
"""
if seed is None:
return _sg_seed
else:
return seed
| def get_seed(seed=None):
"""
Convenience function to use the global seed by default if the provided seed is None.
Args:
seed (int, optional): seed value
Returns:
seed if not None, otherwise the global seed
"""
if seed is None:
return _sg_seed
else:
return seed
|
30,866 | def update_output(output, simple_values, element_values, info_dict):
for info in info_dict:
info_type = info.get('type', '')
if info_type == 'simple':
field = dict_safe_get(simple_values, [info.get('field')], {}, dict)
output[info['header']] = dict_safe_get(field, ['values', 0])
elif info_type == 'element':
field = dict_safe_get(element_values, [info.get('field')], {}, dict)
output[info['header']] = dict_safe_get(field, ['elementValues', 0, 'name'])
elif info_type == 'time':
field = dict_safe_get(simple_values, [info.get('field')], {}, dict)
output[info['header']] = translate_timestamp(dict_safe_get(field, ['values', 0], u'', unicode)) if\
dict_safe_get(field, ['values', 0]) else ''
return output
| def update_output(output, simple_values, element_values, info_dict):
for info in info_dict:
info_type = info.get('type', '')
if info_type == 'simple':
output[info['header']] = dict_safe_get(simple_values, [info.get('field'), 'values', 0])
elif info_type == 'element':
field = dict_safe_get(element_values, [info.get('field')], {}, dict)
output[info['header']] = dict_safe_get(field, ['elementValues', 0, 'name'])
elif info_type == 'time':
field = dict_safe_get(simple_values, [info.get('field')], {}, dict)
output[info['header']] = translate_timestamp(dict_safe_get(field, ['values', 0], u'', unicode)) if\
dict_safe_get(field, ['values', 0]) else ''
return output
|
38,968 | def make_literal_validator(type_: Any) -> Callable[[Any], Any]:
permitted_choices: Tuple[Any, ...] = all_literal_values(type_)
allowed_choices_set = set(permitted_choices)
def literal_validator(v: Any) -> Any:
if v not in allowed_choices_set:
raise errors.WrongConstantError(given=v, permitted=permitted_choices)
return v
return literal_validator
| def make_literal_validator(type_: Any) -> Callable[[Any], Any]:
permitted_choices = all_literal_values(type_)
allowed_choices_set = set(permitted_choices)
def literal_validator(v: Any) -> Any:
if v not in allowed_choices_set:
raise errors.WrongConstantError(given=v, permitted=permitted_choices)
return v
return literal_validator
|
37,360 | def _match_Num_or_Parameter(node: ast.AST) -> bool:
"""Match number of circuit Parameter Expression."""
if isinstance(node, ast.Num):
return True
elif (isinstance(node, ast.Constant) and
isinstance(node.value, circuit.ParameterExpression)):
return True
return False
| def _match_num_or_parameter(node: ast.AST) -> bool:
"""Match number of circuit Parameter Expression."""
if isinstance(node, ast.Num):
return True
elif (isinstance(node, ast.Constant) and
isinstance(node.value, circuit.ParameterExpression)):
return True
return False
|
44,514 | def map_resource_to_metadata_type(
mb_sdk_type: aiplatform.base.AiPlatformResourceNoun
) -> Tuple[str, str]:
"""Maps an MB SDK type to Metadata type.
Returns:
Tuple of component parameter name and metadata type.
ie aiplatform.Model -> "model", "Model"
"""
# type should always be in this map
if is_mb_sdk_resource_noun_type(mb_sdk_type):
for key in RESOURCE_TO_METADATA_TYPE.keys():
if issubclass(mb_sdk_type, key):
parameter_name = key.__name__.split('.')[-1].lower()
# replace leading _ for example _Dataset
if parameter_name.startswith("_"):
parameter_name = parameter_name[1:]
return parameter_name, RESOURCE_TO_METADATA_TYPE[key]
# handles the case of exported_dataset
# TODO generalize to all serializable outputs
if is_serializable_to_json(mb_sdk_type):
return "exported_dataset", "JsonArray"
# handles the case of imported datasets
if mb_sdk_type == '_Dataset':
return "Dataset", "Dataset"
| def map_resource_to_metadata_type(
mb_sdk_type: aiplatform.base.AiPlatformResourceNoun
) -> Tuple[str, str]:
"""Maps an MB SDK type to Metadata type.
Returns:
Tuple of component parameter name and metadata type.
ie aiplatform.Model -> "model", "Model"
"""
# type should always be in this map
if is_mb_sdk_resource_noun_type(mb_sdk_type):
for key in RESOURCE_TO_METADATA_TYPE.keys():
if issubclass(mb_sdk_type, key):
parameter_name = key.__name__.split('.')[-1].lower()
# replace leading _ for example _Dataset
if parameter_name.startswith("_"):
parameter_name = parameter_name[1:]
return parameter_name, RESOURCE_TO_METADATA_TYPE[key]
# handles the case of exported_dataset
# TODO generalize to all serializable outputs
if is_serializable_to_json(mb_sdk_type):
return "exported_dataset", "JsonArray"
# handles the case of imported datasets
if mb_sdk_type == '_Dataset':
return "dataset", "Dataset"
|
40,595 | def annuity(n, r):
"""Calculate the annuity factor for an asset with lifetime n years and
discount rate of r, e.g. annuity(20, 0.05) * 20 = 1.6"""
if isinstance(r, pd.Series):
return pd.Series(1/n, index=r.index).where(r == 0, r/(1. - 1./(1.+r)**n))
elif r > 0:
return r / (1. - 1./(1.+r)**n)
else:
return 1 / n
| def calculate_annuity(n, r):
"""Calculate the annuity factor for an asset with lifetime n years and
discount rate of r, e.g. annuity(20, 0.05) * 20 = 1.6"""
if isinstance(r, pd.Series):
return pd.Series(1/n, index=r.index).where(r == 0, r/(1. - 1./(1.+r)**n))
elif r > 0:
return r / (1. - 1./(1.+r)**n)
else:
return 1 / n
|
22,239 | def install_update_trigger(migrate_engine):
"""Installs trigger on database table to update history table
when contents have changed. Installs a function and a trigger
for postgres, other sql variants only require the trigger def
"""
pg_create_trigger = DDL("""
CREATE FUNCTION update_history_content_update_time()
RETURNS trigger
LANGUAGE 'plpgsql'
AS $BODY$
begin
update history h
set update_time = current_timestamp
from history_dataset_association hda
where h.id = hda.history_id
and hda.dataset_id = NEW.id;
return NEW;
end;
$BODY$;
CREATE TRIGGER update_history_update_time
BEFORE INSERT OR DELETE OR UPDATE
ON dataset
FOR EACH ROW
EXECUTE PROCEDURE update_history_content_update_time();
""").execute_if(dialect='postgresql')
pg_create_trigger.execute(bind=migrate_engine)
# Looks like sqlite doesn't like multiple actions in some
# variants, so we build 3 triggers
build_trigger('INSERT').execute(bind=migrate_engine)
build_trigger('UPDATE').execute(bind=migrate_engine)
build_trigger('DELETE').execute(bind=migrate_engine)
| def install_update_trigger(migrate_engine):
"""Installs trigger on database table to update history table
when contents have changed. Installs a function and a trigger
for postgres, other sql variants only require the trigger def
"""
pg_create_trigger = DDL("""
CREATE FUNCTION update_history_content_update_time()
RETURNS trigger
LANGUAGE 'plpgsql'
AS $BODY$
begin
update history h
set update_time = current_timestamp
from history_dataset_association hda
where h.id = hda.history_id
and hda.dataset_id = NEW.id;
return NEW;
end;
$BODY$;
CREATE TRIGGER update_history_update_time
AFTER INSERT OR DELETE OR UPDATE
ON dataset
FOR EACH ROW
EXECUTE PROCEDURE update_history_content_update_time();
""").execute_if(dialect='postgresql')
pg_create_trigger.execute(bind=migrate_engine)
# Looks like sqlite doesn't like multiple actions in some
# variants, so we build 3 triggers
build_trigger('INSERT').execute(bind=migrate_engine)
build_trigger('UPDATE').execute(bind=migrate_engine)
build_trigger('DELETE').execute(bind=migrate_engine)
|
36,246 | def sam(
adata: AnnData,
max_iter: int = 10,
num_norm_avg: int = 50,
k: int = 20,
distance: str = 'correlation',
standardization: Optional[str] = 'Normalizer',
weight_pcs: bool = True,
npcs: Optional[int] = None,
n_genes: Optional[int] = None,
projection: Optional[str] = 'umap',
inplace: bool = True,
verbose: bool = True,
) -> Optional[AnnData]:
"""Self-Assembling Manifolds single-cell RNA sequencing analysis tool.
SAM iteratively rescales the input gene expression matrix to emphasize
genes that are spatially variable along the intrinsic manifold of the data.
It outputs the gene weights, nearest neighbor matrix, and a 2D projection.
The AnnData input should contain unstandardized, non-negative values.
Preferably, the data should be log-normalized and no genes should be filtered out.
Parameters
----------
k - int, optional, default 20
The number of nearest neighbors to identify for each cell.
distance : string, optional, default 'correlation'
The distance metric to use when identifying nearest neighbors.
Can be any of the distance metrics supported by sklearn's 'pdist'.
max_iter - int, optional, default 10
The maximum number of iterations SAM will run.
projection - str, optional, default 'umap'
If 'tsne', generates a t-SNE embedding. If 'umap', generates a UMAP
embedding. Otherwise, no embedding will be generated.
standardization - str, optional, default 'Normalizer'
If 'Normalizer', use sklearn.preprocessing.Normalizer, which
normalizes expression data prior to PCA such that each cell has
unit L2 norm. If 'StandardScaler', use
sklearn.preprocessing.StandardScaler, which normalizes expression
data prior to PCA such that each gene has zero mean and unit
variance. Otherwise, do not normalize the expression data. We
recommend using 'StandardScaler' for large datasets with many
expected cell types and 'Normalizer' otherwise.
num_norm_avg - int, optional, default 50
The top 'num_norm_avg' dispersions are averaged to determine the
normalization factor when calculating the weights. This prevents
genes with large spatial dispersions from skewing the distribution
of weights.
weight_pcs - bool, optional, default True
If True, scale the principal components by their eigenvalues. In
datasets with many expected cell types, setting this to False might
improve the resolution as these cell types might be encoded by low-
variance principal components.
npcs - int, optional, default None,
Determines the number of top principal components selected at each
iteration of the SAM algorithm. If None, this number is chosen
automatically based on the size of the dataset. If weight_pcs is
set to True, this parameter primarily affects the runtime of the SAM
algorithm (more PCs = longer runtime).
n_genes - int, optional, default None:
Determines the number of top SAM-weighted genes to use at each iteration
of the SAM algorithm. If None, this number is chosen automatically
based on the size of the dataset. This parameter primarily affects
the runtime of the SAM algorithm (more genes = longer runtime).
inplace - bool, optional, default True:
Set fields in `adata` if True. Otherwise, returns a copy.
verbose - bool, optional, default True:
If True, displays SAM log statements.
Returns
-------
sam - SAM
The SAM object
adata - AnnData
`.var['weights']`
SAM weights for each gene.
`.var['spatial_dispersions']`
Spatial dispersions for each gene (these are used to compute the
SAM weights)
`.var['mask_genes']`
If preprocessed with SAM, this boolean vector indicates which genes
were filtered out (=False).
`.uns['preprocess_args']`
Dictionary of parameters used for preprocessing.
`.uns['run_args']`
Dictionary of parameters used for running SAM.
`.uns['pca_obj']`
The sklearn.decomposition.PCA object.
`.uns['X_processed']`
The standardized and SAM-weighted data fed into PCA.
`.uns['neighbors']`
A dictionary with key 'connectivities' containing the kNN adjacency
matrix output by SAM. If built-in scanpy dimensionality reduction
methods are to be used using the SAM-output AnnData, users
should recompute the neighbors using `.obs['X_pca']` with
`scanpy.pp.neighbors`.
`.uns['ranked_genes']`
Gene IDs ranked in descending order by their SAM weights.
`.obsm['X_pca']`
The principal components output by SAM.
`.obsm['X_umap']`
The UMAP projection output by SAM.
`.layers['X_disp']`
The expression matrix used for nearest-neighbor averaging.
`.layers['X_knn_avg']`
The nearest-neighbor-averaged expression data used for computing the
spatial dispersions of genes.
Example
-------
>>> import scanpy.external as sce
>>> import scanpy as sc
*** Running SAM ***
Assuming we are given an AnnData object called `adata`, we can run the SAM
algorithm as follows:
>>> sam,adata = sce.tl.SAM(adata,inplace=True)
The input AnnData object should contain unstandardized, non-negative
expression values. Preferably, the data should be log-normalized and no
genes should be filtered out.
Please see the documentation for a description of all available parameters.
For more detailed tutorials, please visit the original Github repository:
https://github.com/atarashansky/self-assembling-manifold/tree/master/tutorial
*** Plotting ***
To visualize the output, we can use the built-in `scatter` function (this
assumes that `matplotlib` is installed.)
>>> sam.scatter(projection = 'X_umap')
`scatter` accepts all keyword arguments used in the
`matplotlib.pyplot.scatter` function. Please visit the plotting tutorials
for more information:
https://github.com/atarashansky/self-assembling-manifold/tree/master/tutorial/SAM_Plotting
*** SAMGUI ***
SAM comes with the SAMGUI module, a graphical-user interface written with
`Plotly` and `ipythonwidgets` for interactively exploring and annotating
the scRNAseq data and running SAM.
Dependencies can be installed with Anaconda by following the instructions in
the self-assembling-manifold Github README:
https://github.com/atarashansky/self-assembling-manifold
In a Jupyter notebook, execute the following to launch the interface:
>>> from SAMGUI import SAMGUI
>>> sam_gui = SAMGUI(sam) # sam is your SAM object
>>> sam_gui.SamPlot
This can also be enabled in Jupyer Lab by following the instructions in the
self-assembling-manifold README.
"""
logg.info('Self-assembling manifold')
try:
from SAM import SAM
except ImportError:
raise ImportError(
'\nplease install sam-algorithm: \n\n'
'\tgit clone git://github.com/atarashansky/self-assembling-manifold.git\n'
'\tcd self-assembling-manifold\n'
'\tpip install .'
)
s = SAM(counts=adata, inplace=inplace)
logg.info('Running SAM')
s.run(
max_iter=max_iter,
num_norm_avg=num_norm_avg,
k=k,
distance=distance,
preprocessing=standardization,
weight_PCs=weight_pcs,
npcs=npcs,
n_genes=n_genes,
projection=projection,
verbose=verbose,
)
return (s, adata) if inplace else (s, s.adata)
| def sam(
adata: AnnData,
max_iter: int = 10,
num_norm_avg: int = 50,
k: int = 20,
distance: str = 'correlation',
standardization: Literal['Normalizer', 'StandardScaler'] = 'Normalizer',
weight_pcs: bool = True,
npcs: Optional[int] = None,
n_genes: Optional[int] = None,
projection: Optional[str] = 'umap',
inplace: bool = True,
verbose: bool = True,
) -> Optional[AnnData]:
"""Self-Assembling Manifolds single-cell RNA sequencing analysis tool.
SAM iteratively rescales the input gene expression matrix to emphasize
genes that are spatially variable along the intrinsic manifold of the data.
It outputs the gene weights, nearest neighbor matrix, and a 2D projection.
The AnnData input should contain unstandardized, non-negative values.
Preferably, the data should be log-normalized and no genes should be filtered out.
Parameters
----------
k - int, optional, default 20
The number of nearest neighbors to identify for each cell.
distance : string, optional, default 'correlation'
The distance metric to use when identifying nearest neighbors.
Can be any of the distance metrics supported by sklearn's 'pdist'.
max_iter - int, optional, default 10
The maximum number of iterations SAM will run.
projection - str, optional, default 'umap'
If 'tsne', generates a t-SNE embedding. If 'umap', generates a UMAP
embedding. Otherwise, no embedding will be generated.
standardization - str, optional, default 'Normalizer'
If 'Normalizer', use sklearn.preprocessing.Normalizer, which
normalizes expression data prior to PCA such that each cell has
unit L2 norm. If 'StandardScaler', use
sklearn.preprocessing.StandardScaler, which normalizes expression
data prior to PCA such that each gene has zero mean and unit
variance. Otherwise, do not normalize the expression data. We
recommend using 'StandardScaler' for large datasets with many
expected cell types and 'Normalizer' otherwise.
num_norm_avg - int, optional, default 50
The top 'num_norm_avg' dispersions are averaged to determine the
normalization factor when calculating the weights. This prevents
genes with large spatial dispersions from skewing the distribution
of weights.
weight_pcs - bool, optional, default True
If True, scale the principal components by their eigenvalues. In
datasets with many expected cell types, setting this to False might
improve the resolution as these cell types might be encoded by low-
variance principal components.
npcs - int, optional, default None,
Determines the number of top principal components selected at each
iteration of the SAM algorithm. If None, this number is chosen
automatically based on the size of the dataset. If weight_pcs is
set to True, this parameter primarily affects the runtime of the SAM
algorithm (more PCs = longer runtime).
n_genes - int, optional, default None:
Determines the number of top SAM-weighted genes to use at each iteration
of the SAM algorithm. If None, this number is chosen automatically
based on the size of the dataset. This parameter primarily affects
the runtime of the SAM algorithm (more genes = longer runtime).
inplace - bool, optional, default True:
Set fields in `adata` if True. Otherwise, returns a copy.
verbose - bool, optional, default True:
If True, displays SAM log statements.
Returns
-------
sam - SAM
The SAM object
adata - AnnData
`.var['weights']`
SAM weights for each gene.
`.var['spatial_dispersions']`
Spatial dispersions for each gene (these are used to compute the
SAM weights)
`.var['mask_genes']`
If preprocessed with SAM, this boolean vector indicates which genes
were filtered out (=False).
`.uns['preprocess_args']`
Dictionary of parameters used for preprocessing.
`.uns['run_args']`
Dictionary of parameters used for running SAM.
`.uns['pca_obj']`
The sklearn.decomposition.PCA object.
`.uns['X_processed']`
The standardized and SAM-weighted data fed into PCA.
`.uns['neighbors']`
A dictionary with key 'connectivities' containing the kNN adjacency
matrix output by SAM. If built-in scanpy dimensionality reduction
methods are to be used using the SAM-output AnnData, users
should recompute the neighbors using `.obs['X_pca']` with
`scanpy.pp.neighbors`.
`.uns['ranked_genes']`
Gene IDs ranked in descending order by their SAM weights.
`.obsm['X_pca']`
The principal components output by SAM.
`.obsm['X_umap']`
The UMAP projection output by SAM.
`.layers['X_disp']`
The expression matrix used for nearest-neighbor averaging.
`.layers['X_knn_avg']`
The nearest-neighbor-averaged expression data used for computing the
spatial dispersions of genes.
Example
-------
>>> import scanpy.external as sce
>>> import scanpy as sc
*** Running SAM ***
Assuming we are given an AnnData object called `adata`, we can run the SAM
algorithm as follows:
>>> sam,adata = sce.tl.SAM(adata,inplace=True)
The input AnnData object should contain unstandardized, non-negative
expression values. Preferably, the data should be log-normalized and no
genes should be filtered out.
Please see the documentation for a description of all available parameters.
For more detailed tutorials, please visit the original Github repository:
https://github.com/atarashansky/self-assembling-manifold/tree/master/tutorial
*** Plotting ***
To visualize the output, we can use the built-in `scatter` function (this
assumes that `matplotlib` is installed.)
>>> sam.scatter(projection = 'X_umap')
`scatter` accepts all keyword arguments used in the
`matplotlib.pyplot.scatter` function. Please visit the plotting tutorials
for more information:
https://github.com/atarashansky/self-assembling-manifold/tree/master/tutorial/SAM_Plotting
*** SAMGUI ***
SAM comes with the SAMGUI module, a graphical-user interface written with
`Plotly` and `ipythonwidgets` for interactively exploring and annotating
the scRNAseq data and running SAM.
Dependencies can be installed with Anaconda by following the instructions in
the self-assembling-manifold Github README:
https://github.com/atarashansky/self-assembling-manifold
In a Jupyter notebook, execute the following to launch the interface:
>>> from SAMGUI import SAMGUI
>>> sam_gui = SAMGUI(sam) # sam is your SAM object
>>> sam_gui.SamPlot
This can also be enabled in Jupyer Lab by following the instructions in the
self-assembling-manifold README.
"""
logg.info('Self-assembling manifold')
try:
from SAM import SAM
except ImportError:
raise ImportError(
'\nplease install sam-algorithm: \n\n'
'\tgit clone git://github.com/atarashansky/self-assembling-manifold.git\n'
'\tcd self-assembling-manifold\n'
'\tpip install .'
)
s = SAM(counts=adata, inplace=inplace)
logg.info('Running SAM')
s.run(
max_iter=max_iter,
num_norm_avg=num_norm_avg,
k=k,
distance=distance,
preprocessing=standardization,
weight_PCs=weight_pcs,
npcs=npcs,
n_genes=n_genes,
projection=projection,
verbose=verbose,
)
return (s, adata) if inplace else (s, s.adata)
|
31,210 | def main():
"""
PARSE AND VALIDATE INTEGRATION PARAMS
"""
token = demisto.params().get('token')
# get the service API url
base_url = urljoin(demisto.params()['url'], '/api/rest')
verify_certificate = not demisto.params().get('insecure', False)
# How much time before the first fetch to retrieve incidents
first_fetch_time = demisto.params().get('fetch_time', '3 days').strip()
proxy = demisto.params().get('proxy', False)
headers = {
"Authorization": token
}
LOG(f'Command being called is {demisto.command()}')
try:
client = Client(
base_url=base_url,
verify=verify_certificate,
proxy=proxy,
headers=headers
)
args = demisto.args()
if demisto.command() == 'test-module':
# This is the call made when pressing the integration Test button.
result = test_module(client)
demisto.results(result)
elif demisto.command() == 'fetch-incidents':
# Set and define the fetch incidents command to run after activated via integration settings.
next_run, incidents = fetch_incidents(
client=client,
last_run=demisto.getLastRun(),
first_fetch_time=first_fetch_time)
demisto.setLastRun(next_run)
demisto.incidents(incidents)
elif demisto.command() == 'mantis-get-issue-by-id':
mantis_get_issue_by_id_command(client, args)
elif demisto.command() == 'mantis-get-issues':
mantis_get_all_issues_command(client, args)
elif demisto.command() == 'mantis-create-issue':
mantis_create_issue_command(client, args)
elif demisto.command() == 'mantis-add-note':
matis_create_note_command(client, args)
elif demisto.command() == 'mantis-close-issue':
mantis_close_issue_command(client, args)
# Log exceptions
except Exception as e:
return_error(f'Failed to execute {demisto.command()} command. Error: {str(e)}')
| def main():
"""
PARSE AND VALIDATE INTEGRATION PARAMS
"""
token = demisto.params().get('token')
# get the service API url
base_url = urljoin(demisto.params()['url'], '/api/rest')
verify_certificate = not demisto.params().get('insecure', False)
# How much time before the first fetch to retrieve incidents
first_fetch_time = demisto.params().get('fetch_time', '3 days').strip()
proxy = demisto.params().get('proxy', False)
headers = {
"Authorization": token
}
LOG(f'Command being called is {demisto.command()}')
try:
client = Client(
base_url=base_url,
verify=verify_certificate,
proxy=proxy,
headers=headers
)
args = demisto.args()
if demisto.command() == 'test-module':
# This is the call made when pressing the integration Test button.
result = test_module(client)
demisto.results(result)
elif demisto.command() == 'fetch-incidents':
# Set and define the fetch incidents command to run after activated via integration settings.
next_run, incidents = fetch_incidents(
client=client,
last_run=demisto.getLastRun(),
first_fetch_time=first_fetch_time)
demisto.setLastRun(next_run)
demisto.incidents(incidents)
elif demisto.command() == 'mantis-get-issue-by-id':
mantis_get_issue_by_id_command(client, args)
elif demisto.command() == 'mantis-get-issues':
mantis_get_all_issues_command(client, args)
elif demisto.command() == 'mantis-create-issue':
mantis_create_issue_command(client, args)
elif demisto.command() == 'mantis-add-note':
return_results(matis_create_note_command(client, args))
elif demisto.command() == 'mantis-close-issue':
mantis_close_issue_command(client, args)
# Log exceptions
except Exception as e:
return_error(f'Failed to execute {demisto.command()} command. Error: {str(e)}')
|
41,841 | def dump_best_config(input_config_file: str, output_config_file: str, study: optuna.Study) -> None:
"""Save jsonnet after updating with parameters from the best trial in the study.
Args:
input_config_file:
Input configuration file (Jsonnet) specified in
:class:`~optuna.integration.AllenNLPExecutor`.
output_config_file:
Output configuration (Json) file.
study:
Instance of :class:`~optuna.study.Study`.
Note that :func:`~optuna.study.Study.optimize` mast have already called.
"""
best_params = study.best_params
for key, value in best_params.items():
best_params[key] = str(value)
best_config = json.loads(_jsonnet.evaluate_file(input_config_file, ext_vars=best_params))
best_config = allennlp.common.params.infer_and_cast(best_config)
with open(output_config_file, "w") as f:
json.dump(best_config, f, indent=4)
| def dump_best_config(input_config_file: str, output_config_file: str, study: optuna.Study) -> None:
"""Save jsonnet after updating with parameters from the best trial in the study.
Args:
input_config_file:
Input configuration file (Jsonnet) specified in
:class:`~optuna.integration.AllenNLPExecutor`.
output_config_file:
Output configuration (JSON) file.
study:
Instance of :class:`~optuna.study.Study`.
Note that :func:`~optuna.study.Study.optimize` mast have already called.
"""
best_params = study.best_params
for key, value in best_params.items():
best_params[key] = str(value)
best_config = json.loads(_jsonnet.evaluate_file(input_config_file, ext_vars=best_params))
best_config = allennlp.common.params.infer_and_cast(best_config)
with open(output_config_file, "w") as f:
json.dump(best_config, f, indent=4)
|
22,370 | def print_folders(pad, folder):
# For debugging...
pad_str = ''
for _i in range(1, pad):
pad_str += ' '
print(f'{pad_str} id: {folder.id} key: {folder.key}')
for repository_dependency in folder.repository_dependencies:
print(f' {pad_str}{repository_dependency.listify}')
for sub_folder in folder.folders:
print_folders(pad + 5, sub_folder)
| def print_folders(pad, folder):
# For debugging...
pad_str = ' ' * pad
for _i in range(1, pad):
pad_str += ' '
print(f'{pad_str} id: {folder.id} key: {folder.key}')
for repository_dependency in folder.repository_dependencies:
print(f' {pad_str}{repository_dependency.listify}')
for sub_folder in folder.folders:
print_folders(pad + 5, sub_folder)
|
14,214 | def load_entry():
"""Gather entry point information by parsing :envar:`COCOTB_ENTRY_POINT`."""
entry_point_str = os.environ.get("COCOTB_ENTRY_POINT", "cocotb:_initialise_testbench")
try:
if ":" not in entry_point_str:
raise ValueError("Invalid COCOTB_ENTRY_POINT, missing entry function (no colon).")
entry_module_str, entry_func_str = entry_point_str.split(":", 1)
entry_module = importlib.import_module(entry_module_str)
entry_func = reduce(getattr, entry_func_str.split('.'), entry_module)
except Exception as e:
raise RuntimeError("Failure to parse COCOTB_ENTRY_POINT ('{}')".format(entry_point_str)) from e
else:
return entry_module, entry_func
| def load_entry():
"""Gather entry point information by parsing :envvar:`COCOTB_ENTRY_POINT`."""
entry_point_str = os.environ.get("COCOTB_ENTRY_POINT", "cocotb:_initialise_testbench")
try:
if ":" not in entry_point_str:
raise ValueError("Invalid COCOTB_ENTRY_POINT, missing entry function (no colon).")
entry_module_str, entry_func_str = entry_point_str.split(":", 1)
entry_module = importlib.import_module(entry_module_str)
entry_func = reduce(getattr, entry_func_str.split('.'), entry_module)
except Exception as e:
raise RuntimeError("Failure to parse COCOTB_ENTRY_POINT ('{}')".format(entry_point_str)) from e
else:
return entry_module, entry_func
|
9,872 | def main():
""" This section is for arguments parsing """
state_map = dict(
present='vrouter-bgp-add',
absent='vrouter-bgp-remove',
update='vrouter-bgp-modify'
)
argument_spec = dict(
pn_cliswitch=dict(required=False, type='str'),
state=dict(required=False, type='str', choices=state_map.keys(), default='present'),
pn_neighbor=dict(required=False, type='str'),
pn_vrouter_name=dict(required=False, type='str'),
pn_send_community=dict(required=False, type='bool'),
pn_weight=dict(required=False, type='str'),
pn_multi_protocol=dict(required=False, type='str', choices=['ipv4-unicast', 'ipv6-unicast']),
pn_prefix_list_in=dict(required=False, type='str'),
pn_route_reflector_client=dict(required=False, type='bool'),
pn_default_originate=dict(required=False, type='bool'),
pn_neighbor_holdtime=dict(required=False, type='str'),
pn_connect_retry_interval=dict(required=False, type='str'),
pn_advertisement_interval=dict(required=False, type='str'),
pn_route_map_out=dict(required=False, type='str'),
pn_update_source=dict(required=False, type='str'),
pn_bfd=dict(required=False, type='bool', default=False),
pn_next_hop_self=dict(required=False, type='bool'),
pn_allowas_in=dict(required=False, type='bool'),
pn_neighbor_keepalive_interval=dict(required=False, type='str'),
pn_max_prefix=dict(required=False, type='str'),
pn_bfd_multihop=dict(required=False, type='bool'),
pn_interface=dict(required=False, type='str'),
pn_password=dict(required=False, type='str', no_log=True),
pn_route_map_in=dict(required=False, type='str'),
pn_soft_reconfig_inbound=dict(required=False, type='bool'),
pn_override_capability=dict(required=False, type='bool'),
pn_max_prefix_warn_only=dict(required=False, type='bool'),
pn_ebgp_multihop=dict(required=False, type='str'),
pn_remote_as=dict(required=False, type='str'),
pn_prefix_list_out=dict(required=False, type='str'),
pn_no_route_map_out=dict(required=False, type='str'),
pn_no_route_map_in=dict(required=False, type='str'),
),
module = AnsibleModule(
argument_spec=argument_spec,
required_if=(
["state", "present", ["pn_vrouter_name", "pn_neighbor", "pn_remote_as"]],
["state", "absent", ["pn_vrouter_name", "pn_neighbor"]],
["state", "update", ["pn_vrouter_name", "pn_neighbor"]]
),
required_one_of=[['pn_send_community', 'pn_weight', 'pn_multi_protocol',
'pn_prefix_list_in', 'pn_route_reflector_client', 'pn_default_originate',
'pn_neighbor_holdtime', 'pn_connect_retry_interval', 'pn_advertisement_interval',
'pn_route_map_out', 'pn_update_source', 'pn_bfd',
'pn_next_hop_self', 'pn_allowas_in', 'pn_neighbor_keepalive_interval',
'pn_max_prefix', 'pn_bfd_multihop', 'pn_interface',
'pn_password', 'pn_route_map_in', 'pn_soft_reconfig_inbound',
'pn_override_capability', 'pn_max_prefix_warn_only', 'pn_ebgp_multihop',
'pn_remote_as', 'pn_prefix_list_out', 'pn_no_route_map_out',
'pn_no_route_map_in']],
)
# Accessing the arguments
cliswitch = module.params['pn_cliswitch']
state = module.params['state']
neighbor = module.params['pn_neighbor']
vrouter_name = module.params['pn_vrouter_name']
send_community = module.params['pn_send_community']
weight = module.params['pn_weight']
multi_protocol = module.params['pn_multi_protocol']
prefix_list_in = module.params['pn_prefix_list_in']
route_reflector_client = module.params['pn_route_reflector_client']
default_originate = module.params['pn_default_originate']
neighbor_holdtime = module.params['pn_neighbor_holdtime']
connect_retry_interval = module.params['pn_connect_retry_interval']
advertisement_interval = module.params['pn_advertisement_interval']
route_map_out = module.params['pn_route_map_out']
update_source = module.params['pn_update_source']
bfd = module.params['pn_bfd']
next_hop_self = module.params['pn_next_hop_self']
allowas_in = module.params['pn_allowas_in']
neighbor_keepalive_interval = module.params['pn_neighbor_keepalive_interval']
max_prefix = module.params['pn_max_prefix']
bfd_multihop = module.params['pn_bfd_multihop']
interface = module.params['pn_interface']
password = module.params['pn_password']
route_map_in = module.params['pn_route_map_in']
soft_reconfig_inbound = module.params['pn_soft_reconfig_inbound']
override_capability = module.params['pn_override_capability']
max_prefix_warn_only = module.params['pn_max_prefix_warn_only']
ebgp_multihop = module.params['pn_ebgp_multihop']
remote_as = module.params['pn_remote_as']
prefix_list_out = module.params['pn_prefix_list_out']
no_route_map_out = module.params['pn_no_route_map_out']
no_route_map_in = module.params['pn_no_route_map_in']
command = state_map[state]
if weight and weight != 'none':
if int(weight) < 1 or int(weight) > 65535:
module.fail_json(
failed=True,
msg='Valid weight range is 1 to 65535'
)
# Building the CLI command string
cli = pn_cli(module, cliswitch)
VROUTER_EXISTS, NEIGHBOR_EXISTS = check_cli(module, cli)
if state:
if VROUTER_EXISTS is False:
module.exit_json(
skipped=True,
msg='vRouter %s does not exist' % vrouter_name
)
if command == 'vrouter-bgp-remove' or command == 'vrouter-bgp-modify':
if NEIGHBOR_EXISTS is False:
module.exit_json(
skipped=True,
msg='BGP neighbor with IP %s does not exist on %s' % (neighbor, vrouter_name)
)
if command == 'vrouter-bgp-add':
if NEIGHBOR_EXISTS is True:
module.exit_json(
skipped=True,
msg='BGP neighbor with IP %s already exists on %s' % (neighbor, vrouter_name)
)
cli += ' %s vrouter-name %s neighbor %s ' % (command, vrouter_name, neighbor)
if command == 'vrouter-bgp-add' or command == 'vrouter-bgp-modify':
if weight:
cli += ' weight ' + weight
if multi_protocol:
cli += ' multi-protocol ' + multi_protocol
if prefix_list_in:
cli += ' prefix-list-in ' + prefix_list_in
if neighbor_holdtime:
is_valid(module, 'neighbor holdtime', neighbor_holdtime, '0', '65535')
cli += ' neighbor-holdtime ' + neighbor_holdtime
if connect_retry_interval:
is_valid(module, 'connect retry interval', connect_retry_interval, '0', '65535')
cli += ' connect-retry-interval ' + connect_retry_interval
if advertisement_interval:
is_valid(module, 'advertisement interval', advertisement_interval, '0', '65535')
cli += ' advertisement-interval ' + advertisement_interval
if route_map_out:
cli += ' route-map-out ' + route_map_out
if update_source:
cli += ' update-source ' + update_source
if neighbor_keepalive_interval:
is_valid(module, 'neighbor keepalive interval', neighbor_keepalive_interval, '0', '65535')
cli += ' neighbor-keepalive-interval ' + neighbor_keepalive_interval
if max_prefix:
cli += ' max-prefix ' + max_prefix
if interface:
cli += ' interface ' + interface
if password:
cli += ' password ' + password
if route_map_in:
cli += ' route-map-in ' + route_map_in
if ebgp_multihop:
is_valid(module, 'ebgp_multihop', ebgp_multihop, '1', '255')
cli += ' ebgp-multihop ' + ebgp_multihop
if remote_as:
cli += ' remote-as ' + remote_as
if prefix_list_out:
cli += ' prefix-list-out ' + prefix_list_out
cli += booleanArgs(send_community, 'send-community', 'no-send-community')
cli += booleanArgs(route_reflector_client, 'route-reflector-client', 'no-route-reflector-client')
cli += booleanArgs(default_originate, 'default-originate', 'no-default-originate')
cli += booleanArgs(bfd, 'bfd', 'no-bfd')
cli += booleanArgs(next_hop_self, 'next-hop-self', 'no-next-hop-self')
cli += booleanArgs(allowas_in, 'allowas-in', 'no-allowas-in')
cli += booleanArgs(bfd_multihop, 'bfd-multihop', 'no-bfd-multihop')
cli += booleanArgs(soft_reconfig_inbound, 'soft-reconfig-inbound', 'no-soft-reconfig-inbound')
cli += booleanArgs(override_capability, 'override-capability', 'no-override-capability')
cli += booleanArgs(max_prefix_warn_only, 'max-prefix-warn-only', 'no-max-prefix-warn-only')
if command == 'vrouter-bgp-modify':
if no_route_map_out:
cli += ' no-route-map-out ' + no_route_map_out
if no_route_map_in:
cli += ' no-route-map-in ' + no_route_map_in
run_cli(module, cli, state_map)
| def main():
""" This section is for arguments parsing """
state_map = dict(
present='vrouter-bgp-add',
absent='vrouter-bgp-remove',
update='vrouter-bgp-modify'
)
argument_spec = dict(
pn_cliswitch=dict(required=False, type='str'),
state=dict(required=False, type='str', choices=state_map.keys(), default='present'),
pn_neighbor=dict(required=False, type='str'),
pn_vrouter_name=dict(required=False, type='str'),
pn_send_community=dict(required=False, type='bool'),
pn_weight=dict(required=False, type='str'),
pn_multi_protocol=dict(required=False, type='str', choices=['ipv4-unicast', 'ipv6-unicast']),
pn_prefix_list_in=dict(required=False, type='str'),
pn_route_reflector_client=dict(required=False, type='bool'),
pn_default_originate=dict(required=False, type='bool'),
pn_neighbor_holdtime=dict(required=False, type='str'),
pn_connect_retry_interval=dict(required=False, type='str'),
pn_advertisement_interval=dict(required=False, type='str'),
pn_route_map_out=dict(required=False, type='str'),
pn_update_source=dict(required=False, type='str'),
pn_bfd=dict(required=False, type='bool', default=False),
pn_next_hop_self=dict(required=False, type='bool'),
pn_allowas_in=dict(required=False, type='bool'),
pn_neighbor_keepalive_interval=dict(required=False, type='str'),
pn_max_prefix=dict(required=False, type='str'),
pn_bfd_multihop=dict(required=False, type='bool'),
pn_interface=dict(required=False, type='str'),
pn_password=dict(required=False, type='str', no_log=True),
pn_route_map_in=dict(required=False, type='str'),
pn_soft_reconfig_inbound=dict(required=False, type='bool'),
pn_override_capability=dict(required=False, type='bool'),
pn_max_prefix_warn_only=dict(required=False, type='bool'),
pn_ebgp_multihop=dict(required=False, type='str'),
pn_remote_as=dict(required=False, type='str'),
pn_prefix_list_out=dict(required=False, type='str'),
pn_no_route_map_out=dict(required=False, type='str'),
pn_no_route_map_in=dict(required=False, type='str'),
)
module = AnsibleModule(
argument_spec=argument_spec,
required_if=(
["state", "present", ["pn_vrouter_name", "pn_neighbor", "pn_remote_as"]],
["state", "absent", ["pn_vrouter_name", "pn_neighbor"]],
["state", "update", ["pn_vrouter_name", "pn_neighbor"]]
),
required_one_of=[['pn_send_community', 'pn_weight', 'pn_multi_protocol',
'pn_prefix_list_in', 'pn_route_reflector_client', 'pn_default_originate',
'pn_neighbor_holdtime', 'pn_connect_retry_interval', 'pn_advertisement_interval',
'pn_route_map_out', 'pn_update_source', 'pn_bfd',
'pn_next_hop_self', 'pn_allowas_in', 'pn_neighbor_keepalive_interval',
'pn_max_prefix', 'pn_bfd_multihop', 'pn_interface',
'pn_password', 'pn_route_map_in', 'pn_soft_reconfig_inbound',
'pn_override_capability', 'pn_max_prefix_warn_only', 'pn_ebgp_multihop',
'pn_remote_as', 'pn_prefix_list_out', 'pn_no_route_map_out',
'pn_no_route_map_in']],
)
# Accessing the arguments
cliswitch = module.params['pn_cliswitch']
state = module.params['state']
neighbor = module.params['pn_neighbor']
vrouter_name = module.params['pn_vrouter_name']
send_community = module.params['pn_send_community']
weight = module.params['pn_weight']
multi_protocol = module.params['pn_multi_protocol']
prefix_list_in = module.params['pn_prefix_list_in']
route_reflector_client = module.params['pn_route_reflector_client']
default_originate = module.params['pn_default_originate']
neighbor_holdtime = module.params['pn_neighbor_holdtime']
connect_retry_interval = module.params['pn_connect_retry_interval']
advertisement_interval = module.params['pn_advertisement_interval']
route_map_out = module.params['pn_route_map_out']
update_source = module.params['pn_update_source']
bfd = module.params['pn_bfd']
next_hop_self = module.params['pn_next_hop_self']
allowas_in = module.params['pn_allowas_in']
neighbor_keepalive_interval = module.params['pn_neighbor_keepalive_interval']
max_prefix = module.params['pn_max_prefix']
bfd_multihop = module.params['pn_bfd_multihop']
interface = module.params['pn_interface']
password = module.params['pn_password']
route_map_in = module.params['pn_route_map_in']
soft_reconfig_inbound = module.params['pn_soft_reconfig_inbound']
override_capability = module.params['pn_override_capability']
max_prefix_warn_only = module.params['pn_max_prefix_warn_only']
ebgp_multihop = module.params['pn_ebgp_multihop']
remote_as = module.params['pn_remote_as']
prefix_list_out = module.params['pn_prefix_list_out']
no_route_map_out = module.params['pn_no_route_map_out']
no_route_map_in = module.params['pn_no_route_map_in']
command = state_map[state]
if weight and weight != 'none':
if int(weight) < 1 or int(weight) > 65535:
module.fail_json(
failed=True,
msg='Valid weight range is 1 to 65535'
)
# Building the CLI command string
cli = pn_cli(module, cliswitch)
VROUTER_EXISTS, NEIGHBOR_EXISTS = check_cli(module, cli)
if state:
if VROUTER_EXISTS is False:
module.exit_json(
skipped=True,
msg='vRouter %s does not exist' % vrouter_name
)
if command == 'vrouter-bgp-remove' or command == 'vrouter-bgp-modify':
if NEIGHBOR_EXISTS is False:
module.exit_json(
skipped=True,
msg='BGP neighbor with IP %s does not exist on %s' % (neighbor, vrouter_name)
)
if command == 'vrouter-bgp-add':
if NEIGHBOR_EXISTS is True:
module.exit_json(
skipped=True,
msg='BGP neighbor with IP %s already exists on %s' % (neighbor, vrouter_name)
)
cli += ' %s vrouter-name %s neighbor %s ' % (command, vrouter_name, neighbor)
if command == 'vrouter-bgp-add' or command == 'vrouter-bgp-modify':
if weight:
cli += ' weight ' + weight
if multi_protocol:
cli += ' multi-protocol ' + multi_protocol
if prefix_list_in:
cli += ' prefix-list-in ' + prefix_list_in
if neighbor_holdtime:
is_valid(module, 'neighbor holdtime', neighbor_holdtime, '0', '65535')
cli += ' neighbor-holdtime ' + neighbor_holdtime
if connect_retry_interval:
is_valid(module, 'connect retry interval', connect_retry_interval, '0', '65535')
cli += ' connect-retry-interval ' + connect_retry_interval
if advertisement_interval:
is_valid(module, 'advertisement interval', advertisement_interval, '0', '65535')
cli += ' advertisement-interval ' + advertisement_interval
if route_map_out:
cli += ' route-map-out ' + route_map_out
if update_source:
cli += ' update-source ' + update_source
if neighbor_keepalive_interval:
is_valid(module, 'neighbor keepalive interval', neighbor_keepalive_interval, '0', '65535')
cli += ' neighbor-keepalive-interval ' + neighbor_keepalive_interval
if max_prefix:
cli += ' max-prefix ' + max_prefix
if interface:
cli += ' interface ' + interface
if password:
cli += ' password ' + password
if route_map_in:
cli += ' route-map-in ' + route_map_in
if ebgp_multihop:
is_valid(module, 'ebgp_multihop', ebgp_multihop, '1', '255')
cli += ' ebgp-multihop ' + ebgp_multihop
if remote_as:
cli += ' remote-as ' + remote_as
if prefix_list_out:
cli += ' prefix-list-out ' + prefix_list_out
cli += booleanArgs(send_community, 'send-community', 'no-send-community')
cli += booleanArgs(route_reflector_client, 'route-reflector-client', 'no-route-reflector-client')
cli += booleanArgs(default_originate, 'default-originate', 'no-default-originate')
cli += booleanArgs(bfd, 'bfd', 'no-bfd')
cli += booleanArgs(next_hop_self, 'next-hop-self', 'no-next-hop-self')
cli += booleanArgs(allowas_in, 'allowas-in', 'no-allowas-in')
cli += booleanArgs(bfd_multihop, 'bfd-multihop', 'no-bfd-multihop')
cli += booleanArgs(soft_reconfig_inbound, 'soft-reconfig-inbound', 'no-soft-reconfig-inbound')
cli += booleanArgs(override_capability, 'override-capability', 'no-override-capability')
cli += booleanArgs(max_prefix_warn_only, 'max-prefix-warn-only', 'no-max-prefix-warn-only')
if command == 'vrouter-bgp-modify':
if no_route_map_out:
cli += ' no-route-map-out ' + no_route_map_out
if no_route_map_in:
cli += ' no-route-map-in ' + no_route_map_in
run_cli(module, cli, state_map)
|
31,903 | def start_quick_scan(client, data_args):
# get computer group ID from computer group name
computer_group_name = data_args.get('computer-group-name')
raw_response = client.do_request('GET', f"/api/v2/groups/by-name/{computer_group_name}")
raw_response_data = raw_response.get('data')
if not raw_response_data:
msg = f'No group exists with name {computer_group_name}.'
msg += 'Also, please verify that your account has sufficient permissions to access the groups'
raise DemistoException(msg)
data = {
'intelDocId': int(data_args.get('intel-doc-id')),
'computerGroupId': int(raw_response_data.get('id'))
}
raw_response = client.do_request('POST', '/plugin/products/detect3/api/v1/quick-scans/', json_data=data)
quick_scan = get_quick_scan_item(raw_response)
context = createContext(quick_scan, removeNull=True)
outputs = {'Tanium.QuickScan(val.ID && val.ID === obj.ID)': context}
human_readable = tableToMarkdown('Quick Scan started', quick_scan, headerTransform=pascalToSpace, removeNull=True)
return human_readable, outputs, raw_response
| def start_quick_scan(client, data_args):
# get computer group ID from computer group name
computer_group_name = data_args.get('computer-group-name')
raw_response = client.do_request('GET', f"/api/v2/groups/by-name/{computer_group_name}")
raw_response_data = raw_response.get('data')
if not raw_response_data:
msg = f'No group exists with name {computer_group_name}.'
msg += 'Also, please verify that your account has sufficient permissions to access the groups'
raise DemistoException(msg)
data = {
'intelDocId': int(data_args.get('intel_doc_id')),
'computerGroupId': int(raw_response_data.get('id'))
}
raw_response = client.do_request('POST', '/plugin/products/detect3/api/v1/quick-scans/', json_data=data)
quick_scan = get_quick_scan_item(raw_response)
context = createContext(quick_scan, removeNull=True)
outputs = {'Tanium.QuickScan(val.ID && val.ID === obj.ID)': context}
human_readable = tableToMarkdown('Quick Scan started', quick_scan, headerTransform=pascalToSpace, removeNull=True)
return human_readable, outputs, raw_response
|
13,747 | def get_course_enrollment_details(course_id, include_expired=False):
"""Get the course modes for course. Also get enrollment start and end date, invite only, etc.
Given a course_id, return a serializable dictionary of properties describing course enrollment information.
Args:
course_id (str): The Course to get enrollment information for.
include_expired (bool): Boolean denoting whether expired course modes
should be included in the returned JSON data.
Returns:
A serializable dictionary of course enrollment information.
Example:
>>> get_course_enrollment_details("edX/DemoX/2014T2")
{
"course_id": "edX/DemoX/2014T2",
"course_name": "edX Demonstration Course",
"enrollment_end": "2014-12-20T20:18:00Z",
"enrollment_start": "2014-10-15T20:18:00Z",
"course_start": "2015-02-03T00:00:00Z",
"course_end": "2015-05-06T00:00:00Z",
"course_modes": [
{
"slug": "honor",
"name": "Honor Code Certificate",
"min_price": 0,
"suggested_prices": "",
"currency": "usd",
"expiration_datetime": null,
"description": null,
"sku": null,
"bulk_sku": null
}
],
"invite_only": False
}
"""
cache_key = 'enrollment.course.details.{course_id}.{include_expired}'.format(
course_id=course_id,
include_expired=include_expired
)
cached_enrollment_data = None
try:
cached_enrollment_data = cache.get(cache_key)
except Exception: # pylint: disable=broad-except
# The cache backend could raise an exception (for example, memcache keys that contain spaces)
log.exception("Error occurred while retrieving course enrollment details from the cache")
if cached_enrollment_data:
return cached_enrollment_data
course_enrollment_details = _data_api().get_course_enrollment_info(course_id, include_expired)
try:
cache_time_out = getattr(settings, 'ENROLLMENT_COURSE_DETAILS_CACHE_TIMEOUT', 60)
cache.set(cache_key, course_enrollment_details, cache_time_out)
except Exception:
# Catch any unexpected errors during caching.
log.exception("Error occurred while caching course enrollment details for course %s", course_id)
raise errors.CourseEnrollmentError("An unexpected error occurred while retrieving course enrollment details.") # lint-amnesty, pylint: disable=raise-missing-from
return course_enrollment_details
| def get_course_enrollment_details(course_id, include_expired=False):
"""Get the course modes for course. Also get enrollment start and end date, invite only, etc.
Given a course_id, return a serializable dictionary of properties describing course enrollment information.
Args:
course_id (str): The Course to get enrollment information for.
include_expired (bool): Boolean denoting whether expired course modes
should be included in the returned JSON data.
Returns:
A serializable dictionary of course enrollment information.
Example:
>>> get_course_enrollment_details("edX/DemoX/2014T2")
{
"course_id": "edX/DemoX/2014T2",
"course_name": "edX Demonstration Course",
"enrollment_end": "2014-12-20T20:18:00Z",
"enrollment_start": "2014-10-15T20:18:00Z",
"course_start": "2015-02-03T00:00:00Z",
"course_end": "2015-05-06T00:00:00Z",
"course_modes": [
{
"slug": "honor",
"name": "Honor Code Certificate",
"min_price": 0,
"suggested_prices": "",
"currency": "usd",
"expiration_datetime": null,
"description": null,
"sku": null,
"bulk_sku": null
}
],
"invite_only": False
}
"""
cache_key = f'enrollment.course.details.{course_id}.{include_expired}'
cached_enrollment_data = None
try:
cached_enrollment_data = cache.get(cache_key)
except Exception: # pylint: disable=broad-except
# The cache backend could raise an exception (for example, memcache keys that contain spaces)
log.exception("Error occurred while retrieving course enrollment details from the cache")
if cached_enrollment_data:
return cached_enrollment_data
course_enrollment_details = _data_api().get_course_enrollment_info(course_id, include_expired)
try:
cache_time_out = getattr(settings, 'ENROLLMENT_COURSE_DETAILS_CACHE_TIMEOUT', 60)
cache.set(cache_key, course_enrollment_details, cache_time_out)
except Exception:
# Catch any unexpected errors during caching.
log.exception("Error occurred while caching course enrollment details for course %s", course_id)
raise errors.CourseEnrollmentError("An unexpected error occurred while retrieving course enrollment details.") # lint-amnesty, pylint: disable=raise-missing-from
return course_enrollment_details
|
39,875 | def confirm_staged_stake(stakeholder, value, duration):
click.confirm(f"""
* Ursula Node Operator Notice *
-------------------------------
By agreeing to stake {str(value)}:
- Staked tokens will be locked, and unavailable for transactions for the stake duration.
- You are obligated to maintain a networked and available Ursula-Worker node with the
for the duration of the stake(s) ({duration} periods)
- Agree to allow NuCypher network users to carry out uninterrupted re-encryption
work orders at-will without interference.
Failure to keep your node online, or violation of re-encryption work orders
will result in the loss of staked tokens as described in the NuCypher slashing protocol.
Keeping your Ursula node online during the staking period and successfully
performing accurate re-encryption work orders will result in rewards
paid out in ETH retro-actively, on-demand.
Accept ursula node operator obligation?""", abort=True)
| def confirm_staged_stake(stakeholder, value, duration):
click.confirm(f"""
* Ursula Node Operator Notice *
-------------------------------
By agreeing to stake {str(value)}:
- Staked tokens will be locked, and unavailable for transactions for the stake duration.
- You are obligated to maintain a networked and available Ursula-Worker node with the
for the duration of the stake(s) ({duration} periods)
- Agree to allow NuCypher network users to carry out uninterrupted re-encryption
- You are obligated to maintain a networked and available Ursula-Worker node
Failure to keep your node online, or violation of re-encryption work orders
will result in the loss of staked tokens as described in the NuCypher slashing protocol.
Keeping your Ursula node online during the staking period and successfully
performing accurate re-encryption work orders will result in rewards
paid out in ETH retro-actively, on-demand.
Accept ursula node operator obligation?""", abort=True)
|
30,413 | def check_base_branch(pr_num):
print_color('Starting to fetch the base branch of PR num {}'.format(pr_num), LOG_COLORS.GREEN)
base_branch = get_base_branch(pr_num)
print_color('Finished to fetch the base branch of PR num {}'.format(pr_num), LOG_COLORS.GREEN)
if base_branch == 'master':
print_error("You cannot merge into master when creating an external PR.")
sys.exit(1)
else:
print_color('Base branch of PR num {} is not master - Great!'.format(pr_num), LOG_COLORS.GREEN)
sys.exit(0)
| def check_base_branch(pr_num):
print_color('Starting to fetch the base branch of PR num {}'.format(pr_num), LOG_COLORS.GREEN)
base_branch = get_base_branch(pr_num)
print_color('Finished to fetch the base branch of PR num {}'.format(pr_num), LOG_COLORS.GREEN)
if base_branch == 'master':
print_error("You cannot merge into master when creating an external PR.")
sys.exit(1)
else:
print_color('Verified pull request base branch successfully', LOG_COLORS.GREEN)
sys.exit(0)
|
29,810 | def info_mark():
"""
:return: string that can print an info symbol
"""
return PaastaColors.blue("\u2139")
| def info_mark() -> str:
"""
:return: string that can print an info symbol
"""
return PaastaColors.blue("\u2139")
|
27,942 | def evaluate(model, X_test, Y_test, eval_size, batch_size):
N_test = X_test.shape[0] if eval_size is None else eval_size
if N_test > X_test.shape[0]:
raise ValueError(
'Test size can be no larger than {}'.format(X_test.shape[0]))
with chx.no_backprop_mode():
total_loss = chx.array(0, dtype=chx.float32)
num_correct = chx.array(0, dtype=chx.int64)
for i in range(0, N_test, batch_size):
x = X_test[i:min(i + batch_size, N_test)]
t = Y_test[i:min(i + batch_size, N_test)]
y = model.forward(x)
total_loss += chx.softmax_cross_entropy(y, t)
num_correct += (y.argmax(axis=1).astype(t.dtype)
== t).astype(chx.int32).sum()
mean_loss = float(total_loss) / N_test
accuracy = int(num_correct) / N_test
return mean_loss, accuracy
| def evaluate(model, X_test, Y_test, eval_size, batch_size):
N_test = X_test.shape[0] if eval_size is None else eval_size
if N_test > X_test.shape[0]:
raise ValueError(
'Test size can be no larger than {}'.format(X_test.shape[0]))
with chx.no_backprop_mode():
total_loss = chx.array(0, dtype=chx.float32)
num_correct = chx.array(0, dtype=chx.int64)
for i in range(0, N_test, batch_size):
x = X_test[i:min(i + batch_size, N_test)]
t = Y_test[i:min(i + batch_size, N_test)]
y = model.forward(x)
total_loss += chx.softmax_cross_entropy(y, t).sum()
num_correct += (y.argmax(axis=1).astype(t.dtype)
== t).astype(chx.int32).sum()
mean_loss = float(total_loss) / N_test
accuracy = int(num_correct) / N_test
return mean_loss, accuracy
|
34,309 | def add_confused_intents_to_report(
report: Dict, target_intents: Iterable[Any], predicted_intents: Iterable[Any]
) -> Dict:
from sklearn.metrics import confusion_matrix
from sklearn.utils.multiclass import unique_labels
cnf_matrix = confusion_matrix(target_intents, predicted_intents)
indices = np.argsort(cnf_matrix, axis=1)
labels = unique_labels(target_intents, predicted_intents)
n_candidates = min(3, len(labels))
for label in labels:
if report.get(label):
report[label]["confused_with"] = {}
for i, label in enumerate(labels):
for j in range(n_candidates):
label_idx = indices[i, -j]
_label = labels[label_idx]
num_hits = int(cnf_matrix[i, label_idx])
if _label != label and num_hits > 0:
report[label]["confused_with"][_label] = num_hits
return report
| def add_confused_intents_to_report(
report: Dict, target_intents: Iterable[Any], predicted_intents: Iterable[Any]
) -> Dict[Text, Dict[Text, Union[Dict, float]]]:
from sklearn.metrics import confusion_matrix
from sklearn.utils.multiclass import unique_labels
cnf_matrix = confusion_matrix(target_intents, predicted_intents)
indices = np.argsort(cnf_matrix, axis=1)
labels = unique_labels(target_intents, predicted_intents)
n_candidates = min(3, len(labels))
for label in labels:
if report.get(label):
report[label]["confused_with"] = {}
for i, label in enumerate(labels):
for j in range(n_candidates):
label_idx = indices[i, -j]
_label = labels[label_idx]
num_hits = int(cnf_matrix[i, label_idx])
if _label != label and num_hits > 0:
report[label]["confused_with"][_label] = num_hits
return report
|
31,073 | def install_packs(client: demisto_client,
host: str,
packs_to_install: list,
request_timeout: int = 999999,
is_nightly: bool = False):
""" Make a packs installation request.
Args:
client (demisto_client): The configured client to use.
host (str): The server URL.
packs_to_install (list): A list of the packs to install.
request_timeout (int): Timeout settings for the installation request.
is_nightly (bool): Is the build nightly or not.
"""
if is_nightly:
install_nightly_packs(client, host, packs_to_install)
return
request_data = {
'packs': packs_to_install,
'ignoreWarnings': True
}
logging.info(f'Installing packs to server {host}')
packs_to_install_str = ', '.join([pack['id'] for pack in packs_to_install])
logging.debug(f'Installing the following packs in server {host}:\n{packs_to_install_str}')
# make the pack installation request
try:
response_data, status_code, _ = demisto_client.generic_request_func(client,
path='/contentpacks/marketplace/install',
method='POST',
body=request_data,
accept='application/json',
_request_timeout=request_timeout)
if 200 <= status_code < 300:
packs_data = [{'ID': pack.get('id'), 'CurrentVersion': pack.get('currentVersion')} for
pack in
ast.literal_eval(response_data)]
logging.debug(f'The following packs were successfully installed on server {host}:\n{packs_data}')
else:
result_object = ast.literal_eval(response_data)
message = result_object.get('message', '')
raise Exception(f'Failed to install packs - with status code {status_code}\n{message}')
except Exception:
logging.exception('The request to install packs has failed.')
global SUCCESS_FLAG
SUCCESS_FLAG = False
| def install_packs(client: demisto_client,
host: str,
packs_to_install: list,
request_timeout: int = 999999,
is_nightly: bool = False):
""" Make a packs installation request.
Args:
client (demisto_client): The configured client to use.
host (str): The server URL.
packs_to_install (list): A list of the packs to install.
request_timeout (int): Timeout settings for the installation request.
is_nightly (bool): Is the build nightly or not.
"""
if is_nightly:
install_nightly_packs(client, host, packs_to_install)
return
request_data = {
'packs': packs_to_install,
'ignoreWarnings': True
}
logging.info(f'Installing packs to server {host}')
packs_to_install_str = ', '.join([pack['id'] for pack in packs_to_install])
logging.debug(f'Installing the following packs on server {host}:\n{packs_to_install_str}')
# make the pack installation request
try:
response_data, status_code, _ = demisto_client.generic_request_func(client,
path='/contentpacks/marketplace/install',
method='POST',
body=request_data,
accept='application/json',
_request_timeout=request_timeout)
if 200 <= status_code < 300:
packs_data = [{'ID': pack.get('id'), 'CurrentVersion': pack.get('currentVersion')} for
pack in
ast.literal_eval(response_data)]
logging.debug(f'The following packs were successfully installed on server {host}:\n{packs_data}')
else:
result_object = ast.literal_eval(response_data)
message = result_object.get('message', '')
raise Exception(f'Failed to install packs - with status code {status_code}\n{message}')
except Exception:
logging.exception('The request to install packs has failed.')
global SUCCESS_FLAG
SUCCESS_FLAG = False
|
21,246 | def _make_glob_from_args(
reporter: Optional[str],
volumes: Optional[range],
page: Optional[str],
) -> List[str]:
"""Make list of glob paths
:param reporter: The reporter to filter if any
:param volumes: The volumes of the reporter to filter to, if any
:return: A list of glob paths
"""
if reporter and volumes and page:
glob_path = os.path.join(
settings.MEDIA_ROOT,
"harvard_corpus",
f"law.free.cap.{reporter}.{volumes[0]}",
f"{page}.*.json",
)
return [glob_path]
elif reporter and volumes:
glob_paths = []
for volume in volumes:
glob_path = os.path.join(
settings.MEDIA_ROOT,
"harvard_corpus",
f"law.free.cap.{reporter}.{volume}",
".*.json",
)
glob_paths.append(glob_path)
return glob_paths
elif reporter:
reporter_key = ".".join(["law.free.cap", reporter])
glob_path = os.path.join(
settings.MEDIA_ROOT,
"harvard_corpus",
f"{reporter_key}.*/*.json",
)
else:
glob_path = os.path.join(
settings.MEDIA_ROOT, "harvard_corpus", "law.free.cap.*/*.json"
)
return [glob_path]
| def _make_glob_from_args(
reporter: Optional[str],
volumes: Optional[range],
page: Optional[str],
) -> List[str]:
"""Make list of glob paths
:param reporter: The reporter to filter if any
:param volumes: The volumes of the reporter to filter to, if any
:return: A list of glob paths
"""
if reporter and volumes and page:
glob_path = os.path.join(
settings.MEDIA_ROOT,
"harvard_corpus",
f"law.free.cap.{reporter}.{volumes[0]}",
f"{page}.*.json",
)
return [glob_path]
elif reporter and volumes:
glob_paths = []
for volume in volumes:
glob_path = os.path.join(
settings.MEDIA_ROOT,
"harvard_corpus",
f"law.free.cap.{reporter}.{volume}",
"*.json",
)
glob_paths.append(glob_path)
return glob_paths
elif reporter:
reporter_key = ".".join(["law.free.cap", reporter])
glob_path = os.path.join(
settings.MEDIA_ROOT,
"harvard_corpus",
f"{reporter_key}.*/*.json",
)
else:
glob_path = os.path.join(
settings.MEDIA_ROOT, "harvard_corpus", "law.free.cap.*/*.json"
)
return [glob_path]
|
520 | def ignore_couch_changes_for_sql_domains(change):
if not change.metadata or not change.metadata.domain:
return False
if change.metadata.data_source_type == SOURCE_COUCH and should_use_sql_backend(change.metadata.domain):
return True
| def is_couch_change_for_sql_domain(change):
if not change.metadata or not change.metadata.domain:
return False
if change.metadata.data_source_type == SOURCE_COUCH and should_use_sql_backend(change.metadata.domain):
return True
|
19,497 | def _develop_specs_from_env(spec, env):
dev_info = env.dev_specs.get(spec.name, {}) if env else {}
if not dev_info:
return
path = os.path.normpath(os.path.join(env.path, dev_info["path"]))
if "dev_path" in spec.variants:
error_msg = (
"The dev_path for spec {name} is not connected to a valid environment path."
"Please note that develop specs can only be used inside an environment"
"Path difs are \n\tdev_path:{dev_path}\n\tenv_based_path:{env_path}"
)
error_msg.format(name=spec.name, dev_path=spec.variants("dev_path"), env_path=path)
assert spec.variants["dev_path"].value == path, error_msg
else:
spec.variants.setdefault("dev_path", spack.variant.SingleValuedVariant("dev_path", path))
spec.constrain(dev_info["spec"])
| def _develop_specs_from_env(spec, env):
dev_info = env.dev_specs.get(spec.name, {}) if env else {}
if not dev_info:
return
path = os.path.normpath(os.path.join(env.path, dev_info["path"]))
if "dev_path" in spec.variants:
error_msg = (
"The dev_path for spec {name} is not connected to a valid environment path."
"Please note that develop specs can only be used inside an environment"
"These paths should be the same:\n\tdev_path:{dev_path}\n\tenv_based_path:{env_path}"
)
error_msg.format(name=spec.name, dev_path=spec.variants("dev_path"), env_path=path)
assert spec.variants["dev_path"].value == path, error_msg
else:
spec.variants.setdefault("dev_path", spack.variant.SingleValuedVariant("dev_path", path))
spec.constrain(dev_info["spec"])
|
5,468 | def global_contribution_form(request):
"""Adds contribution form to the context."""
if enabled(request):
initial_data = {}
if hasattr(request, 'user'):
initial_data = {
'name': request.user.get_full_name() or request.user.username,
'email': request.user.email,
} if request.user.is_authenticated else {}
return {
'contribution_enabled': True,
'contribution_recurring_payment_enabled': recurring_payment_enabled(request),
'contribution_popup': popup_enabled(request),
'contribution_form': ContributionForm(initial=initial_data),
'contribution_recurring_payment_form': ContributionRecurringPaymentForm(initial=initial_data),
'hide_cta': True,
}
return {'contribution_enabled': False}
| def global_contribution_form(request):
"""Adds contribution form to the context."""
if enabled(request):
initial_data = {}
if hasattr(request, 'user'):
initial_data = {
'name': request.user.get_full_name() or request.user.username,
'email': request.user.email,
} if request.user.is_authenticated else {}
return {
'contribution_enabled': True,
'contribution_recurring_payment_enabled': recurring_payment_enabled(request),
'contribution_popup': popup_enabled(request),
'contribution_form': ContributionForm(initial=initial_data),
'recurring_payment_form': RecurringPaymentForm(initial=initial_data),
'hide_cta': True,
}
return {'contribution_enabled': False}
|
22,040 | def download_google_bigquery_table(project, dataset, table, columns=None, condition=None, export=None, client_project=None, credentials=None):
'''Download (stream) an entire Google BigQuery table locally.
:param str project: The Google BigQuery project that owns the table.
:param str dataset: The dataset the table is part of.
:param str table: The name of the table
:param list columns: A list of columns (field names) to download. If None, all columns will be downloaded.
:param str condition: SQL text filtering statement, similar to a WHERE clause in a query. Aggregates are not supported.
:param str export: Pass an filename or path to download the table as an Apache Arrow file, and leverage memory mapping. If `None` the DataFrame is in memory.
:param str client_project: The ID of the project that executes the query. Will be passed when creating a job. If `None`, it will be set with the same value as `project`.
:param credentials: The authorization credentials to attach to requests. See google.auth.credentials.Credentials for more details.
:rtype: DataFrame
Example:
>>> import os
>>> os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = '../path/to/project_access_key.json'
>>> from vaex.contrib.io.gbq import download_google_bigquery_table
>>> client_project = 'my_project_id'
>>> project = 'bigquery-public-data'
>>> dataset = 'ml_datasets'
>>> table = 'iris'
>>> columns = ['species', 'sepal_width', 'petal_width']
>>> conditions = 'species = "virginica"'
>>> df = download_google_bigquery_table(project=project,
dataset=dataset,
table=table,
columns=columns,
condition=conditions,
client_project=client_project)
>>> df.head(3)
# sepal_width petal_width species
0 2.5 1.7 virginica
1 2.5 2 virginica
2 2.2 1.5 virginica
>>>
'''
# Instantiate the table path and the reading session
bq_table = f'projects/{project}/datasets/{dataset}/tables/{table}'
req_sess = google.cloud.bigquery_storage.types.ReadSession(table=bq_table, data_format=google.cloud.bigquery_storage.types.DataFormat.ARROW)
# Read options
req_sess.read_options.selected_fields = columns
req_sess.read_options.row_restriction = condition
# Instantiate the reading client
client = google.cloud.bigquery_storage.BigQueryReadClient(credentials=credentials)
parent = f'projects/{client_project or project}'
session = client.create_read_session(parent=parent, read_session=req_sess, max_stream_count=1)
reader = client.read_rows(session.streams[0].name)
if export is None:
arrow_table = reader.to_arrow(session)
return vaex.from_arrow_table(arrow_table)
else:
# We need to get the schema first - Get one RecordsBatch manually to get the schema
# Get the pages iterator
pages = reader.rows(session).pages
# Get the first batch
first_batch = pages.__next__().to_arrow()
# Get the schema
schema = first_batch.schema
# This does the writing - streams the batches to disk!
with vaex.file.open(path=export, mode='wb') as sink:
with pa.RecordBatchStreamWriter(sink, schema) as writer:
writer.write_batch(first_batch)
for page in pages:
batch = page.to_arrow()
writer.write_batch(batch)
return vaex.open(export)
| def from_table(project, dataset, table, columns=None, condition=None, export=None, client_project=None, credentials=None):
'''Download (stream) an entire Google BigQuery table locally.
:param str project: The Google BigQuery project that owns the table.
:param str dataset: The dataset the table is part of.
:param str table: The name of the table
:param list columns: A list of columns (field names) to download. If None, all columns will be downloaded.
:param str condition: SQL text filtering statement, similar to a WHERE clause in a query. Aggregates are not supported.
:param str export: Pass an filename or path to download the table as an Apache Arrow file, and leverage memory mapping. If `None` the DataFrame is in memory.
:param str client_project: The ID of the project that executes the query. Will be passed when creating a job. If `None`, it will be set with the same value as `project`.
:param credentials: The authorization credentials to attach to requests. See google.auth.credentials.Credentials for more details.
:rtype: DataFrame
Example:
>>> import os
>>> os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = '../path/to/project_access_key.json'
>>> from vaex.contrib.io.gbq import download_google_bigquery_table
>>> client_project = 'my_project_id'
>>> project = 'bigquery-public-data'
>>> dataset = 'ml_datasets'
>>> table = 'iris'
>>> columns = ['species', 'sepal_width', 'petal_width']
>>> conditions = 'species = "virginica"'
>>> df = download_google_bigquery_table(project=project,
dataset=dataset,
table=table,
columns=columns,
condition=conditions,
client_project=client_project)
>>> df.head(3)
# sepal_width petal_width species
0 2.5 1.7 virginica
1 2.5 2 virginica
2 2.2 1.5 virginica
>>>
'''
# Instantiate the table path and the reading session
bq_table = f'projects/{project}/datasets/{dataset}/tables/{table}'
req_sess = google.cloud.bigquery_storage.types.ReadSession(table=bq_table, data_format=google.cloud.bigquery_storage.types.DataFormat.ARROW)
# Read options
req_sess.read_options.selected_fields = columns
req_sess.read_options.row_restriction = condition
# Instantiate the reading client
client = google.cloud.bigquery_storage.BigQueryReadClient(credentials=credentials)
parent = f'projects/{client_project or project}'
session = client.create_read_session(parent=parent, read_session=req_sess, max_stream_count=1)
reader = client.read_rows(session.streams[0].name)
if export is None:
arrow_table = reader.to_arrow(session)
return vaex.from_arrow_table(arrow_table)
else:
# We need to get the schema first - Get one RecordsBatch manually to get the schema
# Get the pages iterator
pages = reader.rows(session).pages
# Get the first batch
first_batch = pages.__next__().to_arrow()
# Get the schema
schema = first_batch.schema
# This does the writing - streams the batches to disk!
with vaex.file.open(path=export, mode='wb') as sink:
with pa.RecordBatchStreamWriter(sink, schema) as writer:
writer.write_batch(first_batch)
for page in pages:
batch = page.to_arrow()
writer.write_batch(batch)
return vaex.open(export)
|
54,828 | def chop_in_blocks_vector_multi(v, idtodelete):
"""
Splits an array of vectors into two arrays of vectors, where
idtodelete specifies which elements of the vectors go into vb
"""
idtokeep = np.sort(list(set(np.arange(len(v[0]))) - set(idtodelete)))
va = v[:, idtokeep]
vb = v[:, idtodelete]
return (va, vb)
| def chop_in_blocks_vector_multi(v, idtodelete):
"""
Splits an array of vectors into two arrays of vectors, where
idtodelete specifies which elements of the vectors go into vb
"""
index_diff_set = set(np.arange(len(v[0]))) - set(idtodelete)
idtokeep = np.sort(list(index_diff_set))
va = v[:, idtokeep]
vb = v[:, idtodelete]
return (va, vb)
|
27,962 | def main(args):
"""
Perform analysis on the given logfiles and store the results in a machine-
readable format.
"""
logger.setup_logger(args.verbose if 'verbose' in args else None)
if len(args.logfile) != 1:
LOG.warning("Only one log file can be processed right now!")
sys.exit(1)
args.output_path = os.path.abspath(args.output_path)
if os.path.exists(args.output_path) and \
not os.path.isdir(args.output_path):
LOG.error("The given output path is not a directory: " +
args.output_path)
sys.exit(1)
if 'enable_all' in args:
LOG.info("'--enable-all' was supplied for this analysis.")
# We clear the output directory in the following cases.
ctu_dir = os.path.join(args.output_path, 'ctu-dir')
if 'ctu_phases' in args and args.ctu_phases[0] and \
os.path.isdir(ctu_dir):
# Clear the CTU-dir if the user turned on the collection phase.
LOG.debug("Previous CTU contents have been deleted.")
shutil.rmtree(ctu_dir)
if 'clean' in args and os.path.isdir(args.output_path):
LOG.info("Previous analysis results in '%s' have been removed, "
"overwriting with current result", args.output_path)
shutil.rmtree(args.output_path)
if not os.path.exists(args.output_path):
os.makedirs(args.output_path)
LOG.debug("args: " + str(args))
LOG.debug("Output will be stored to: '" + args.output_path + "'")
# Process the skip list if present.
skip_handler = __get_skip_handler(args)
# Enable alpha uniqueing by default if ctu analysis is used.
if 'none' in args.compile_uniqueing and 'ctu_phases' in args:
args.compile_uniqueing = "alpha"
compiler_info_file = None
if 'compiler_info_file' in args:
LOG.debug("Compiler info is read from: %s", args.compiler_info_file)
if not os.path.exists(args.compiler_info_file):
LOG.error("Compiler info file %s does not exist",
args.compiler_info_file)
sys.exit(1)
compiler_info_file = args.compiler_info_file
report_dir = args.output_path
context = analyzer_context.get_context()
analyzer_env = env.extend(context.path_env_extra,
context.ld_lib_path_extra)
analyzer_binary = context.analyzer_binaries.get("clangsa")
try:
analyzer_version_info = clangsa.version.get(analyzer_binary,
analyzer_env)
except subprocess.CalledProcessError as cerr:
LOG.error('Failed to get and parse clang version: %s',
analyzer_binary)
LOG.error(cerr)
analyzer_version_info = None
# Parse the JSON CCDBs and retrieve the compile commands.
actions = []
for log_file in args.logfile:
if not os.path.exists(log_file):
LOG.error("The specified logfile '%s' does not exist!",
log_file)
continue
actions += log_parser.parse_unique_log(
load_json_or_empty(log_file),
report_dir,
args.compile_uniqueing,
compiler_info_file,
args.keep_gcc_include_fixed,
analyzer_version_info,
analyzer_env)
if not actions:
LOG.info("No analysis is required.\nThere were no compilation "
"commands in the provided compilation database or "
"all of them were skipped.")
sys.exit(0)
uniqued_compilation_db_file = os.path.join(
args.output_path, "unique_compile_commands.json")
with open(uniqued_compilation_db_file, 'w') as f:
json.dump(actions, f,
cls=log_parser.CompileCommandEncoder)
metadata = {'action_num': len(actions),
'command': sys.argv,
'versions': {
'codechecker': "{0} ({1})".format(
context.package_git_tag,
context.package_git_hash)},
'working_directory': os.getcwd(),
'output_path': args.output_path,
'result_source_files': {}}
if 'name' in args:
metadata['name'] = args.name
# Update metadata dictionary with old values.
metadata_file = os.path.join(args.output_path, 'metadata.json')
if os.path.exists(metadata_file):
metadata_prev = load_json_or_empty(metadata_file)
metadata['result_source_files'] = \
metadata_prev['result_source_files']
analyzer.perform_analysis(args, skip_handler, context, actions, metadata)
__update_skip_file(args)
LOG.debug("Analysis metadata write to '%s'", metadata_file)
with open(metadata_file, 'w') as metafile:
json.dump(metadata, metafile)
# WARN: store command will search for this file!!!!
compile_cmd_json = os.path.join(args.output_path, 'compile_cmd.json')
try:
source = os.path.abspath(args.logfile[0])
target = os.path.abspath(compile_cmd_json)
if source != target:
shutil.copyfile(source, target)
except shutil.Error:
LOG.debug("Compilation database JSON file is the same.")
except Exception:
LOG.debug("Copying compilation database JSON file failed.")
try:
from codechecker_analyzer import analyzer_statistics
analyzer_statistics.collect(metadata, "analyze")
except Exception:
pass
| def main(args):
"""
Perform analysis on the given logfiles and store the results in a machine-
readable format.
"""
logger.setup_logger(args.verbose if 'verbose' in args else None)
if len(args.logfile) != 1:
LOG.warning("Only one log file can be processed right now!")
sys.exit(1)
args.output_path = os.path.abspath(args.output_path)
if os.path.exists(args.output_path) and \
not os.path.isdir(args.output_path):
LOG.error("The given output path is not a directory: " +
args.output_path)
sys.exit(1)
if 'enable_all' in args:
LOG.info("'--enable-all' was supplied for this analysis.")
# We clear the output directory in the following cases.
ctu_dir = os.path.join(args.output_path, 'ctu-dir')
if 'ctu_phases' in args and args.ctu_phases[0] and \
os.path.isdir(ctu_dir):
# Clear the CTU-dir if the user turned on the collection phase.
LOG.debug("Previous CTU contents have been deleted.")
shutil.rmtree(ctu_dir)
if 'clean' in args and os.path.isdir(args.output_path):
LOG.info("Previous analysis results in '%s' have been removed, "
"overwriting with current result", args.output_path)
shutil.rmtree(args.output_path)
if not os.path.exists(args.output_path):
os.makedirs(args.output_path)
LOG.debug("args: " + str(args))
LOG.debug("Output will be stored to: '" + args.output_path + "'")
# Process the skip list if present.
skip_handler = __get_skip_handler(args)
# Enable alpha uniqueing by default if ctu analysis is used.
if 'none' in args.compile_uniqueing and 'ctu_phases' in args:
args.compile_uniqueing = "alpha"
compiler_info_file = None
if 'compiler_info_file' in args:
LOG.debug("Compiler info is read from: %s", args.compiler_info_file)
if not os.path.exists(args.compiler_info_file):
LOG.error("Compiler info file %s does not exist",
args.compiler_info_file)
sys.exit(1)
compiler_info_file = args.compiler_info_file
report_dir = args.output_path
context = analyzer_context.get_context()
analyzer_env = env.extend(context.path_env_extra,
context.ld_lib_path_extra)
analyzer_binary = context.analyzer_binaries.get(clangsa.ANALYZER_NAME)
try:
analyzer_version_info = clangsa.version.get(analyzer_binary,
analyzer_env)
except subprocess.CalledProcessError as cerr:
LOG.error('Failed to get and parse clang version: %s',
analyzer_binary)
LOG.error(cerr)
analyzer_version_info = None
# Parse the JSON CCDBs and retrieve the compile commands.
actions = []
for log_file in args.logfile:
if not os.path.exists(log_file):
LOG.error("The specified logfile '%s' does not exist!",
log_file)
continue
actions += log_parser.parse_unique_log(
load_json_or_empty(log_file),
report_dir,
args.compile_uniqueing,
compiler_info_file,
args.keep_gcc_include_fixed,
analyzer_version_info,
analyzer_env)
if not actions:
LOG.info("No analysis is required.\nThere were no compilation "
"commands in the provided compilation database or "
"all of them were skipped.")
sys.exit(0)
uniqued_compilation_db_file = os.path.join(
args.output_path, "unique_compile_commands.json")
with open(uniqued_compilation_db_file, 'w') as f:
json.dump(actions, f,
cls=log_parser.CompileCommandEncoder)
metadata = {'action_num': len(actions),
'command': sys.argv,
'versions': {
'codechecker': "{0} ({1})".format(
context.package_git_tag,
context.package_git_hash)},
'working_directory': os.getcwd(),
'output_path': args.output_path,
'result_source_files': {}}
if 'name' in args:
metadata['name'] = args.name
# Update metadata dictionary with old values.
metadata_file = os.path.join(args.output_path, 'metadata.json')
if os.path.exists(metadata_file):
metadata_prev = load_json_or_empty(metadata_file)
metadata['result_source_files'] = \
metadata_prev['result_source_files']
analyzer.perform_analysis(args, skip_handler, context, actions, metadata)
__update_skip_file(args)
LOG.debug("Analysis metadata write to '%s'", metadata_file)
with open(metadata_file, 'w') as metafile:
json.dump(metadata, metafile)
# WARN: store command will search for this file!!!!
compile_cmd_json = os.path.join(args.output_path, 'compile_cmd.json')
try:
source = os.path.abspath(args.logfile[0])
target = os.path.abspath(compile_cmd_json)
if source != target:
shutil.copyfile(source, target)
except shutil.Error:
LOG.debug("Compilation database JSON file is the same.")
except Exception:
LOG.debug("Copying compilation database JSON file failed.")
try:
from codechecker_analyzer import analyzer_statistics
analyzer_statistics.collect(metadata, "analyze")
except Exception:
pass
|
32,448 | def main() -> None: # pragma: no cover
params = demisto.params()
url = params.get('url')
api_version = params.get('api_version')
token = demisto.params().get('credentials', {}).get('password')
base_url = urljoin(url, f'/api/{api_version}/')
verify_certificate = not demisto.params().get('insecure', False)
proxy = demisto.params().get('proxy', False)
first_fetch = params.get('first_fetch')
max_fetch = params.get('max_fetch')
vendor, product = params.get('vendor', 'netskope'), params.get('product', 'netskope')
demisto.debug(f'Command being called is {demisto.command()}')
try:
client = Client(base_url, token, api_version, verify_certificate, proxy)
last_run = demisto.getLastRun()
if 'alert' not in last_run and 'application' not in last_run and 'audit' not in last_run \
and 'network' not in last_run:
last_run = arg_to_seconds_timestamp(first_fetch)
last_run = {
'alert': last_run,
'application': last_run,
'audit': last_run,
'network': last_run
}
if demisto.command() == 'test-module':
# This is the call made when pressing the integration Test button.
result = test_module(client, api_version, last_run)
return_results(result)
elif demisto.command() == 'netskope-get-events':
if api_version == 'v1':
return_results(v1_get_events_command(client, demisto.args(), last_run))
else:
return_results(v2_get_events_command(client, demisto.args(), last_run))
elif demisto.command() == 'fetch-events':
if api_version == 'v1':
events = client.get_events_request_v1(last_run, max_fetch)
alerts = client.v1_get_alerts_request(last_run, max_fetch)
if alerts:
events.extend(alerts)
demisto.setLastRun(get_last_run(events, last_run))
demisto.debug(f'Setting the last_run to: {last_run}')
send_events_to_xsiam(events=events, vendor=vendor, product=product)
else:
events = client.get_events_request_v2(last_run, max_fetch)
demisto.setLastRun(get_last_run(events, last_run))
demisto.debug(f'Setting the last_run to: {last_run}')
send_events_to_xsiam(events=events, vendor=vendor, product=product)
# Log exceptions and return errors
except Exception as e:
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
| def main() -> None: # pragma: no cover
params = demisto.params()
url = params.get('url')
api_version = params.get('api_version')
token = demisto.params().get('credentials', {}).get('password')
base_url = urljoin(url, f'/api/{api_version}/')
verify_certificate = not demisto.params().get('insecure', False)
proxy = demisto.params().get('proxy', False)
first_fetch = params.get('first_fetch')
max_fetch = arg_to_number(params.get('max_fetch'))
vendor, product = params.get('vendor', 'netskope'), params.get('product', 'netskope')
demisto.debug(f'Command being called is {demisto.command()}')
try:
client = Client(base_url, token, api_version, verify_certificate, proxy)
last_run = demisto.getLastRun()
if 'alert' not in last_run and 'application' not in last_run and 'audit' not in last_run \
and 'network' not in last_run:
last_run = arg_to_seconds_timestamp(first_fetch)
last_run = {
'alert': last_run,
'application': last_run,
'audit': last_run,
'network': last_run
}
if demisto.command() == 'test-module':
# This is the call made when pressing the integration Test button.
result = test_module(client, api_version, last_run)
return_results(result)
elif demisto.command() == 'netskope-get-events':
if api_version == 'v1':
return_results(v1_get_events_command(client, demisto.args(), last_run))
else:
return_results(v2_get_events_command(client, demisto.args(), last_run))
elif demisto.command() == 'fetch-events':
if api_version == 'v1':
events = client.get_events_request_v1(last_run, max_fetch)
alerts = client.v1_get_alerts_request(last_run, max_fetch)
if alerts:
events.extend(alerts)
demisto.setLastRun(get_last_run(events, last_run))
demisto.debug(f'Setting the last_run to: {last_run}')
send_events_to_xsiam(events=events, vendor=vendor, product=product)
else:
events = client.get_events_request_v2(last_run, max_fetch)
demisto.setLastRun(get_last_run(events, last_run))
demisto.debug(f'Setting the last_run to: {last_run}')
send_events_to_xsiam(events=events, vendor=vendor, product=product)
# Log exceptions and return errors
except Exception as e:
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
|
47,113 | def load_tf_weights_in_gpt_neo(model, config, gpt_neo_checkpoint_path):
"""Load tf checkpoints in a pytorch model"""
try:
import re
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise
tf_path = os.path.abspath(gpt_neo_checkpoint_path)
logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
arrays = []
for name, shape in init_vars:
if "global_step" not in name and "adam" not in name:
array = tf.train.load_variable(tf_path, name)
array = tf.dtypes.cast(array.squeeze(), tf.float32).numpy()
name = name.replace("attn/q", "attn/attention/q_proj/w")
name = name.replace("attn/k", "attn/attention/k_proj/w")
name = name.replace("attn/v", "attn/attention/v_proj/w")
name = name.replace("attn/o", "attn/attention/out_proj/w")
name = name.replace("norm_1", "ln_1")
name = name.replace("norm_2", "ln_2")
name = name.replace("attn/compute_output_bias/o_b", "attn/attention/out_proj/b")
name = name.replace("conv1d_main/c_fc/kernel", "c_fc/w")
name = name.replace("conv1d_main/c_fc/bias", "c_fc/b")
name = name.replace("conv1d_main/c_proj/kernel", "c_proj/w")
name = name.replace("conv1d_main/c_proj/bias", "c_proj/b")
names.append(name)
arrays.append(array)
for name, array in zip(names, arrays):
name = name[5:] # skip "gpt2/"
name = name.split("/")
pointer = model.transformer
for m_name in name:
if re.fullmatch(r"[A-Za-z]+\d+", m_name):
scope_names = re.split(r"(\d+)", m_name)
else:
scope_names = [m_name]
if scope_names[0] == "w" or scope_names[0] == "g":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "b":
pointer = getattr(pointer, "bias")
elif scope_names[0] == "wpe" or scope_names[0] == "wte":
pointer = getattr(pointer, scope_names[0])
pointer = getattr(pointer, "weight")
else:
pointer = getattr(pointer, scope_names[0])
if len(scope_names) >= 2:
num = int(scope_names[1])
pointer = pointer[num]
if name[-1] == "w" and name[-2] in ["out_proj", "k_proj", "q_proj", "v_proj", "c_proj", "c_fc"]:
array = array.transpose()
if name == ["wte"]:
# if vocab is padded, then trim off the padding embeddings
array = array[:config.vocab_size]
try:
assert (
pointer.shape == array.shape
), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched {name}"
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
print(f"Initialize PyTorch weight {name}")
pointer.data = torch.from_numpy(array)
# init the final linear layer using word embeddings
embs = model.transformer.wte.weight
lin = nn.Linear(embs.size()[1], embs.size()[0], bias=False)
lin.weight = embs
model.set_output_embeddings(lin)
return model
| def load_tf_weights_in_gpt_neo(model, config, gpt_neo_checkpoint_path):
"""Load tf checkpoints in a pytorch model"""
try:
import re
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise
tf_path = os.path.abspath(gpt_neo_checkpoint_path)
logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
arrays = []
for name, shape in init_vars:
if "global_step" not in name and "adam" not in name:
array = tf.train.load_variable(tf_path, name)
array = tf.dtypes.cast(array.squeeze(), tf.float32).numpy()
name = name.replace("attn/q", "attn/attention/q_proj/w")
name = name.replace("attn/k", "attn/attention/k_proj/w")
name = name.replace("attn/v", "attn/attention/v_proj/w")
name = name.replace("attn/o", "attn/attention/out_proj/w")
name = name.replace("norm_1", "ln_1")
name = name.replace("norm_2", "ln_2")
name = name.replace("attn/compute_output_bias/o_b", "attn/attention/out_proj/b")
name = name.replace("conv1d_main/c_fc/kernel", "c_fc/w")
name = name.replace("conv1d_main/c_fc/bias", "c_fc/b")
name = name.replace("conv1d_main/c_proj/kernel", "c_proj/w")
name = name.replace("conv1d_main/c_proj/bias", "c_proj/b")
names.append(name)
arrays.append(array)
for name, array in zip(names, arrays):
name = name[5:] # skip "gpt2/"
name = name.split("/")
pointer = model.transformer
for m_name in name:
if re.fullmatch(r"[A-Za-z]+\d+", m_name):
scope_names = re.split(r"(\d+)", m_name)
else:
scope_names = [m_name]
if scope_names[0] == "w" or scope_names[0] == "g":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "b":
pointer = getattr(pointer, "bias")
elif scope_names[0] == "wpe" or scope_names[0] == "wte":
pointer = getattr(pointer, scope_names[0])
pointer = getattr(pointer, "weight")
else:
pointer = getattr(pointer, scope_names[0])
if len(scope_names) >= 2:
num = int(scope_names[1])
pointer = pointer[num]
if name[-1] == "w" and name[-2] in ["out_proj", "k_proj", "q_proj", "v_proj", "c_proj", "c_fc"]:
array = array.transpose()
if name == ["wte"]:
# if vocab is padded, then trim off the padding embeddings
array = array[: config.vocab_size]
try:
assert (
pointer.shape == array.shape
), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched {name}"
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
print(f"Initialize PyTorch weight {name}")
pointer.data = torch.from_numpy(array)
# init the final linear layer using word embeddings
embs = model.transformer.wte.weight
lin = nn.Linear(embs.size()[1], embs.size()[0], bias=False)
lin.weight = embs
model.set_output_embeddings(lin)
return model
|
25,005 | def _is_part_of_assignment_target(node: nodes.NodeNG) -> bool:
"""Check whether use of a variable is happening as part of the left-hand
side of an assignment.
This requires recursive checking, because destructuring assignment can have
arbitrarily nested tuples and lists to unpack.
"""
if isinstance(node.parent, nodes.Assign):
return node in node.parent.targets
if isinstance(node.parent, nodes.AugAssign):
return node == node.parent.target
if isinstance(node.parent, (nodes.Tuple, nodes.List)):
return _is_part_of_assignment_target(node.parent)
return None
| def _is_part_of_assignment_target(node: nodes.NodeNG) -> bool:
"""Check whether use of a variable is happening as part of the left-hand
side of an assignment.
This requires recursive checking, because destructuring assignment can have
arbitrarily nested tuples and lists to unpack.
"""
if isinstance(node.parent, nodes.Assign):
return node in node.parent.targets
if isinstance(node.parent, nodes.AugAssign):
return node == node.parent.target
if isinstance(node.parent, nodes.BaseContainer):
return _is_part_of_assignment_target(node.parent)
return None
|
14,540 | def validate_nslr_data(eye_positions: np.ndarray, eye_timestamps: np.ndarray):
def has_nan(arr: np.ndarray):
return np.any(np.isnan(arr))
def is_monotonic(arr: np.ndarray):
return np.all(arr[:-1] <= arr[1:])
def is_unique(arr: np.ndarray):
return arr.shape == np.unique(arr, axis=0).shape
if has_nan(eye_positions):
raise ValueError("Gaze data contains NaN values")
if not is_monotonic(eye_timestamps):
raise ValueError("Gaze timestamps contains NaN values")
if not is_monotonic(eye_timestamps):
raise ValueError("Gaze timestamps are not monotonic")
if not is_unique(eye_timestamps):
raise ValueError("Gaze timestamps are not unique. Please recalculate gaze mapping with only 1 mapper enabled")
| def validate_nslr_data(eye_positions: np.ndarray, eye_timestamps: np.ndarray):
def has_nan(arr: np.ndarray):
return np.any(np.isnan(arr))
def is_monotonic(arr: np.ndarray):
return np.all(arr[:-1] <= arr[1:])
def is_unique(arr: np.ndarray):
return arr.shape == np.unique(arr, axis=0).shape
if has_nan(eye_positions):
raise ValueError("Gaze data contains NaN values")
if not is_monotonic(eye_timestamps):
raise ValueError("Gaze timestamps contains NaN values")
if not is_monotonic(eye_timestamps):
raise ValueError("Gaze timestamps are not monotonic")
if not is_unique(eye_timestamps):
raise ValueError("Gaze timestamps are not unique. If you are using Offline Calibration, please disable all but one gaze mapping section.")
|
43,366 | def bloch_messiah(cov):
r"""Performs the Bloch-Messiah decomposition of single mode
Gaussian state.
Args:
cov (array): :math:`2\times 2` covariance matrix.
Returns:
tuple: mean photon number, rotation angle, and
squeezing magnitude of the Gaussian state.
"""
det = np.linalg.det(cov)
nbar = (np.sqrt(det)-1)/2
mm = cov/np.sqrt(det)
a = mm[0, 0]
b = mm[0, 1]
r = -0.5*np.arccosh((1+a*a+b*b)/(2*a))
phi = 0.5*np.arctan2((2*a*b), (-1+a*a-b*b))
return nbar, phi, r
| def bloch_messiah(cov):
r"""Performs the Bloch-Messiah decomposition of a single-mode
Gaussian state.
Args:
cov (array): :math:`2\times 2` covariance matrix.
Returns:
tuple: mean photon number, rotation angle, and
squeezing magnitude of the Gaussian state.
"""
det = np.linalg.det(cov)
nbar = (np.sqrt(det)-1)/2
mm = cov/np.sqrt(det)
a = mm[0, 0]
b = mm[0, 1]
r = -0.5*np.arccosh((1+a*a+b*b)/(2*a))
phi = 0.5*np.arctan2((2*a*b), (-1+a*a-b*b))
return nbar, phi, r
|
467 | def send_HTML_email(subject, recipient, html_content, text_content=None,
cc=None, email_from=settings.DEFAULT_FROM_EMAIL,
file_attachments=None, bcc=None,
smtp_exception_skip_list=None, messaging_event_id=None):
recipients = list(recipient) if not isinstance(recipient, str) else [recipient]
filtered_recipients = get_valid_recipients(recipients)
bounced_addresses = list(set(recipients) - set(filtered_recipients))
if bounced_addresses and messaging_event_id:
mark_subevent_bounced(bounced_addresses, messaging_event_id)
if not filtered_recipients:
# todo address root issues by throwing a real error to catch upstream
# fail silently for now to fix time-sensitive SES issue
return
if not isinstance(html_content, str):
html_content = html_content.decode('utf-8')
if not text_content:
text_content = getattr(settings, 'NO_HTML_EMAIL_MESSAGE',
NO_HTML_EMAIL_MESSAGE)
elif not isinstance(text_content, str):
text_content = text_content.decode('utf-8')
headers = {'From': email_from} # From-header
if settings.RETURN_PATH_EMAIL:
headers['Return-Path'] = settings.RETURN_PATH_EMAIL
if messaging_event_id is not None:
headers[COMMCARE_MESSAGE_ID_HEADER] = messaging_event_id
if settings.SES_CONFIGURATION_SET is not None:
headers[SES_CONFIGURATION_SET_HEADER] = settings.SES_CONFIGURATION_SET
connection = get_connection()
msg = EmailMultiAlternatives(subject, text_content, email_from,
filtered_recipients, headers=headers,
connection=connection, cc=cc, bcc=bcc)
for file in (file_attachments or []):
if file:
msg.attach(file["title"], file["file_obj"].getvalue(),
file["mimetype"])
msg.attach_alternative(html_content, "text/html")
try:
msg.send()
except SMTPDataError as e:
# If the SES configuration has not been properly set up, resend the message
if (
b"Configuration Set does not exist" in e.smtp_error
and SES_CONFIGURATION_SET_HEADER in msg.extra_headers
):
del msg.extra_headers[SES_CONFIGURATION_SET_HEADER]
msg.send()
notify_exception(None, message="SES Configuration Set missing", details={'error': e})
else:
raise
except SMTPSenderRefused as e:
if smtp_exception_skip_list and e.smtp_code in smtp_exception_skip_list:
raise e
else:
error_subject = _('ERROR: Could not send "%(subject)s"') % {
'subject': subject,
}
if e.smtp_code in LARGE_FILE_SIZE_ERROR_CODES:
error_text = _('Could not send email: file size is too large.')
else:
error_text = e.smtp_error
error_text = '%s\n\n%s' % (
error_text,
_('Please contact %(support_email)s for assistance.') % {
'support_email': settings.SUPPORT_EMAIL,
},
)
error_msg = EmailMultiAlternatives(
error_subject,
error_text,
email_from,
filtered_recipients,
headers=headers,
connection=connection,
cc=cc,
bcc=bcc,
)
error_msg.send()
| def send_HTML_email(subject, recipient, html_content, text_content=None,
cc=None, email_from=settings.DEFAULT_FROM_EMAIL,
file_attachments=None, bcc=None,
smtp_exception_skip_list=None, messaging_event_id=None):
recipients = list(recipient) if not isinstance(recipient, str) else [recipient]
filtered_recipients = get_valid_recipients(recipients)
bounced_addresses = list(set(recipients) - set(filtered_recipients))
if bounced_addresses and messaging_event_id:
mark_subevent_bounced(bounced_addresses, messaging_event_id)
if not filtered_recipients:
# todo address root issues by throwing a real error to catch upstream
# fail silently for now to fix time-sensitive SES issue
return
if not isinstance(html_content, str):
html_content = html_content.decode('utf-8')
if not text_content:
text_content = getattr(settings, 'NO_HTML_EMAIL_MESSAGE',
NO_HTML_EMAIL_MESSAGE)
elif not isinstance(text_content, str):
text_content = text_content.decode('utf-8')
headers = {'From': email_from} # From-header
if settings.RETURN_PATH_EMAIL:
headers['Return-Path'] = settings.RETURN_PATH_EMAIL
if messaging_event_id is not None:
headers[COMMCARE_MESSAGE_ID_HEADER] = messaging_event_id
if settings.SES_CONFIGURATION_SET is not None:
headers[SES_CONFIGURATION_SET_HEADER] = settings.SES_CONFIGURATION_SET
connection = get_connection()
msg = EmailMultiAlternatives(subject, text_content, email_from,
filtered_recipients, headers=headers,
connection=connection, cc=cc, bcc=bcc)
for file in (file_attachments or []):
if file:
msg.attach(file["title"], file["file_obj"].getvalue(),
file["mimetype"])
msg.attach_alternative(html_content, "text/html")
try:
msg.send()
except SMTPDataError as e:
# If the SES configuration has not been properly set up, resend the message
if (
"Configuration Set does not exist" in repr(e.smtp_error)
and SES_CONFIGURATION_SET_HEADER in msg.extra_headers
):
del msg.extra_headers[SES_CONFIGURATION_SET_HEADER]
msg.send()
notify_exception(None, message="SES Configuration Set missing", details={'error': e})
else:
raise
except SMTPSenderRefused as e:
if smtp_exception_skip_list and e.smtp_code in smtp_exception_skip_list:
raise e
else:
error_subject = _('ERROR: Could not send "%(subject)s"') % {
'subject': subject,
}
if e.smtp_code in LARGE_FILE_SIZE_ERROR_CODES:
error_text = _('Could not send email: file size is too large.')
else:
error_text = e.smtp_error
error_text = '%s\n\n%s' % (
error_text,
_('Please contact %(support_email)s for assistance.') % {
'support_email': settings.SUPPORT_EMAIL,
},
)
error_msg = EmailMultiAlternatives(
error_subject,
error_text,
email_from,
filtered_recipients,
headers=headers,
connection=connection,
cc=cc,
bcc=bcc,
)
error_msg.send()
|
31,584 | def get_ip_neighbors_command(client, args):
ipaddress = args.get('ipaddress')
res = client.get_ip_neighbors(ipaddress=ipaddress)
readable_output = tableToMarkdown(
f"IP neighbors for {ipaddress}:",
[{
"IP": x.get('ip', ''),
"Hostnames": x.get('hostnames', None),
"Sites": x.get('sites', 0),
"Ports": x.get('ports', None),
"Active Egress": x.get('active_egress')
} for x in res],
["IP", "Hostnames", "Sites", "Ports", "Active Egress"])
command_results = CommandResults(
outputs_prefix=f"SecurityTrails.IP",
outputs_key_field="ip",
outputs={
"ip": ipaddress,
"blocks": res
},
readable_output=readable_output
)
return_results(command_results)
create_standard_ip_context(
ip_data=[{
"Address": x.get('ip').split("/")[0]
} for x in res])
| def get_ip_neighbors_command(client, args):
ipaddress = args.get('ipaddress')
res = client.get_ip_neighbors(ipaddress=ipaddress)
readable_output = tableToMarkdown(
f"IP neighbors for {ipaddress}:",
[{
"IP": x.get('ip', ''),
"Hostnames": x.get('hostnames', None),
"Sites": x.get('sites', 0),
"Ports": x.get('ports', None),
"Active Egress": x.get('active_egress')
} for x in res],
)
command_results = CommandResults(
outputs_prefix=f"SecurityTrails.IP",
outputs_key_field="ip",
outputs={
"ip": ipaddress,
"blocks": res
},
readable_output=readable_output
)
return_results(command_results)
create_standard_ip_context(
ip_data=[{
"Address": x.get('ip').split("/")[0]
} for x in res])
|
58,118 | def generate_dbotscore(response: Dict) -> List:
"""Creates CommandResult object based on the contents of 'response' argument
and provides DBotScore objects.
Parameters
----------
response : dict
Object returned by ANYRUN API call in 'get_report' function.
Returns
-------
List
A list of CommandResults objects.
"""
data = response.get('data', {})
analysis = data.get('analysis', {})
main_object = analysis.get('content', {}).get('mainObject', {})
submission_type = main_object.get('type')
submission_type = 'hash' if submission_type in {'file', 'download'} else submission_type
threat_text = analysis.get('scores', {}).get('verdict', {}).get('threatLevelText', '').casefold()
reputation_map = {
"shared": Common.DBotScore.NONE,
"unknown": Common.DBotScore.NONE,
"whitelisted": Common.DBotScore.GOOD,
"malicious": Common.DBotScore.BAD,
"suspicious": Common.DBotScore.SUSPICIOUS
}
returned_data = []
main_entity = None
main_entity_type = None
# Add the hash or URL first
if submission_type == 'hash':
hashes = main_object.get('hashes', {})
info = main_object.get('info', {})
file_type = info.get('file')
exif = info.get('exif', {})
main_entity = hashes.get('sha256') or hashes.get('sha1') or hashes.get('md5')
main_entity_type = FeedIndicatorType.File
dbot_score = Common.DBotScore(
indicator=hashes.get('sha256') or hashes.get('sha1') or hashes.get('md5'),
indicator_type=DBotScoreType.FILE,
integration_name='ANYRUN',
score=THREAT_TEXT_TO_DBOTSCORE.get(threat_text) or Common.DBotScore.NONE
)
returned_data.append(CommandResults(
indicator=Common.File(
dbot_score=dbot_score,
md5=hashes.get('md5'),
sha1=hashes.get('sha1'),
sha256=hashes.get('sha256'),
file_type=file_type,
associated_file_names=exif.get('OriginalFileName')
)
))
else:
main_entity = main_object.get('url')
main_entity_type = FeedIndicatorType.URL
url_outputs = {
'Data': main_object.get('url')
}
dbot_score = Common.DBotScore(
indicator=main_object.get('url'),
indicator_type=DBotScoreType.URL,
integration_name='ANYRUN',
score=THREAT_TEXT_TO_DBOTSCORE.get(threat_text) or Common.DBotScore.NONE
)
if dbot_score.score >= 2:
url_outputs['Malicious'] = {
'Vendor': 'ANYRUN',
'Description': threat_text
}
returned_data.append(CommandResults(
outputs_prefix='URL',
outputs_key_field=['Data'],
outputs=url_outputs,
indicator=Common.URL(
url=main_object.get('url'),
dbot_score=dbot_score,
)
))
# Check if network information is available in the report
if 'network' in data:
network_data = data.get('network')
# Then add all the network-related indicators - 'connections'
if 'connections' in network_data:
connections = network_data.get('connections')
for current_connection in connections:
reputation = current_connection.get('Reputation')
if reputation in reputation_map.keys():
current_dbot_score = Common.DBotScore(
indicator=current_connection.get('IP'),
indicator_type=DBotScoreType.IP,
integration_name='ANYRUN',
score=reputation_map[reputation]
)
relationships = [EntityRelationship(
name=EntityRelationship.Relationships.COMMUNICATED_WITH,
entity_a=main_entity,
entity_a_type=main_entity_type,
entity_b=current_connection.get('IP'),
entity_b_type=FeedIndicatorType.IP,
brand="ANYRUN"
)]
ip_indicator = Common.IP(
ip=current_connection.get('IP'),
asn=current_connection.get('ASN'),
port=current_connection.get('Port'),
geo_country=current_connection.get('Country'),
dbot_score=current_dbot_score,
relationships=relationships
)
if current_connection.get('IP') not in [
x.indicator.ip for x in returned_data if isinstance(x.indicator, Common.IP)
]:
returned_data.append(CommandResults(
readable_output=tableToMarkdown(
f"{current_connection.get('IP')}",
[{
"Description": f"This IP was observed after detonation of {main_entity} in ANYRUN"
}]
),
indicator=ip_indicator,
relationships=relationships
))
# Then add all the network-related indicators - 'dnsRequests'
if 'dnsRequests' in network_data:
for current_dnsRequests in network_data.get('dnsRequests'):
reputation = current_dnsRequests.get('Reputation')
if reputation in reputation_map.keys():
current_dbot_score = Common.DBotScore(
indicator=current_dnsRequests.get('Domain'),
indicator_type=DBotScoreType.DOMAIN,
integration_name='ANYRUN',
score=reputation_map[reputation]
)
relationships = [EntityRelationship(
name=EntityRelationship.Relationships.COMMUNICATED_WITH,
entity_a=main_entity,
entity_a_type=main_entity_type,
entity_b=current_dnsRequests.get('Domain'),
entity_b_type=FeedIndicatorType.Domain,
brand="ANYRUN"
)]
if "IP" in current_dnsRequests:
for ip in current_dnsRequests.get('IP', []):
relationships.append(
EntityRelationship(
name=EntityRelationship.Relationships.RESOLVES_TO,
entity_a=current_dnsRequests.get('Domain'),
entity_a_type=FeedIndicatorType.Domain,
entity_b=ip,
entity_b_type=FeedIndicatorType.IP
)
)
domain_ip_dbot_score = Common.DBotScore(
indicator=ip,
indicator_type=DBotScoreType.IP,
integration_name="ANYRUN",
score=Common.DBotScore.NONE
)
domain_ip_indicator = Common.IP(
ip=ip,
dbot_score=domain_ip_dbot_score
)
returned_data.append(CommandResults(
indicator=domain_ip_indicator,
readable_output=tableToMarkdown(
f"{ip}",
[{
"Description": f"This IP was resovled from {current_dnsRequests.get('Domain')}"
}]
)
))
domain_indicator = Common.Domain(
domain=current_dnsRequests.get('Domain'),
dbot_score=current_dbot_score,
relationships=relationships
)
if current_dnsRequests.get('Domain') not in [
x.indicator.domain for x in returned_data if isinstance(x.indicator, Common.Domain)
]:
returned_data.append(CommandResults(
readable_output=tableToMarkdown(
f"{current_dnsRequests.get('Domain')}",
[{
"Description": f"This domain was observed after detonation of {main_entity} in ANYRUN"
}]
),
indicator=domain_indicator,
relationships=relationships
))
# Then add all the network-related indicators - 'httpRequests'
if 'httpRequests' in network_data:
for current_httpRequests in network_data.get('httpRequests'):
reputation = current_httpRequests['Reputation']
if reputation in reputation_map.keys():
current_dbot_score = Common.DBotScore(
indicator=current_httpRequests.get('URL'),
indicator_type=DBotScoreType.URL,
integration_name='ANYRUN',
score=reputation_map[reputation]
)
relationships = [EntityRelationship(
name=EntityRelationship.Relationships.COMMUNICATED_WITH,
entity_a=main_entity,
entity_a_type=main_entity_type,
entity_b=current_httpRequests.get('URL'),
entity_b_type=FeedIndicatorType.URL,
brand="ANYRUN"
)]
url_indicator = Common.URL(
url=current_httpRequests.get('URL'),
geo_country=current_httpRequests.get('Country'),
port=current_httpRequests.get('Port'),
dbot_score=current_dbot_score,
relationships=relationships
)
if current_httpRequests.get('URL') not in [
x.indicator.url for x in returned_data if isinstance(x.indicator, Common.URL)
]:
returned_data.append(CommandResults(
readable_output=tableToMarkdown(
f"{current_httpRequests.get('URL')}",
[{
"Description": f"This URL was observed after detonation of {main_entity} in ANYRUN"
}]
),
indicator=url_indicator,
relationships=relationships
))
if 'mitre' in data:
mitre_data = data.get('mitre')
for item in mitre_data:
relationships = [EntityRelationship(
name=EntityRelationship.Relationships.RELATED_TO,
entity_a=main_entity,
entity_a_type=main_entity_type,
entity_b=item.get('name'),
entity_b_type='Attack Pattern'
)]
attack_indicator = Common.AttackPattern(
stix_id=None,
value=item.get('name'),
mitre_id=item.get('id')
)
returned_data.append(CommandResults(
readable_output=tableToMarkdown(
f"{item.get('name')}",
[{
"Description": f"This Attack Pattern was observed after detonation of {main_entity} in ANYRUN"
}]
),
indicator=attack_indicator,
relationships=relationships
))
return returned_data
| def generate_dbotscore(response: Dict) -> List:
"""Creates CommandResult object based on the contents of 'response' argument
and provides DBotScore objects.
Parameters
----------
response : dict
Object returned by ANYRUN API call in 'get_report' function.
Returns
-------
List
A list of CommandResults objects.
"""
data = response.get('data', {})
analysis = data.get('analysis', {})
main_object = analysis.get('content', {}).get('mainObject', {})
submission_type = main_object.get('type')
submission_type = 'hash' if submission_type in {'file', 'download'} else submission_type
threat_text = analysis.get('scores', {}).get('verdict', {}).get('threatLevelText', '').casefold()
reputation_map = {
"shared": Common.DBotScore.NONE,
"unknown": Common.DBotScore.NONE,
"whitelisted": Common.DBotScore.GOOD,
"malicious": Common.DBotScore.BAD,
"suspicious": Common.DBotScore.SUSPICIOUS
}
returned_data = []
main_entity = None
main_entity_type = None
# Add the hash or URL first
if submission_type == 'hash':
hashes = main_object.get('hashes', {})
info = main_object.get('info', {})
file_type = info.get('file')
exif = info.get('exif', {})
main_entity = hashes.get('sha256') or hashes.get('sha1') or hashes.get('md5')
main_entity_type = FeedIndicatorType.File
dbot_score = Common.DBotScore(
indicator=hashes.get('sha256') or hashes.get('sha1') or hashes.get('md5'),
indicator_type=DBotScoreType.FILE,
integration_name='ANYRUN',
score=THREAT_TEXT_TO_DBOTSCORE.get(threat_text) or Common.DBotScore.NONE
)
returned_data.append(CommandResults(
indicator=Common.File(
dbot_score=dbot_score,
md5=hashes.get('md5'),
sha1=hashes.get('sha1'),
sha256=hashes.get('sha256'),
file_type=file_type,
associated_file_names=exif.get('OriginalFileName')
)
))
else:
main_entity = main_object.get('url')
main_entity_type = FeedIndicatorType.URL
url_outputs = {
'Data': main_object.get('url')
}
dbot_score = Common.DBotScore(
indicator=main_object.get('url'),
indicator_type=DBotScoreType.URL,
integration_name='ANYRUN',
score=THREAT_TEXT_TO_DBOTSCORE.get(threat_text) or Common.DBotScore.NONE
)
if dbot_score.score >= 2:
url_outputs['Malicious'] = {
'Vendor': 'ANYRUN',
'Description': threat_text
}
returned_data.append(CommandResults(
outputs_prefix='URL',
outputs_key_field=['Data'],
outputs=url_outputs,
indicator=Common.URL(
url=main_object.get('url'),
dbot_score=dbot_score,
)
))
# Check if network information is available in the report
if 'network' in data:
network_data = data.get('network')
# Then add all the network-related indicators - 'connections'
if 'connections' in network_data:
connections = network_data.get('connections')
for current_connection in connections:
reputation = current_connection.get('Reputation')
if score := reputation_map.get(reputation):
current_dbot_score = Common.DBotScore(
indicator=current_connection.get('IP'),
indicator_type=DBotScoreType.IP,
integration_name='ANYRUN',
score=score
)
relationships = [EntityRelationship(
name=EntityRelationship.Relationships.COMMUNICATED_WITH,
entity_a=main_entity,
entity_a_type=main_entity_type,
entity_b=current_connection.get('IP'),
entity_b_type=FeedIndicatorType.IP,
brand="ANYRUN"
)]
ip_indicator = Common.IP(
ip=current_connection.get('IP'),
asn=current_connection.get('ASN'),
port=current_connection.get('Port'),
geo_country=current_connection.get('Country'),
dbot_score=current_dbot_score,
relationships=relationships
)
if current_connection.get('IP') not in [
x.indicator.ip for x in returned_data if isinstance(x.indicator, Common.IP)
]:
returned_data.append(CommandResults(
readable_output=tableToMarkdown(
f"{current_connection.get('IP')}",
[{
"Description": f"This IP was observed after detonation of {main_entity} in ANYRUN"
}]
),
indicator=ip_indicator,
relationships=relationships
))
# Then add all the network-related indicators - 'dnsRequests'
if 'dnsRequests' in network_data:
for current_dnsRequests in network_data.get('dnsRequests'):
reputation = current_dnsRequests.get('Reputation')
if reputation in reputation_map.keys():
current_dbot_score = Common.DBotScore(
indicator=current_dnsRequests.get('Domain'),
indicator_type=DBotScoreType.DOMAIN,
integration_name='ANYRUN',
score=reputation_map[reputation]
)
relationships = [EntityRelationship(
name=EntityRelationship.Relationships.COMMUNICATED_WITH,
entity_a=main_entity,
entity_a_type=main_entity_type,
entity_b=current_dnsRequests.get('Domain'),
entity_b_type=FeedIndicatorType.Domain,
brand="ANYRUN"
)]
if "IP" in current_dnsRequests:
for ip in current_dnsRequests.get('IP', []):
relationships.append(
EntityRelationship(
name=EntityRelationship.Relationships.RESOLVES_TO,
entity_a=current_dnsRequests.get('Domain'),
entity_a_type=FeedIndicatorType.Domain,
entity_b=ip,
entity_b_type=FeedIndicatorType.IP
)
)
domain_ip_dbot_score = Common.DBotScore(
indicator=ip,
indicator_type=DBotScoreType.IP,
integration_name="ANYRUN",
score=Common.DBotScore.NONE
)
domain_ip_indicator = Common.IP(
ip=ip,
dbot_score=domain_ip_dbot_score
)
returned_data.append(CommandResults(
indicator=domain_ip_indicator,
readable_output=tableToMarkdown(
f"{ip}",
[{
"Description": f"This IP was resovled from {current_dnsRequests.get('Domain')}"
}]
)
))
domain_indicator = Common.Domain(
domain=current_dnsRequests.get('Domain'),
dbot_score=current_dbot_score,
relationships=relationships
)
if current_dnsRequests.get('Domain') not in [
x.indicator.domain for x in returned_data if isinstance(x.indicator, Common.Domain)
]:
returned_data.append(CommandResults(
readable_output=tableToMarkdown(
f"{current_dnsRequests.get('Domain')}",
[{
"Description": f"This domain was observed after detonation of {main_entity} in ANYRUN"
}]
),
indicator=domain_indicator,
relationships=relationships
))
# Then add all the network-related indicators - 'httpRequests'
if 'httpRequests' in network_data:
for current_httpRequests in network_data.get('httpRequests'):
reputation = current_httpRequests['Reputation']
if reputation in reputation_map.keys():
current_dbot_score = Common.DBotScore(
indicator=current_httpRequests.get('URL'),
indicator_type=DBotScoreType.URL,
integration_name='ANYRUN',
score=reputation_map[reputation]
)
relationships = [EntityRelationship(
name=EntityRelationship.Relationships.COMMUNICATED_WITH,
entity_a=main_entity,
entity_a_type=main_entity_type,
entity_b=current_httpRequests.get('URL'),
entity_b_type=FeedIndicatorType.URL,
brand="ANYRUN"
)]
url_indicator = Common.URL(
url=current_httpRequests.get('URL'),
geo_country=current_httpRequests.get('Country'),
port=current_httpRequests.get('Port'),
dbot_score=current_dbot_score,
relationships=relationships
)
if current_httpRequests.get('URL') not in [
x.indicator.url for x in returned_data if isinstance(x.indicator, Common.URL)
]:
returned_data.append(CommandResults(
readable_output=tableToMarkdown(
f"{current_httpRequests.get('URL')}",
[{
"Description": f"This URL was observed after detonation of {main_entity} in ANYRUN"
}]
),
indicator=url_indicator,
relationships=relationships
))
if 'mitre' in data:
mitre_data = data.get('mitre')
for item in mitre_data:
relationships = [EntityRelationship(
name=EntityRelationship.Relationships.RELATED_TO,
entity_a=main_entity,
entity_a_type=main_entity_type,
entity_b=item.get('name'),
entity_b_type='Attack Pattern'
)]
attack_indicator = Common.AttackPattern(
stix_id=None,
value=item.get('name'),
mitre_id=item.get('id')
)
returned_data.append(CommandResults(
readable_output=tableToMarkdown(
f"{item.get('name')}",
[{
"Description": f"This Attack Pattern was observed after detonation of {main_entity} in ANYRUN"
}]
),
indicator=attack_indicator,
relationships=relationships
))
return returned_data
|
58,294 | def is_rpm_distribution(d: Distribution) -> bool:
return d in [
Distribution.fedora,
Distribution.mageia,
Distribution.centos,
Distribution.centos_epel,
Distribution.openmandriva,
Distribution.rocky,
Distribution.rocky_epel,
Distribution.alma,
Distribution.alma_epel
]
| def is_rpm_distribution(d: Distribution) -> bool:
return d in (
Distribution.fedora,
Distribution.mageia,
Distribution.centos,
Distribution.centos_epel,
Distribution.openmandriva,
Distribution.rocky,
Distribution.rocky_epel,
Distribution.alma,
Distribution.alma_epel
)
|
30,777 | def fetch_incidents():
last_run = demisto.getLastRun()
last_incidents_ids = []
if last_run:
last_fetch = last_run.get('time')
last_fetch = datetime.strptime(last_fetch, TIME_FORMAT)
last_incidents_ids = last_run.get('last_event_ids')
else:
# first time fetching
last_fetch = parse_date_range(demisto.params().get('fetch_time', '3 days'), TIME_FORMAT)[0]
LOG('iterating on detections, looking for more recent than {}'.format(last_fetch))
incidents = []
new_incidents_ids = []
for raw_detection in get_unacknowledged_detections(last_fetch, per_page=2):
LOG('found detection #{}'.format(raw_detection['id']))
incident = detection_to_incident(raw_detection)
# the rewJson is a string of dictionary e.g. - ('{"ID":2,"Type":5}')
incident_id = json.loads(incident['rawJSON']).get("ID")
if incident_id not in last_incidents_ids:
# makes sure that the incidents wasn't fetched before
incidents.append(incident)
new_incidents_ids.append(incident_id)
if incidents:
last_fetch = max([get_time_obj(incident['occurred']) for incident in incidents]) # noqa:F812
last_run = {'time': get_time_str(last_fetch), 'last_event_ids': new_incidents_ids}
return last_run, incidents
| def fetch_incidents():
last_run = demisto.getLastRun()
last_incidents_ids = []
if last_run:
last_fetch = last_run.get('time')
last_fetch = datetime.strptime(last_fetch, TIME_FORMAT)
last_incidents_ids = last_run.get('last_event_ids')
else:
# first time fetching
last_fetch = parse_date_range(demisto.params().get('fetch_time', '3 days'), TIME_FORMAT)[0]
LOG('iterating on detections, looking for more recent than {}'.format(last_fetch))
incidents = []
new_incidents_ids = []
for raw_detection in get_unacknowledged_detections(last_fetch, per_page=2):
LOG('found detection #{}'.format(raw_detection['id']))
incident = detection_to_incident(raw_detection)
# the rawJson is a string of dictionary e.g. - ('{"ID":2,"Type":5}')
incident_id = json.loads(incident['rawJSON']).get("ID")
if incident_id not in last_incidents_ids:
# makes sure that the incidents wasn't fetched before
incidents.append(incident)
new_incidents_ids.append(incident_id)
if incidents:
last_fetch = max([get_time_obj(incident['occurred']) for incident in incidents]) # noqa:F812
last_run = {'time': get_time_str(last_fetch), 'last_event_ids': new_incidents_ids}
return last_run, incidents
|
31,872 | def tc_update_indicator_command():
args = demisto.args()
indicator = args['indicator']
rating = args.get('rating')
confidence = args.get('confidence')
size = args.get('size')
dns_active = args.get('dnsActive')
whois_active = args.get('whoisActive')
false_positive = args.get('falsePositive', 'False') == 'True'
observations = int(args.get('observations', 0))
security_label = args.get('securityLabel')
threat_assess_confidence = int(args.get('threatAssessConfidence', -1))
threat_assess_rating = int(args.get('threatAssessRating', -1))
owner = args.get('owner', demisto.params()['defaultOrg'])
raw_indicators = tc_update_indicator(indicator, rating=rating, confidence=confidence, size=size,
dns_active=dns_active, whois_active=whois_active,
false_positive=false_positive, observations=observations,
security_label=security_label,
threat_assess_confidence=threat_assess_confidence,
threat_assess_rating=threat_assess_rating, owner=owner)
ec, indicators = create_context(raw_indicators)
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': raw_indicators,
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': '\n'.join('Indicator {} Updated Successfully'.format(ind['ID']) for ind in indicators),
'EntryContext': ec
})
| def tc_update_indicator_command():
args = demisto.args()
indicator = args['indicator']
rating = args.get('rating')
confidence = args.get('confidence')
size = args.get('size')
dns_active = args.get('dnsActive')
whois_active = args.get('whoisActive')
false_positive = args.get('falsePositive', 'False') == 'True'
observations = int(args.get('observations', 0))
security_label = args.get('securityLabel')
threat_assess_confidence = int(args.get('threatAssessConfidence', -1))
threat_assess_rating = int(args.get('threatAssessRating', -1))
owner = args.get('owner', demisto.params().get('defaultOrg'))
raw_indicators = tc_update_indicator(indicator, rating=rating, confidence=confidence, size=size,
dns_active=dns_active, whois_active=whois_active,
false_positive=false_positive, observations=observations,
security_label=security_label,
threat_assess_confidence=threat_assess_confidence,
threat_assess_rating=threat_assess_rating, owner=owner)
ec, indicators = create_context(raw_indicators)
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': raw_indicators,
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': '\n'.join('Indicator {} Updated Successfully'.format(ind['ID']) for ind in indicators),
'EntryContext': ec
})
|
33,283 | def convert_otio_to_svg(timeline, filepath):
renderer = SVGRenderer(2406.0, 1054.0)
image_margin = 10.0
font_size = 15
total_duration = 0
min_time = 0.0
max_time = 0.0
all_clips_data = []
clip_count = -1
for curr_clip in timeline.tracks[0]:
if isinstance(curr_clip, otio.schema.Clip):
avlbl_start = total_duration - curr_clip.source_range.start_time.value
min_time = min(min_time, avlbl_start)
src_start = total_duration
total_duration += curr_clip.source_range.duration.value
src_end = total_duration - 1
avlbl_end = (curr_clip.media_reference.available_range.start_time.value +
curr_clip.media_reference.available_range.duration.value -
curr_clip.source_range.start_time.value -
curr_clip.source_range.duration.value + total_duration - 1)
max_time = max(max_time, avlbl_end)
trim_start = curr_clip.source_range.start_time.value
trim_duration = curr_clip.source_range.duration.value
avlbl_duration = curr_clip.media_reference.available_range.duration.value
clip_count += 1
clip_data = ClipData(src_start, src_end, avlbl_start,
avlbl_end, avlbl_duration, trim_start,
trim_duration,
curr_clip.media_reference.target_url, clip_count)
all_clips_data.append(clip_data)
scale_x = (renderer.width - (2.0 * image_margin)) / (max_time - min_time + 1.0)
x_origin = (-min_time) * scale_x
clip_rect_ht = (renderer.height - (2.0 * image_margin) - (2.0 * font_size)) / \
((4.0 + len(all_clips_data)) * 2 - 1)
# Draw Timeline
timeline_origin = Point(x_origin, renderer.height - image_margin - clip_rect_ht)
renderer.draw_solid_rect_with_border(
Rect(timeline_origin, total_duration * scale_x, clip_rect_ht),
fill_color=gray, border_color=black)
label_text_size = 0.4 * clip_rect_ht
timeline_text_width = renderer.get_text_layout_size("Timeline", label_text_size)
timeline_text_location = Point(
x_origin + (total_duration * scale_x) / 2.0 - (timeline_text_width / 2.0),
timeline_origin.y + (clip_rect_ht / 2.0))
renderer.draw_text(text="Timeline", location=timeline_text_location,
text_size=label_text_size)
arrow_margin = 10
arrow_start = Point(x_origin + (total_duration * scale_x) / 2.0,
timeline_origin.y - arrow_margin)
arrow_end = Point(x_origin + (total_duration * scale_x) / 2.0,
timeline_origin.y - clip_rect_ht + arrow_margin)
renderer.draw_line(start_point=arrow_start, end_point=arrow_end, stroke_width=2,
stroke_color=black)
for i in range(1, int(total_duration)):
start_pt = Point(x_origin + (i * scale_x), timeline_origin.y)
end_pt = Point(start_pt.x, start_pt.y + 0.15 * clip_rect_ht)
renderer.draw_line(start_point=start_pt, end_point=end_pt, stroke_width=1,
stroke_color=black)
# Draw Stack
stack_origin = Point(x_origin,
renderer.height - image_margin - 3 * clip_rect_ht)
renderer.draw_solid_rect_with_border(
Rect(stack_origin, total_duration * scale_x, clip_rect_ht),
fill_color=gray, border_color=black)
stack_text_size = label_text_size
stack_text_width = renderer.get_text_layout_size("Stack", stack_text_size)
stack_text_location = Point(
x_origin + (total_duration * scale_x) / 2.0 - (stack_text_width / 2.0),
stack_origin.y + (clip_rect_ht / 2.0))
renderer.draw_text(text="Stack", location=stack_text_location,
text_size=stack_text_size)
arrow_start = Point(x_origin + (total_duration * scale_x) / 2.0,
stack_origin.y - arrow_margin)
arrow_end = Point(x_origin + (total_duration * scale_x) / 2.0,
stack_origin.y - clip_rect_ht + arrow_margin)
renderer.draw_line(start_point=arrow_start, end_point=arrow_end, stroke_width=2,
stroke_color=black)
for i in range(1, int(total_duration)):
start_pt = Point(x_origin + (i * scale_x), stack_origin.y)
end_pt = Point(start_pt.x, start_pt.y + 0.15 * clip_rect_ht)
renderer.draw_line(start_point=start_pt, end_point=end_pt, stroke_width=1,
stroke_color=black)
# Draw Track
track_origin = Point(x_origin,
renderer.height - image_margin - 5 * clip_rect_ht)
renderer.draw_solid_rect_with_border(
Rect(track_origin, total_duration * scale_x, clip_rect_ht),
fill_color=gray, border_color=black)
track_text_size = label_text_size
track_text_width = renderer.get_text_layout_size("Track", track_text_size)
track_text_location = Point(
x_origin + (total_duration * scale_x) / 2.0 - (track_text_width / 2.0),
track_origin.y + (clip_rect_ht / 2.0))
renderer.draw_text(text="Track", location=track_text_location,
text_size=track_text_size)
arrow_start = Point(x_origin + (total_duration * scale_x) / 2.0,
track_origin.y - arrow_margin)
arrow_end = Point(x_origin + (total_duration * scale_x) / 2.0,
track_origin.y - clip_rect_ht + arrow_margin)
renderer.draw_line(start_point=arrow_start, end_point=arrow_end, stroke_width=2,
stroke_color=black)
for i in range(1, int(total_duration)):
start_pt = Point(x_origin + (i * scale_x), track_origin.y)
end_pt = Point(start_pt.x, start_pt.y + 0.15 * clip_rect_ht)
renderer.draw_line(start_point=start_pt, end_point=end_pt, stroke_width=1,
stroke_color=black)
# Draw Clips
for clip_data in all_clips_data:
clip_color = Color.random_color()
clip_origin = Point(x_origin + (clip_data.src_start * scale_x),
renderer.height - image_margin - 7 * clip_rect_ht)
renderer.draw_solid_rect_with_border(
Rect(clip_origin, clip_data.trim_duration * scale_x, clip_rect_ht),
fill_color=clip_color, border_color=black)
clip_text_size = label_text_size
clip_text = 'Clip-' + str(clip_data.clip_id)
clip_text_width = renderer.get_text_layout_size(clip_text, clip_text_size)
clip_text_location = Point(
clip_origin.x + (clip_data.trim_duration * scale_x) / 2.0 -
(clip_text_width / 2.0),
clip_origin.y + (clip_rect_ht / 2.0))
renderer.draw_text(text=clip_text, location=clip_text_location,
text_size=clip_text_size)
for i in range(int(clip_data.src_start), int(clip_data.src_end) + 1):
start_pt = Point(x_origin + (i * scale_x), clip_origin.y)
end_pt = Point(start_pt.x, start_pt.y + 0.15 * clip_rect_ht)
renderer.draw_line(start_point=start_pt, end_point=end_pt, stroke_width=1,
stroke_color=black)
# Draw media references
trim_media_origin = Point(x_origin + (clip_data.src_start * scale_x),
renderer.height - image_margin - (
7 + (clip_data.clip_id + 1) * 2) * clip_rect_ht)
media_origin = Point(x_origin + (clip_data.avlbl_start * scale_x),
renderer.height - image_margin -
(7 + (clip_data.clip_id + 1) * 2) * clip_rect_ht)
renderer.draw_solid_rect(
Rect(trim_media_origin, clip_data.trim_duration * scale_x,
clip_rect_ht),
fill_color=clip_color)
renderer.draw_rect(Rect(media_origin, clip_data.avlbl_duration * scale_x,
clip_rect_ht))
media_text_size = label_text_size
media_text = 'Media-' + str(clip_data.clip_id)
media_text_width = renderer.get_text_layout_size(media_text, media_text_size)
media_text_location = Point(
media_origin.x + (clip_data.avlbl_duration * scale_x) / 2.0
- (media_text_width / 2.0),
media_origin.y + (clip_rect_ht / 2.0))
renderer.draw_text(text=media_text, location=media_text_location,
text_size=media_text_size)
clip_media_height_difference = (clip_data.clip_id * 2 + 1) * clip_rect_ht
media_arrow_start = Point(
clip_origin.x + (clip_data.trim_duration * scale_x) / 2.0,
clip_origin.y - arrow_margin)
media_arrow_end = Point(
clip_origin.x + (clip_data.trim_duration * scale_x) / 2.0,
clip_origin.y - clip_media_height_difference + arrow_margin)
renderer.draw_line(start_point=media_arrow_start, end_point=media_arrow_end,
stroke_width=2, stroke_color=black)
for i in range(int(clip_data.avlbl_start),
int(clip_data.avlbl_end) + 1):
start_pt = Point(x_origin + (i * scale_x), media_origin.y)
end_pt = Point(start_pt.x, start_pt.y + 0.15 * clip_rect_ht)
renderer.draw_line(start_point=start_pt, end_point=end_pt, stroke_width=1,
stroke_color=black)
renderer.save_image(filepath)
| def convert_otio_to_svg(timeline, filepath):
renderer = SVGRenderer(2406.0, 1054.0)
image_margin = 10.0
font_size = 15
total_duration = 0
min_time = 0.0
max_time = 0.0
all_clips_data = []
clip_count = -1
for curr_clip in timeline.tracks[0].each_clip():
avlbl_start = total_duration - curr_clip.source_range.start_time.value
min_time = min(min_time, avlbl_start)
src_start = total_duration
total_duration += curr_clip.source_range.duration.value
src_end = total_duration - 1
avlbl_end = (curr_clip.media_reference.available_range.start_time.value +
curr_clip.media_reference.available_range.duration.value -
curr_clip.source_range.start_time.value -
curr_clip.source_range.duration.value + total_duration - 1)
max_time = max(max_time, avlbl_end)
trim_start = curr_clip.source_range.start_time.value
trim_duration = curr_clip.source_range.duration.value
avlbl_duration = curr_clip.media_reference.available_range.duration.value
clip_count += 1
clip_data = ClipData(src_start, src_end, avlbl_start,
avlbl_end, avlbl_duration, trim_start,
trim_duration,
curr_clip.media_reference.target_url, clip_count)
all_clips_data.append(clip_data)
scale_x = (renderer.width - (2.0 * image_margin)) / (max_time - min_time + 1.0)
x_origin = (-min_time) * scale_x
clip_rect_ht = (renderer.height - (2.0 * image_margin) - (2.0 * font_size)) / \
((4.0 + len(all_clips_data)) * 2 - 1)
# Draw Timeline
timeline_origin = Point(x_origin, renderer.height - image_margin - clip_rect_ht)
renderer.draw_solid_rect_with_border(
Rect(timeline_origin, total_duration * scale_x, clip_rect_ht),
fill_color=gray, border_color=black)
label_text_size = 0.4 * clip_rect_ht
timeline_text_width = renderer.get_text_layout_size("Timeline", label_text_size)
timeline_text_location = Point(
x_origin + (total_duration * scale_x) / 2.0 - (timeline_text_width / 2.0),
timeline_origin.y + (clip_rect_ht / 2.0))
renderer.draw_text(text="Timeline", location=timeline_text_location,
text_size=label_text_size)
arrow_margin = 10
arrow_start = Point(x_origin + (total_duration * scale_x) / 2.0,
timeline_origin.y - arrow_margin)
arrow_end = Point(x_origin + (total_duration * scale_x) / 2.0,
timeline_origin.y - clip_rect_ht + arrow_margin)
renderer.draw_line(start_point=arrow_start, end_point=arrow_end, stroke_width=2,
stroke_color=black)
for i in range(1, int(total_duration)):
start_pt = Point(x_origin + (i * scale_x), timeline_origin.y)
end_pt = Point(start_pt.x, start_pt.y + 0.15 * clip_rect_ht)
renderer.draw_line(start_point=start_pt, end_point=end_pt, stroke_width=1,
stroke_color=black)
# Draw Stack
stack_origin = Point(x_origin,
renderer.height - image_margin - 3 * clip_rect_ht)
renderer.draw_solid_rect_with_border(
Rect(stack_origin, total_duration * scale_x, clip_rect_ht),
fill_color=gray, border_color=black)
stack_text_size = label_text_size
stack_text_width = renderer.get_text_layout_size("Stack", stack_text_size)
stack_text_location = Point(
x_origin + (total_duration * scale_x) / 2.0 - (stack_text_width / 2.0),
stack_origin.y + (clip_rect_ht / 2.0))
renderer.draw_text(text="Stack", location=stack_text_location,
text_size=stack_text_size)
arrow_start = Point(x_origin + (total_duration * scale_x) / 2.0,
stack_origin.y - arrow_margin)
arrow_end = Point(x_origin + (total_duration * scale_x) / 2.0,
stack_origin.y - clip_rect_ht + arrow_margin)
renderer.draw_line(start_point=arrow_start, end_point=arrow_end, stroke_width=2,
stroke_color=black)
for i in range(1, int(total_duration)):
start_pt = Point(x_origin + (i * scale_x), stack_origin.y)
end_pt = Point(start_pt.x, start_pt.y + 0.15 * clip_rect_ht)
renderer.draw_line(start_point=start_pt, end_point=end_pt, stroke_width=1,
stroke_color=black)
# Draw Track
track_origin = Point(x_origin,
renderer.height - image_margin - 5 * clip_rect_ht)
renderer.draw_solid_rect_with_border(
Rect(track_origin, total_duration * scale_x, clip_rect_ht),
fill_color=gray, border_color=black)
track_text_size = label_text_size
track_text_width = renderer.get_text_layout_size("Track", track_text_size)
track_text_location = Point(
x_origin + (total_duration * scale_x) / 2.0 - (track_text_width / 2.0),
track_origin.y + (clip_rect_ht / 2.0))
renderer.draw_text(text="Track", location=track_text_location,
text_size=track_text_size)
arrow_start = Point(x_origin + (total_duration * scale_x) / 2.0,
track_origin.y - arrow_margin)
arrow_end = Point(x_origin + (total_duration * scale_x) / 2.0,
track_origin.y - clip_rect_ht + arrow_margin)
renderer.draw_line(start_point=arrow_start, end_point=arrow_end, stroke_width=2,
stroke_color=black)
for i in range(1, int(total_duration)):
start_pt = Point(x_origin + (i * scale_x), track_origin.y)
end_pt = Point(start_pt.x, start_pt.y + 0.15 * clip_rect_ht)
renderer.draw_line(start_point=start_pt, end_point=end_pt, stroke_width=1,
stroke_color=black)
# Draw Clips
for clip_data in all_clips_data:
clip_color = Color.random_color()
clip_origin = Point(x_origin + (clip_data.src_start * scale_x),
renderer.height - image_margin - 7 * clip_rect_ht)
renderer.draw_solid_rect_with_border(
Rect(clip_origin, clip_data.trim_duration * scale_x, clip_rect_ht),
fill_color=clip_color, border_color=black)
clip_text_size = label_text_size
clip_text = 'Clip-' + str(clip_data.clip_id)
clip_text_width = renderer.get_text_layout_size(clip_text, clip_text_size)
clip_text_location = Point(
clip_origin.x + (clip_data.trim_duration * scale_x) / 2.0 -
(clip_text_width / 2.0),
clip_origin.y + (clip_rect_ht / 2.0))
renderer.draw_text(text=clip_text, location=clip_text_location,
text_size=clip_text_size)
for i in range(int(clip_data.src_start), int(clip_data.src_end) + 1):
start_pt = Point(x_origin + (i * scale_x), clip_origin.y)
end_pt = Point(start_pt.x, start_pt.y + 0.15 * clip_rect_ht)
renderer.draw_line(start_point=start_pt, end_point=end_pt, stroke_width=1,
stroke_color=black)
# Draw media references
trim_media_origin = Point(x_origin + (clip_data.src_start * scale_x),
renderer.height - image_margin - (
7 + (clip_data.clip_id + 1) * 2) * clip_rect_ht)
media_origin = Point(x_origin + (clip_data.avlbl_start * scale_x),
renderer.height - image_margin -
(7 + (clip_data.clip_id + 1) * 2) * clip_rect_ht)
renderer.draw_solid_rect(
Rect(trim_media_origin, clip_data.trim_duration * scale_x,
clip_rect_ht),
fill_color=clip_color)
renderer.draw_rect(Rect(media_origin, clip_data.avlbl_duration * scale_x,
clip_rect_ht))
media_text_size = label_text_size
media_text = 'Media-' + str(clip_data.clip_id)
media_text_width = renderer.get_text_layout_size(media_text, media_text_size)
media_text_location = Point(
media_origin.x + (clip_data.avlbl_duration * scale_x) / 2.0
- (media_text_width / 2.0),
media_origin.y + (clip_rect_ht / 2.0))
renderer.draw_text(text=media_text, location=media_text_location,
text_size=media_text_size)
clip_media_height_difference = (clip_data.clip_id * 2 + 1) * clip_rect_ht
media_arrow_start = Point(
clip_origin.x + (clip_data.trim_duration * scale_x) / 2.0,
clip_origin.y - arrow_margin)
media_arrow_end = Point(
clip_origin.x + (clip_data.trim_duration * scale_x) / 2.0,
clip_origin.y - clip_media_height_difference + arrow_margin)
renderer.draw_line(start_point=media_arrow_start, end_point=media_arrow_end,
stroke_width=2, stroke_color=black)
for i in range(int(clip_data.avlbl_start),
int(clip_data.avlbl_end) + 1):
start_pt = Point(x_origin + (i * scale_x), media_origin.y)
end_pt = Point(start_pt.x, start_pt.y + 0.15 * clip_rect_ht)
renderer.draw_line(start_point=start_pt, end_point=end_pt, stroke_width=1,
stroke_color=black)
renderer.save_image(filepath)
|
5,418 | def build_interface(iface, iface_type, enabled, **settings):
"""
Build an interface script for a network interface.
CLI Example:
.. code-block:: bash
salt '*' ip.build_interface eth0 eth <settings>
"""
iface_type = iface_type.lower()
if iface_type not in _IFACE_TYPES:
_raise_error_iface(iface, iface_type, _IFACE_TYPES)
if iface_type == "slave":
settings["slave"] = "yes"
if "master" not in settings:
msg = "master is a required setting for slave interfaces"
log.error(msg)
raise AttributeError(msg)
if iface_type == "bond":
if "mode" not in settings:
msg = "mode is required for bond interfaces"
log.error(msg)
raise AttributeError(msg)
settings["mode"] = str(settings["mode"])
if iface_type == "vlan":
settings["vlan"] = "yes"
if iface_type == "bridge" and not __salt__["pkg.version"]("bridge-utils"):
__salt__["pkg.install"]("bridge-utils")
if iface_type in (
"eth",
"bond",
"bridge",
"slave",
"vlan",
"ipip",
"ib",
"alias",
):
opts = _parse_settings_eth(settings, iface_type, enabled, iface)
try:
template = JINJA.get_template("ifcfg.jinja")
except jinja2.exceptions.TemplateNotFound:
log.error("Could not load template ifcfg.jinja")
return ""
log.debug("Interface opts: \n %s", opts)
ifcfg = template.render(opts)
if settings.get("test"):
return _read_temp(ifcfg)
_write_file_iface(iface, ifcfg, _SUSE_NETWORK_SCRIPT_DIR, "ifcfg-{}")
path = os.path.join(_SUSE_NETWORK_SCRIPT_DIR, "ifcfg-{}".format(iface))
return _read_file(path)
| def build_interface(iface, iface_type, enabled, **settings):
"""
Build an interface script for a network interface.
CLI Example:
.. code-block:: bash
salt '*' ip.build_interface eth0 eth <settings>
"""
iface_type = iface_type.lower()
if iface_type not in _IFACE_TYPES:
_raise_error_iface(iface, iface_type, _IFACE_TYPES)
if iface_type == "slave":
settings["slave"] = "yes"
if "master" not in settings:
msg = "master is a required setting for slave interfaces"
log.error(msg)
raise AttributeError(msg)
if iface_type == "bond":
if "mode" not in settings:
msg = "mode is required for bond interfaces"
log.error(msg)
raise AttributeError(msg)
settings["mode"] = str(settings["mode"])
if iface_type == "vlan":
settings["vlan"] = "yes"
if iface_type == "bridge" and not __salt__["pkg.version"]("bridge-utils"):
__salt__["pkg.install"]("bridge-utils")
if iface_type in (
"eth",
"bond",
"bridge",
"slave",
"vlan",
"ipip",
"ib",
"alias",
):
opts = _parse_settings_eth(settings, iface_type, enabled, iface)
try:
template = JINJA.get_template("ifcfg.jinja")
except jinja2.exceptions.TemplateNotFound:
log.error("Could not load template ifcfg.jinja")
return ""
log.debug("Interface opts:\n%s", opts)
ifcfg = template.render(opts)
if settings.get("test"):
return _read_temp(ifcfg)
_write_file_iface(iface, ifcfg, _SUSE_NETWORK_SCRIPT_DIR, "ifcfg-{}")
path = os.path.join(_SUSE_NETWORK_SCRIPT_DIR, "ifcfg-{}".format(iface))
return _read_file(path)
|
31,148 | def main():
SESSION.proxies = handle_proxy()
client = SixgillEnrichClient(
demisto.params()["client_id"], demisto.params()["client_secret"], CHANNEL_CODE, demisto, SESSION, VERIFY
)
command = demisto.command()
demisto.info(f"Command being called is {command}")
commands: Dict[str, Callable] = {
"test-module": test_module_command,
}
try:
if demisto.command() == "ip":
return_results(ip_reputation_command(client, demisto.args()))
elif demisto.command() == "domain":
return_results(domain_reputation_command(client, demisto.args()))
elif demisto.command() == "url":
return_results(url_reputation_command(client, demisto.args()))
elif demisto.command() == "file":
return_results(file_reputation_command(client, demisto.args()))
elif demisto.command() == "actor":
return_results(actor_reputation_command(client, demisto.args()))
elif demisto.command() == "post_id":
return_results(postid_reputation_command(client, demisto.args()))
else:
readable_output, outputs, raw_response = commands[command](client, demisto.args())
return_outputs(readable_output, outputs, raw_response)
except Exception as e:
demisto.error(traceback.format_exc())
return_error(f"Error failed to execute {demisto.command()}, error: [{e}]")
| def main():
SESSION.proxies = handle_proxy()
client = SixgillEnrichClient(
demisto.params()["client_id"], demisto.params()["client_secret"], CHANNEL_CODE, demisto, SESSION, VERIFY
)
command = demisto.command()
demisto.info(f"Command being called is {command}")
commands: Dict[str, Callable] = {
"test-module": test_module_command,
}
try:
if demisto.command() == "ip":
return_results(ip_reputation_command(client, demisto.args()))
elif demisto.command() == "domain":
return_results(domain_reputation_command(client, demisto.args()))
elif command == "url":
return_results(url_reputation_command(client, demisto.args()))
elif demisto.command() == "file":
return_results(file_reputation_command(client, demisto.args()))
elif demisto.command() == "actor":
return_results(actor_reputation_command(client, demisto.args()))
elif demisto.command() == "post_id":
return_results(postid_reputation_command(client, demisto.args()))
else:
readable_output, outputs, raw_response = commands[command](client, demisto.args())
return_outputs(readable_output, outputs, raw_response)
except Exception as e:
demisto.error(traceback.format_exc())
return_error(f"Error failed to execute {demisto.command()}, error: [{e}]")
|
35,248 | def real_if_close(x, tol=100):
"""If input is complex with all imaginary parts close to zero, return real
parts.
“Close to zero” is defined as tol * (machine epsilon of the type for x).
.. seealso:: :func:`numpy.real_if_close`
"""
x = cupy.asanyarray(x)
if not issubclass(x.dtype.type, cupy.complexfloating):
return x
if tol > 1:
from numpy.core import getlimits
f = getlimits.finfo(x.dtype.type)
tol = f.eps * tol
if cupy.all(cupy.absolute(x.imag) < tol):
x = x.real
return x
| def real_if_close(x, tol=100):
"""If input is complex with all imaginary parts close to zero, return real
parts.
“Close to zero” is defined as tol * (machine epsilon of the type for x).
.. seealso:: :func:`numpy.real_if_close`
"""
x = cupy.asanyarray(x)
if not issubclass(x.dtype.type, cupy.complexfloating):
return x
if tol > 1:
f = numpy.finfo(x.dtype.type)
tol = f.eps * tol
if cupy.all(cupy.absolute(x.imag) < tol):
x = x.real
return x
|
43,296 | def _chebyshev(one_hot_encoded_row, laplacian, coeffs, deg, max_eig):
"""
This function calculates one column of the Chebyshev approximation of exp(-scale * laplacian) for
all scales.
Args:
one_hot_encoded_row (SparseTensor): a sparse tensor indicating which column (node) to calculate.
laplacian (SparseTensor): the unormalized graph laplacian
coeffs: the Chebyshev coefficients for exp(-scale * x) for each scale in the shape (num_scales, deg)
deg: the degree of the Chebyshev polynomial
Returns:
(num_scales, num_nodes) tensor of the wavelets for each scale for the specified node.
"""
a = max_eig / 2
T_0 = tf.reshape(
tf.sparse.to_dense(one_hot_encoded_row), shape=(laplacian.shape[0], 1)
)
T_1 = (K.dot(laplacian, T_0) - a * T_0) / a
cheby_polys = [T_0, T_1]
for i in range(deg - 1):
cheby_poly = (2 / a) * (
K.dot(laplacian, cheby_polys[-1]) - a * cheby_polys[-1]
) - cheby_polys[-2]
cheby_polys.append(cheby_poly)
cheby_polys = K.squeeze(tf.stack(cheby_polys, axis=0), axis=-1)
return tf.matmul(coeffs, cheby_polys)
| def _chebyshev(one_hot_encoded_row, laplacian, coeffs, deg, max_eig):
"""
This function calculates one column of the Chebyshev approximation of exp(-scale * laplacian) for
all scales.
Args:
one_hot_encoded_row (SparseTensor): a sparse tensor indicating which column (node) to calculate.
laplacian (SparseTensor): the unormalized graph laplacian
coeffs: the Chebyshev coefficients for exp(-scale * x) for each scale in the shape (num_scales, deg)
deg: the degree of the Chebyshev polynomial
Returns:
(num_scales, num_nodes) tensor of the wavelets for each scale for the specified node.
"""
a = max_eig / 2
T_0 = tf.reshape(
tf.sparse.to_dense(one_hot_encoded_row), shape=(laplacian.shape[0],)
)
T_1 = (K.dot(laplacian, T_0) - a * T_0) / a
cheby_polys = [T_0, T_1]
for i in range(deg - 1):
cheby_poly = (2 / a) * (
K.dot(laplacian, cheby_polys[-1]) - a * cheby_polys[-1]
) - cheby_polys[-2]
cheby_polys.append(cheby_poly)
cheby_polys = K.squeeze(tf.stack(cheby_polys, axis=0), axis=-1)
return tf.matmul(coeffs, cheby_polys)
|
9,890 | def main():
argument_spec = postgres_common_argument_spec()
argument_spec.update(
copy_to=dict(type='path', aliases=['to']),
copy_from=dict(type='path', aliases=['from']),
src=dict(type='str', aliases=['source']),
dst=dict(type='str', aliases=['destination']),
columns=dict(type='list', aliases=['column']),
options=dict(type='dict'),
program=dict(type='bool'),
db=dict(type='str', aliases=['login_db']),
session_role=dict(type='str'),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
mutually_exclusive=[
['copy_from', 'copy_to'],
['copy_from', 'src'],
['copy_to', 'dst'],
]
)
# Note: we don't need to check mutually exclusive params here, because they are
# checked automatically by AnsibleModule (mutually_exclusive=[] list above).
if module.params.get('copy_from') and not module.params.get('dst'):
module.fail_json(msg='dst param is necessary with copy_from')
elif module.params.get('copy_to') and not module.params.get('src'):
module.fail_json(msg='src param is necessary with copy_to')
# Connect to DB and make cursor object:
# (Note: autocommit=True because check_mode doesn't use
# transaction mode and ROLLBACK)
db_connection = connect_to_db(module, autocommit=True)
cursor = db_connection.cursor(cursor_factory=DictCursor)
##############
# Create the object and do main job:
data = PgCopyData(module, cursor)
################
# If Check mode:
# Note: Transaction mode is not suitable for checking
# because of there may be a lot of data and their actual upload
# can affect database performance. So we just check the src/dst
# table availability int check_src/dst.
#
# If the checks below are passed, changed will be always True:
if module.check_mode:
if module.params.get('dst'):
changed = data.check_table(module.params['dst'])
elif module.params.get('src'):
changed = data.check_table(module.params['src'])
module.exit_json(
changed=changed,
queries=data.executed_queries,
src=data.src,
dst=data.dst,
)
# End of check_mode
###################
# Note: parameters like dst, src, etc. are got
# from module object into data object of PgCopyData class.
# Therefore not need to pass args to the methods below.
if module.params.get('copy_to'):
data.copy_to()
elif module.params.get('copy_from'):
data.copy_from()
# Finish:
cursor.close()
db_connection.close()
# Return some values:
module.exit_json(
changed=data.changed,
queries=data.executed_queries,
src=data.src,
dst=data.dst,
)
| def main():
argument_spec = postgres_common_argument_spec()
argument_spec.update(
copy_to=dict(type='path', aliases=['to']),
copy_from=dict(type='path', aliases=['from']),
src=dict(type='str', aliases=['source']),
dst=dict(type='str', aliases=['destination']),
columns=dict(type='list', aliases=['column']),
options=dict(type='dict'),
program=dict(type='bool'),
db=dict(type='str', aliases=['login_db']),
session_role=dict(type='str'),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
mutually_exclusive=[
['copy_from', 'copy_to'],
['copy_from', 'src'],
['copy_to', 'dst'],
]
)
# Note: we don't need to check mutually exclusive params here, because they are
# checked automatically by AnsibleModule (mutually_exclusive=[] list above).
if module.params.get('copy_from') and not module.params.get('dst'):
module.fail_json(msg='dst param is necessary with copy_from')
elif module.params.get('copy_to') and not module.params.get('src'):
module.fail_json(msg='src param is necessary with copy_to')
# Connect to DB and make cursor object:
# (Note: autocommit=True because check_mode doesn't use
# transaction mode and ROLLBACK)
db_connection = connect_to_db(module, autocommit=True)
cursor = db_connection.cursor(cursor_factory=DictCursor)
##############
# Create the object and do main job:
data = PgCopyData(module, cursor)
################
# If Check mode:
# Note: Transaction mode is not suitable for checking
# because of there may be a lot of data and their actual upload
# can affect database performance. So we just check the src/dst
# table availability in check_src/dst.
#
# If the checks below are passed, changed will be always True:
if module.check_mode:
if module.params.get('dst'):
changed = data.check_table(module.params['dst'])
elif module.params.get('src'):
changed = data.check_table(module.params['src'])
module.exit_json(
changed=changed,
queries=data.executed_queries,
src=data.src,
dst=data.dst,
)
# End of check_mode
###################
# Note: parameters like dst, src, etc. are got
# from module object into data object of PgCopyData class.
# Therefore not need to pass args to the methods below.
if module.params.get('copy_to'):
data.copy_to()
elif module.params.get('copy_from'):
data.copy_from()
# Finish:
cursor.close()
db_connection.close()
# Return some values:
module.exit_json(
changed=data.changed,
queries=data.executed_queries,
src=data.src,
dst=data.dst,
)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.