id
int64 11
59.9k
| original
stringlengths 33
150k
| modified
stringlengths 37
150k
|
---|---|---|
5,411 |
def test_rename():
"""
Test if the source file exists on the system,
rename it to the named file.
"""
name = "/tmp/salt"
source = "/tmp/salt/salt"
ret = {"name": name, "result": False, "comment": "", "changes": {}}
comt = "Must provide name to file.rename"
ret.update({"comment": comt, "name": ""})
assert filestate.rename("", source) == ret
mock_t = MagicMock(return_value=True)
mock_f = MagicMock(return_value=False)
mock_lex = MagicMock(side_effect=[False, True, True])
with patch.object(os.path, "isabs", mock_f):
comt = "Specified file {} is not an absolute path".format(name)
ret.update({"comment": comt, "name": name})
assert filestate.rename(name, source) == ret
mock_lex = MagicMock(return_value=False)
with patch.object(os.path, "isabs", mock_t):
with patch.object(os.path, "lexists", mock_lex):
comt = 'Source file "{}" has already been moved out of ' "place".format(
source
)
ret.update({"comment": comt, "result": True})
assert filestate.rename(name, source) == ret
mock_lex = MagicMock(side_effect=[True, True, True])
with patch.object(os.path, "isabs", mock_t):
with patch.object(os.path, "lexists", mock_lex):
comt = 'The target file "{}" exists and will not be ' "overwritten".format(
name
)
ret.update({"comment": comt, "result": True})
assert filestate.rename(name, source) == ret
mock_lex = MagicMock(side_effect=[True, True, True])
mock_rem = MagicMock(side_effect=IOError)
with patch.object(os.path, "isabs", mock_t):
with patch.object(os.path, "lexists", mock_lex):
with patch.dict(filestate.__opts__, {"test": False}):
comt = 'Failed to delete "{}" in preparation for ' "forced move".format(
name
)
with patch.dict(filestate.__salt__, {"file.remove": mock_rem}):
ret.update({"name": name, "comment": comt, "result": False})
assert filestate.rename(name, source, force=True) == ret
mock_lex = MagicMock(side_effect=[True, False, False])
with patch.object(os.path, "isabs", mock_t):
with patch.object(os.path, "lexists", mock_lex):
with patch.dict(filestate.__opts__, {"test": True}):
comt = 'File "{}" is set to be moved to "{}"'.format(source, name)
ret.update({"name": name, "comment": comt, "result": None})
assert filestate.rename(name, source) == ret
mock_lex = MagicMock(side_effect=[True, False, False])
with patch.object(os.path, "isabs", mock_t):
with patch.object(os.path, "lexists", mock_lex):
with patch.object(os.path, "isdir", mock_f):
with patch.dict(filestate.__opts__, {"test": False}):
comt = "The target directory /tmp is not present"
ret.update({"name": name, "comment": comt, "result": False})
assert filestate.rename(name, source) == ret
mock_lex = MagicMock(side_effect=[True, False, False])
with patch.object(os.path, "isabs", mock_t):
with patch.object(os.path, "lexists", mock_lex):
with patch.object(os.path, "isdir", mock_t):
with patch.object(os.path, "islink", mock_f):
with patch.dict(filestate.__opts__, {"test": False}):
with patch.object(
shutil, "move", MagicMock(side_effect=IOError)
):
comt = 'Failed to move "{}" to "{}"'.format(source, name)
ret.update({"name": name, "comment": comt, "result": False})
assert filestate.rename(name, source) == ret
mock_lex = MagicMock(side_effect=[True, False, False])
with patch.object(os.path, "isabs", mock_t):
with patch.object(os.path, "lexists", mock_lex):
with patch.object(os.path, "isdir", mock_t):
with patch.object(os.path, "islink", mock_f):
with patch.dict(filestate.__opts__, {"test": False}):
with patch.object(shutil, "move", MagicMock()):
comt = 'Moved "{}" to "{}"'.format(source, name)
ret.update(
{
"name": name,
"comment": comt,
"result": True,
"changes": {name: source},
}
)
assert filestate.rename(name, source) == ret
|
def test_rename():
"""
Test if the source file exists on the system,
rename it to the named file.
"""
name = "/tmp/salt"
source = "/tmp/salt/salt"
ret = {"name": name, "result": False, "comment": "", "changes": {}}
comt = "Must provide name to file.rename"
ret.update({"comment": comt, "name": ""})
assert filestate.rename("", source) == ret
mock_t = MagicMock(return_value=True)
mock_f = MagicMock(return_value=False)
mock_lex = MagicMock(side_effect=[False, True, True])
with patch.object(os.path, "isabs", mock_f):
comt = "Specified file {} is not an absolute path".format(name)
ret.update({"comment": comt, "name": name})
assert filestate.rename(name, source) == ret
mock_lex = MagicMock(return_value=False)
with patch.object(os.path, "isabs", mock_t):
with patch.object(os.path, "lexists", mock_lex):
comt = 'Source file "{}" has already been moved out of place'.format(
source
)
ret.update({"comment": comt, "result": True})
assert filestate.rename(name, source) == ret
mock_lex = MagicMock(side_effect=[True, True, True])
with patch.object(os.path, "isabs", mock_t):
with patch.object(os.path, "lexists", mock_lex):
comt = 'The target file "{}" exists and will not be ' "overwritten".format(
name
)
ret.update({"comment": comt, "result": True})
assert filestate.rename(name, source) == ret
mock_lex = MagicMock(side_effect=[True, True, True])
mock_rem = MagicMock(side_effect=IOError)
with patch.object(os.path, "isabs", mock_t):
with patch.object(os.path, "lexists", mock_lex):
with patch.dict(filestate.__opts__, {"test": False}):
comt = 'Failed to delete "{}" in preparation for ' "forced move".format(
name
)
with patch.dict(filestate.__salt__, {"file.remove": mock_rem}):
ret.update({"name": name, "comment": comt, "result": False})
assert filestate.rename(name, source, force=True) == ret
mock_lex = MagicMock(side_effect=[True, False, False])
with patch.object(os.path, "isabs", mock_t):
with patch.object(os.path, "lexists", mock_lex):
with patch.dict(filestate.__opts__, {"test": True}):
comt = 'File "{}" is set to be moved to "{}"'.format(source, name)
ret.update({"name": name, "comment": comt, "result": None})
assert filestate.rename(name, source) == ret
mock_lex = MagicMock(side_effect=[True, False, False])
with patch.object(os.path, "isabs", mock_t):
with patch.object(os.path, "lexists", mock_lex):
with patch.object(os.path, "isdir", mock_f):
with patch.dict(filestate.__opts__, {"test": False}):
comt = "The target directory /tmp is not present"
ret.update({"name": name, "comment": comt, "result": False})
assert filestate.rename(name, source) == ret
mock_lex = MagicMock(side_effect=[True, False, False])
with patch.object(os.path, "isabs", mock_t):
with patch.object(os.path, "lexists", mock_lex):
with patch.object(os.path, "isdir", mock_t):
with patch.object(os.path, "islink", mock_f):
with patch.dict(filestate.__opts__, {"test": False}):
with patch.object(
shutil, "move", MagicMock(side_effect=IOError)
):
comt = 'Failed to move "{}" to "{}"'.format(source, name)
ret.update({"name": name, "comment": comt, "result": False})
assert filestate.rename(name, source) == ret
mock_lex = MagicMock(side_effect=[True, False, False])
with patch.object(os.path, "isabs", mock_t):
with patch.object(os.path, "lexists", mock_lex):
with patch.object(os.path, "isdir", mock_t):
with patch.object(os.path, "islink", mock_f):
with patch.dict(filestate.__opts__, {"test": False}):
with patch.object(shutil, "move", MagicMock()):
comt = 'Moved "{}" to "{}"'.format(source, name)
ret.update(
{
"name": name,
"comment": comt,
"result": True,
"changes": {name: source},
}
)
assert filestate.rename(name, source) == ret
|
13,043 |
def get_channel_slug_from_payment(payment: Payment) -> Union[str, None]:
if payment.checkout:
channel_slug = payment.checkout.channel.slug
elif payment.order:
channel_slug = payment.order.channel.slug
else:
channel_slug = None # type: ignore
return channel_slug
|
def get_channel_slug_from_payment(payment: Payment) -> Optional[str]:
if payment.checkout:
channel_slug = payment.checkout.channel.slug
elif payment.order:
channel_slug = payment.order.channel.slug
else:
channel_slug = None # type: ignore
return channel_slug
|
31,684 |
def add_incident_artifact(incident_id, artifact_type, artifact_value, artifact_description):
body = {'type': artifact_type, 'value': artifact_value, 'description': {'format': 'text',
'content': artifact_description}}
CLIENT.post('/incidents/' + str(incident_id) + '/artifacts', body) # type: ignore
return 'The artifact was added successfully.'
|
def add_incident_artifact(incident_id, artifact_type, artifact_value, artifact_description):
body = {'type': artifact_type, 'value': artifact_value, 'description': {'format': 'text',
'content': artifact_description}}
CLIENT.post('/incidents/' + str(incident_id) + '/artifacts', body) # type: ignore
return f'The artifact was added successfully to incident {incident_id}.'
|
42,423 |
def update_e3dc_battery(address: Iterable[str],external: int,pvother: int,pvwattin: int):
soc = 0
count = 0
speicher = 0
# extpv - > pv Leistung die als externe Produktion an e3dc angeschlossen ist
# nur auslesen wenn als relevant parametrisiert (external = 1) , sonst doppelte Auslesung
extpv = 0
# pv -> pv Leistung die direkt an e3dc angeschlossen ist
pv = 0
for addr in address:
log.debug("Battery Ip: %s, external %d pvother %d pvwatt (input) %d", addr,external,pvother,pvwattin)
if addr != "none":
count=count+1
client = ModbusClient(addr, port=502)
#40082 soc
soc = soc + client.read_holding_registers(40082,ModbusDataType.INT_16,unit=1)
#40069 speicherleistung
speicher = speicher + client.read_holding_registers(40069, ModbusDataType.INT_32, wordorder=Endian.Little,unit=1)
#40067 pv Leistung
pv = pv + (client.read_holding_registers(40067, ModbusDataType.INT_32, wordorder=Endian.Little,unit=1) * -1)
if external == 1:
#40075 externe pv Leistung
extpv = extpv + client.read_holding_registers(40075, ModbusDataType.INT_32, wordorder=Endian.Little,unit=1)
soc = soc / count
log.debug("Battery soc %d speicherleistung %d pv %d extpv %d anzahl ip %d", soc,speicher,pv,extpv,count)
cnt= SimCountFactory().get_sim_counter()().sim_count(speicher, prefix="speicher")
get_bat_value_store(1).set(BatState(power=speicher, soc=soc, imported= cnt[0], exported= cnt[1]))
# pvother sagt aus, ob wr definiert ist, und dessen pv Leistungs auch gilt
# wenn 0 gilt nur pv und extpv aus e3dc
pvtotal = pv + extpv
if (pvother == 0) or (pvtotal != 0):
if pvother == 1:
pvtotal = pvtotal + pvwattin
log.debug(" wr update pvother %d pvtotal %d", pvother,pvtotal)
cntpv= SimCountFactory().get_sim_counter()().sim_count(pvtotal, prefix="pv")
get_inverter_value_store(1).set(InverterState(counter=cntpv[1], power=pvtotal))
|
def update_e3dc_battery(address: Iterable[str],external: int,pvother: int,pvwattin: int):
soc = 0
count = 0
speicher = 0
# extpv - > pv Leistung die als externe Produktion an e3dc angeschlossen ist
# nur auslesen wenn als relevant parametrisiert (external = 1) , sonst doppelte Auslesung
extpv = 0
# pv -> pv Leistung die direkt an e3dc angeschlossen ist
pv = 0
for addr in address:
log.debug("Battery Ip: %s, external %d pvother %d pvwatt (input) %d", addr,external,pvother,pvwattin)
if addr != "none":
count += 1
client = ModbusClient(addr, port=502)
#40082 soc
soc = soc + client.read_holding_registers(40082,ModbusDataType.INT_16,unit=1)
#40069 speicherleistung
speicher = speicher + client.read_holding_registers(40069, ModbusDataType.INT_32, wordorder=Endian.Little,unit=1)
#40067 pv Leistung
pv = pv + (client.read_holding_registers(40067, ModbusDataType.INT_32, wordorder=Endian.Little,unit=1) * -1)
if external == 1:
#40075 externe pv Leistung
extpv = extpv + client.read_holding_registers(40075, ModbusDataType.INT_32, wordorder=Endian.Little,unit=1)
soc = soc / count
log.debug("Battery soc %d speicherleistung %d pv %d extpv %d anzahl ip %d", soc,speicher,pv,extpv,count)
cnt= SimCountFactory().get_sim_counter()().sim_count(speicher, prefix="speicher")
get_bat_value_store(1).set(BatState(power=speicher, soc=soc, imported= cnt[0], exported= cnt[1]))
# pvother sagt aus, ob wr definiert ist, und dessen pv Leistungs auch gilt
# wenn 0 gilt nur pv und extpv aus e3dc
pvtotal = pv + extpv
if (pvother == 0) or (pvtotal != 0):
if pvother == 1:
pvtotal = pvtotal + pvwattin
log.debug(" wr update pvother %d pvtotal %d", pvother,pvtotal)
cntpv= SimCountFactory().get_sim_counter()().sim_count(pvtotal, prefix="pv")
get_inverter_value_store(1).set(InverterState(counter=cntpv[1], power=pvtotal))
|
10,484 |
def get_action_args_with_defaults(action, args, defaults, templar, redirected_names=None):
collection_groups = {
'amazon.aws': 'aws',
'community.aws': 'aws',
'azure.azcollection': 'azure',
'wti.remote': 'cpm',
'community.general': 'docker',
'google.cloud': 'gcp',
'community.kubernetes': 'k8s',
'openstack.cloud': 'os',
'ovirt.ovirt': 'ovirt',
'community.vmware': 'vmware',
}
if not redirected_names:
redirected_names = [action]
collection_name = ''
resource = resolved_name = redirected_names[-1]
tmp_args = {}
module_defaults = {}
# Merge latest defaults into dict, since they are a list of dicts
if isinstance(defaults, list):
for default in defaults:
module_defaults.update(default)
# if I actually have defaults, template and merge
if module_defaults:
module_defaults = templar.template(module_defaults)
# deal with configured group defaults first
if AnsibleCollectionRef.is_valid_fqcr(resolved_name):
collection_ref = AnsibleCollectionRef.from_fqcr(resolved_name, 'modules')
collection_name = collection_ref.collection
resource = collection_ref.resource
group_name = collection_groups.get(collection_name)
if 'group/%s' % group_name in module_defaults:
collection_routing = _get_collection_metadata(collection_name).get('action_groups', {})
if resolved_name in collection_routing or resource in collection_routing:
tmp_args.update((module_defaults.get('group/%s' % group_name) or {}).copy())
# handle specific action defaults
if action in module_defaults:
tmp_args.update(module_defaults[action].copy())
# direct args override all
tmp_args.update(args)
return tmp_args
|
def get_action_args_with_defaults(action, args, defaults, templar, redirected_names=None):
collection_groups = {
'community.crypto': 'acme',
'community.aws': 'aws',
'azure.azcollection': 'azure',
'wti.remote': 'cpm',
'community.general': 'docker',
'google.cloud': 'gcp',
'community.kubernetes': 'k8s',
'openstack.cloud': 'os',
'ovirt.ovirt': 'ovirt',
'community.vmware': 'vmware',
}
if not redirected_names:
redirected_names = [action]
collection_name = ''
resource = resolved_name = redirected_names[-1]
tmp_args = {}
module_defaults = {}
# Merge latest defaults into dict, since they are a list of dicts
if isinstance(defaults, list):
for default in defaults:
module_defaults.update(default)
# if I actually have defaults, template and merge
if module_defaults:
module_defaults = templar.template(module_defaults)
# deal with configured group defaults first
if AnsibleCollectionRef.is_valid_fqcr(resolved_name):
collection_ref = AnsibleCollectionRef.from_fqcr(resolved_name, 'modules')
collection_name = collection_ref.collection
resource = collection_ref.resource
group_name = collection_groups.get(collection_name)
if 'group/%s' % group_name in module_defaults:
collection_routing = _get_collection_metadata(collection_name).get('action_groups', {})
if resolved_name in collection_routing or resource in collection_routing:
tmp_args.update((module_defaults.get('group/%s' % group_name) or {}).copy())
# handle specific action defaults
if action in module_defaults:
tmp_args.update(module_defaults[action].copy())
# direct args override all
tmp_args.update(args)
return tmp_args
|
36,675 |
def getproxies_environment():
"""Return a dictionary of scheme -> proxy server URL mappings.
Scan the environment for variables named <scheme>_proxy;
this seems to be the standard convention. If you need a
different way, you can pass a proxies dictionary to the
[Fancy]URLopener constructor.
"""
# in order to prefer lowercase variables, process environment in
# two passes: first matches any, second pass matches lowercase only
# select only environment variables which end in (after making lowercase) _proxy
candidate_names = [name for name in os.environ.keys() if len(name)>5 and name[-6]=='_'] # fast selection of candidates
environment = [(name, os.environ[name], name.lower()) for name in candidate_names if name[-6:].lower()=='_proxy']
proxies = {}
for name, value, name_lower in environment:
if value and name_lower[-6:] == '_proxy':
proxies[name_lower[:-6]] = value
# CVE-2016-1000110 - If we are running as CGI script, forget HTTP_PROXY
# (non-all-lowercase) as it may be set from the web server by a "Proxy:"
# header from the client
# If "proxy" is lowercase, it will still be used thanks to the next block
if 'REQUEST_METHOD' in os.environ:
proxies.pop('http', None)
for name, value, name_lower in environment:
if name[-6:] == '_proxy':
if value:
proxies[name_lower[:-6]] = value
else:
proxies.pop(name_lower[:-6], None)
return proxies
|
def getproxies_environment():
"""Return a dictionary of scheme -> proxy server URL mappings.
Scan the environment for variables named <scheme>_proxy;
this seems to be the standard convention. If you need a
different way, you can pass a proxies dictionary to the
[Fancy]URLopener constructor.
"""
# in order to prefer lowercase variables, process environment in
# two passes: first matches any, second pass matches lowercase only
# select only environment variables which end in (after making lowercase) _proxy
candidate_names = [name for name in os.environ.keys() if len(name)>5 and name[-6]=='_'] # fast selection of candidates
environment = [(name, os.environ[name], name.lower()) for name in candidate_names if name[-6:].lower()=='_proxy']
proxies = {}
for name, value, name_lower in environment:
if value and name_lower[-6:] == '_proxy':
proxies[name_lower[:-6]] = value
# CVE-2016-1000110 - If we are running as CGI script, forget HTTP_PROXY
# (non-all-lowercase) as it may be set from the web server by a "Proxy:"
# header from the client
# If "proxy" is lowercase, it will still be used thanks to the next block
if 'REQUEST_METHOD' in os.environ:
proxies.pop('http', None)
for name, value, name_lower in environment:
if name[-6:] == '_proxy':
if value:
proxies[name_lower[:-6]] = value
else:
proxies.pop(proxy_name, None)
return proxies
|
25,036 |
def _is_ignored_file(
element: str,
ignore_list: list[str],
ignore_list_re: list[Pattern[str]],
ignore_list_paths_re: list[Pattern[str]],
) -> bool:
element = os.path.normpath(element)
basename = os.path.basename(element)
return (
basename in ignore_list
or _is_in_ignore_list_re(basename, ignore_list_re)
or _is_in_ignore_list_re(element, ignore_list_paths_re)
)
|
def _is_ignored_file(
element: str,
ignore_list: list[str],
ignore_list_re: list[Pattern[str]],
ignore_list_paths_re: list[Pattern[str]],
) -> bool:
basename = os.path.basename(os.path.normpath(element))
return (
basename in ignore_list
or _is_in_ignore_list_re(basename, ignore_list_re)
or _is_in_ignore_list_re(element, ignore_list_paths_re)
)
|
41,996 |
def _find_coordinates_where_empty(
zmap: Dict[complex, Union[int, float]], contour_point_num: int
) -> List[complex]:
# this function implements missing value discovery and sorting
# algorithm used in Plotly to interpolate heatmaps and contour plots
# https://github.com/plotly/plotly.js/blob/master/src/traces/heatmap/find_empties.js
# it works by repeteadly interating over coordinate map in search for patches of
# missing values with existing or previously discovered neighbors
# when discovered, such patches are added to the iteration queue (list of coordinates)
# sorted by number of neighbors, marking iteration order for interpolation algorithm
# search ends when all missing patches have been discovered
# it's like playing minesweeper in reverse
iter_queue: List[complex] = []
zcopy = zmap.copy()
discovered = 0
n_missing = (contour_point_num ** 2) - len(zmap)
coordinates = [
complex(xaxis, yaxis)
for yaxis in range(contour_point_num)
for xaxis in range(contour_point_num)
]
while discovered != n_missing:
patchmap: Dict[complex, Union[int, float]] = {}
for coord in coordinates:
value = zcopy.get(coord, None)
if value is not None:
# trial value or already discovered
continue
n_neighbors = 0
for offset in NEIGHBOR_OFFSETS:
neighbor = zcopy.get(coord + offset, None)
if neighbor is not None:
n_neighbors += 1
if n_neighbors > 0:
patchmap[coord] = n_neighbors
zcopy.update(patchmap)
patch = [k for k, _ in sorted(patchmap.items(), key=lambda i: i[1], reverse=True)]
iter_queue.extend(patch)
discovered += len(patch)
return iter_queue
|
def _find_coordinates_where_empty(
zmap: Dict[complex, Union[int, float]], contour_point_num: int
) -> List[complex]:
# this function implements missing value discovery and sorting
# algorithm used in Plotly to interpolate heatmaps and contour plots
# https://github.com/plotly/plotly.js/blob/master/src/traces/heatmap/find_empties.js
# it works by repeatedly iterating over coordinate map in search for patches of
# missing values with existing or previously discovered neighbors
# when discovered, such patches are added to the iteration queue (list of coordinates)
# sorted by number of neighbors, marking iteration order for interpolation algorithm
# search ends when all missing patches have been discovered
# it's like playing minesweeper in reverse
iter_queue: List[complex] = []
zcopy = zmap.copy()
discovered = 0
n_missing = (contour_point_num ** 2) - len(zmap)
coordinates = [
complex(xaxis, yaxis)
for yaxis in range(contour_point_num)
for xaxis in range(contour_point_num)
]
while discovered != n_missing:
patchmap: Dict[complex, Union[int, float]] = {}
for coord in coordinates:
value = zcopy.get(coord, None)
if value is not None:
# trial value or already discovered
continue
n_neighbors = 0
for offset in NEIGHBOR_OFFSETS:
neighbor = zcopy.get(coord + offset, None)
if neighbor is not None:
n_neighbors += 1
if n_neighbors > 0:
patchmap[coord] = n_neighbors
zcopy.update(patchmap)
patch = [k for k, _ in sorted(patchmap.items(), key=lambda i: i[1], reverse=True)]
iter_queue.extend(patch)
discovered += len(patch)
return iter_queue
|
28,584 |
def plot_ppc(
data,
kind="kde",
alpha=None,
mean=True,
observed=True,
color=None,
colors=None,
grid=None,
figsize=None,
textsize=None,
data_pairs=None,
var_names=None,
filter_vars=None,
coords=None,
flatten=None,
flatten_pp=None,
num_pp_samples=None,
random_seed=None,
jitter=None,
animated=False,
animation_kwargs=None,
legend=True,
labeller=None,
ax=None,
backend=None,
backend_kwargs=None,
group="posterior",
show=None,
):
"""
Plot for posterior/prior predictive checks.
Parameters
----------
data: :class:`arviz.InferenceData` object
InferenceData object containing the observed and posterior/prior predictive data.
kind: str
Type of plot to display ("kde", "cumulative", or "scatter"). Defaults to `kde`.
alpha: float
Opacity of posterior/prior predictive density curves.
Defaults to `0.2` for ``kind`` = kde and cumulative, for scatter defaults to `0.7`.
mean: bool
Whether or not to plot the mean posterior/prior predictive distribution.
Defaults to ``True``.
observed: bool, default ``True``
Whether or not to plot the observed data.
color: str
Valid matplotlib ``color``. Defaults to `C0`.
color: list
List with valid matplotlib colors corresponding to the posterior/prior predictive
distribution, observed data and mean of the posterior/prior predictive distribution.
Defaults to ["C0", "k", "C1"].
grid : tuple
Number of rows and columns. Defaults to None, the rows and columns are
automatically inferred.
figsize: tuple
Figure size. If None, it will be defined automatically.
textsize: float
Text size scaling factor for labels, titles and lines. If None, it will be
autoscaled based on ``figsize``.
data_pairs: dict
Dictionary containing relations between observed data and posterior/prior predictive data.
Dictionary structure:
- key = data var_name
- value = posterior/prior predictive var_name
For example, ``data_pairs = {'y' : 'y_hat'}``
If None, it will assume that the observed data and the posterior/prior
predictive data have the same variable name.
var_names: list of variable names
Variables to be plotted, if `None` all variable are plotted. Prefix the
variables by ``~`` when you want to exclude them from the plot.
filter_vars: {None, "like", "regex"}, optional, default=None
If `None` (default), interpret var_names as the real variables names. If "like",
interpret var_names as substrings of the real variables names. If "regex",
interpret var_names as regular expressions on the real variables names. A la
``pandas.filter``.
coords: dict
Dictionary mapping dimensions to selected coordinates to be plotted.
Dimensions without a mapping specified will include all coordinates for
that dimension. Defaults to including all coordinates for all
dimensions if None.
flatten: list
List of dimensions to flatten in observed_data. Only flattens across the coordinates
specified in the ``coords`` argument. Defaults to flattening all of the dimensions.
flatten_pp: list
List of dimensions to flatten in posterior_predictive/prior_predictive. Only flattens
across the coordinates specified in the ``coords`` argument. Defaults to flattening all
of the dimensions. Dimensions should match flatten excluding dimensions for ``data_pairs``
parameters. If ``flatten`` is defined and ``flatten_pp`` is None, then
``flatten_pp`` = `flatten`.
num_pp_samples: int
The number of posterior/prior predictive samples to plot. For ``kind`` = 'scatter' and
`animation` = ``False`` if defaults to a maximum of 5 samples and will set jitter to `0.7`.
unless defined. Otherwise it defaults to all provided samples.
random_seed: int
Random number generator seed passed to ``numpy.random.seed`` to allow
reproducibility of the plot. By default, no seed will be provided
and the plot will change each call if a random sample is specified
by ``num_pp_samples``.
jitter: float
If ``kind`` is "scatter", jitter will add random uniform noise to the height
of the ppc samples and observed data. By default `0`.
animated: bool
Create an animation of one posterior/prior predictive sample per frame.
Defaults to ``False``. Only works with matploblib backend.
To run animations inside a notebook you have to use the `nbAgg` matplotlib's backend.
Try with `%matplotlib notebook` or `%matplotlib nbAgg`. You can switch back to the
default matplotlib's backend with `%matplotlib inline` or `%matplotlib auto`.
If switching back and forth between matplotlib's backend, you may need to run twice the cell
with the animation.
If you experience problems rendering the animation try setting
`animation_kwargs({'blit':False}) or changing the matplotlib's backend (e.g. to TkAgg)
If you run the animation from a script write `ax, ani = az.plot_ppc(.)`
animation_kwargs : dict
Keywords passed to ``animation.FuncAnimation``. Ignored with matplotlib backend.
legend : bool
Add legend to figure. By default ``True``.
labeller : labeller instance, optional
Class providing the method ``make_pp_label`` to generate the labels in the plot titles.
Read the :ref:`label_guide` for more details and usage examples.
ax: numpy array-like of matplotlib axes or bokeh figures, optional
A 2D array of locations into which to plot the densities. If not supplied, Arviz will create
its own array of plot areas (and return it).
backend: str, optional
Select plotting backend {"matplotlib","bokeh"}. Default to "matplotlib".
backend_kwargs: bool, optional
These are kwargs specific to the backend being used, passed to
:func:`matplotlib.pyplot.subplots` or :func:`bokeh.plotting.figure`.
For additional documentation check the plotting method of the backend.
group: {"prior", "posterior"}, optional
Specifies which InferenceData group should be plotted. Defaults to `'posterior'`.
Other value can be `'prior'`.
show: bool, optional
Call backend show function.
Returns
-------
axes: matplotlib axes or bokeh figures
See Also
--------
plot_bvp: Plot Bayesian p-value for observed data and Posterior/Prior predictive.
Examples
--------
Plot the observed data KDE overlaid on posterior predictive KDEs.
.. plot::
:context: close-figs
>>> import arviz as az
>>> data = az.load_arviz_data('radon')
>>> az.plot_ppc(data, data_pairs={"y":"y"})
Plot the overlay with empirical CDFs.
.. plot::
:context: close-figs
>>> az.plot_ppc(data, kind='cumulative')
Use the ``coords`` and ``flatten`` parameters to plot selected variable dimensions
across multiple plots. We will now modify the dimension `obs_id` to contain
indicate the name of the county where the measure was taken. The change has to
be done on both ``posterior_predictive`` and ``observed_data`` groups, which is
why we will use :meth:`~arviz.InferenceData.map` to apply the same function to
both groups. Afterwards, we will select the counties to be plotted with the
``coords`` arg.
.. plot::
:context: close-figs
>>> obs_county = data.posterior["County"][data.constant_data["county_idx"]]
>>> data = data.assign_coords(obs_id=obs_county, groups="observed_vars")
>>> az.plot_ppc(data, coords={'obs_id': ['ANOKA', 'BELTRAMI']}, flatten=[])
Plot the overlay using a stacked scatter plot that is particularly useful
when the sample sizes are small.
.. plot::
:context: close-figs
>>> az.plot_ppc(data, kind='scatter', flatten=[],
>>> coords={'obs_id': ['AITKIN', 'BELTRAMI']})
Plot random posterior predictive sub-samples.
.. plot::
:context: close-figs
>>> az.plot_ppc(data, num_pp_samples=30, random_seed=7)
"""
if group not in ("posterior", "prior"):
raise TypeError("`group` argument must be either `posterior` or `prior`")
for groups in (f"{group}_predictive", "observed_data"):
if not hasattr(data, groups):
raise TypeError(f'`data` argument must have the group "{groups}" for ppcplot')
if kind.lower() not in ("kde", "cumulative", "scatter"):
raise TypeError("`kind` argument must be either `kde`, `cumulative`, or `scatter`")
if colors is None:
colors = ["C0", "k", "C1"]
if isinstance(colors, str):
raise TypeError("colors should be a list with 3 items.")
if len(colors) != 3:
raise ValueError("colors should be a list with 3 items.")
if color is not None:
warnings.warn("color has been deprecated in favor of colors", FutureWarning)
colors[0] = color
if data_pairs is None:
data_pairs = {}
if backend is None:
backend = rcParams["plot.backend"]
backend = backend.lower()
if backend == "bokeh":
if animated:
raise TypeError("Animation option is only supported with matplotlib backend.")
observed_data = data.observed_data
if group == "posterior":
predictive_dataset = data.posterior_predictive
elif group == "prior":
predictive_dataset = data.prior_predictive
if var_names is None:
var_names = list(observed_data.data_vars)
var_names = _var_names(var_names, observed_data, filter_vars)
pp_var_names = [data_pairs.get(var, var) for var in var_names]
pp_var_names = _var_names(pp_var_names, predictive_dataset, filter_vars)
if flatten_pp is None and flatten is None:
flatten_pp = list(predictive_dataset.dims.keys())
elif flatten_pp is None:
flatten_pp = flatten
if flatten is None:
flatten = list(observed_data.dims.keys())
if coords is None:
coords = {}
if labeller is None:
labeller = BaseLabeller()
if random_seed is not None:
np.random.seed(random_seed)
total_pp_samples = predictive_dataset.sizes["chain"] * predictive_dataset.sizes["draw"]
if num_pp_samples is None:
if kind == "scatter" and not animated:
num_pp_samples = min(5, total_pp_samples)
else:
num_pp_samples = total_pp_samples
if (
not isinstance(num_pp_samples, Integral)
or num_pp_samples < 1
or num_pp_samples > total_pp_samples
):
raise TypeError(
"`num_pp_samples` must be an integer between 1 and " + f"{total_pp_samples}."
)
pp_sample_ix = np.random.choice(total_pp_samples, size=num_pp_samples, replace=False)
for key in coords.keys():
coords[key] = np.where(np.in1d(observed_data[key], coords[key]))[0]
obs_plotters = filter_plotters_list(
list(
xarray_var_iter(
observed_data.isel(coords),
skip_dims=set(flatten),
var_names=var_names,
combined=True,
)
),
"plot_ppc",
)
length_plotters = len(obs_plotters)
pp_plotters = [
tup
for _, tup in zip(
range(length_plotters),
xarray_var_iter(
predictive_dataset.isel(coords),
var_names=pp_var_names,
skip_dims=set(flatten_pp),
combined=True,
),
)
]
rows, cols = default_grid(length_plotters, grid=grid)
ppcplot_kwargs = dict(
ax=ax,
length_plotters=length_plotters,
rows=rows,
cols=cols,
figsize=figsize,
animated=animated,
obs_plotters=obs_plotters,
pp_plotters=pp_plotters,
predictive_dataset=predictive_dataset,
pp_sample_ix=pp_sample_ix,
kind=kind,
alpha=alpha,
colors=colors,
jitter=jitter,
textsize=textsize,
mean=mean,
observed=observed,
total_pp_samples=total_pp_samples,
legend=legend,
labeller=labeller,
group=group,
animation_kwargs=animation_kwargs,
num_pp_samples=num_pp_samples,
backend_kwargs=backend_kwargs,
show=show,
)
# TODO: Add backend kwargs
plot = get_plotting_function("plot_ppc", "ppcplot", backend)
axes = plot(**ppcplot_kwargs)
return axes
|
def plot_ppc(
data,
kind="kde",
alpha=None,
mean=True,
observed=True,
color=None,
colors=None,
grid=None,
figsize=None,
textsize=None,
data_pairs=None,
var_names=None,
filter_vars=None,
coords=None,
flatten=None,
flatten_pp=None,
num_pp_samples=None,
random_seed=None,
jitter=None,
animated=False,
animation_kwargs=None,
legend=True,
labeller=None,
ax=None,
backend=None,
backend_kwargs=None,
group="posterior",
show=None,
):
"""
Plot for posterior/prior predictive checks.
Parameters
----------
data: :class:`arviz.InferenceData` object
InferenceData object containing the observed and posterior/prior predictive data.
kind: str
Type of plot to display ("kde", "cumulative", or "scatter"). Defaults to `kde`.
alpha: float
Opacity of posterior/prior predictive density curves.
Defaults to `0.2` for ``kind`` = kde and cumulative, for scatter defaults to `0.7`.
mean: bool
Whether or not to plot the mean posterior/prior predictive distribution.
Defaults to ``True``.
observed: bool, default ``True``
Whether or not to plot the observed data.
color: str
Valid matplotlib ``color``. Defaults to `C0`.
color: list
List with valid matplotlib colors corresponding to the posterior/prior predictive
distribution, observed data and mean of the posterior/prior predictive distribution.
Defaults to ["C0", "k", "C1"].
grid : tuple
Number of rows and columns. Defaults to None, the rows and columns are
automatically inferred.
figsize: tuple
Figure size. If None, it will be defined automatically.
textsize: float
Text size scaling factor for labels, titles and lines. If None, it will be
autoscaled based on ``figsize``.
data_pairs: dict
Dictionary containing relations between observed data and posterior/prior predictive data.
Dictionary structure:
- key = data var_name
- value = posterior/prior predictive var_name
For example, ``data_pairs = {'y' : 'y_hat'}``
If None, it will assume that the observed data and the posterior/prior
predictive data have the same variable name.
var_names: list of variable names
Variables to be plotted, if `None` all variable are plotted. Prefix the
variables by ``~`` when you want to exclude them from the plot.
filter_vars: {None, "like", "regex"}, optional, default=None
If `None` (default), interpret var_names as the real variables names. If "like",
interpret var_names as substrings of the real variables names. If "regex",
interpret var_names as regular expressions on the real variables names. A la
``pandas.filter``.
coords: dict
Dictionary mapping dimensions to selected coordinates to be plotted.
Dimensions without a mapping specified will include all coordinates for
that dimension. Defaults to including all coordinates for all
dimensions if None.
flatten: list
List of dimensions to flatten in observed_data. Only flattens across the coordinates
specified in the ``coords`` argument. Defaults to flattening all of the dimensions.
flatten_pp: list
List of dimensions to flatten in posterior_predictive/prior_predictive. Only flattens
across the coordinates specified in the ``coords`` argument. Defaults to flattening all
of the dimensions. Dimensions should match flatten excluding dimensions for ``data_pairs``
parameters. If ``flatten`` is defined and ``flatten_pp`` is None, then
``flatten_pp`` = `flatten`.
num_pp_samples: int
The number of posterior/prior predictive samples to plot. For ``kind`` = 'scatter' and
`animation` = ``False`` if defaults to a maximum of 5 samples and will set jitter to `0.7`.
unless defined. Otherwise it defaults to all provided samples.
random_seed: int
Random number generator seed passed to ``numpy.random.seed`` to allow
reproducibility of the plot. By default, no seed will be provided
and the plot will change each call if a random sample is specified
by ``num_pp_samples``.
jitter: float
If ``kind`` is "scatter", jitter will add random uniform noise to the height
of the ppc samples and observed data. By default `0`.
animated: bool
Create an animation of one posterior/prior predictive sample per frame.
Defaults to ``False``. Only works with matploblib backend.
To run animations inside a notebook you have to use the `nbAgg` matplotlib's backend.
Try with `%matplotlib notebook` or `%matplotlib nbAgg`. You can switch back to the
default matplotlib's backend with `%matplotlib inline` or `%matplotlib auto`.
If switching back and forth between matplotlib's backend, you may need to run twice the cell
with the animation.
If you experience problems rendering the animation try setting
`animation_kwargs({'blit':False}) or changing the matplotlib's backend (e.g. to TkAgg)
If you run the animation from a script write `ax, ani = az.plot_ppc(.)`
animation_kwargs : dict
Keywords passed to ``animation.FuncAnimation``. Ignored with matplotlib backend.
legend : bool
Add legend to figure. By default ``True``.
labeller : labeller instance, optional
Class providing the method ``make_pp_label`` to generate the labels in the plot titles.
Read the :ref:`label_guide` for more details and usage examples.
ax: numpy array-like of matplotlib axes or bokeh figures, optional
A 2D array of locations into which to plot the densities. If not supplied, Arviz will create
its own array of plot areas (and return it).
backend: str, optional
Select plotting backend {"matplotlib","bokeh"}. Default to "matplotlib".
backend_kwargs: bool, optional
These are kwargs specific to the backend being used, passed to
:func:`matplotlib.pyplot.subplots` or :func:`bokeh.plotting.figure`.
For additional documentation check the plotting method of the backend.
group: {"prior", "posterior"}, optional
Specifies which InferenceData group should be plotted. Defaults to `'posterior'`.
Other value can be `'prior'`.
show: bool, optional
Call backend show function.
Returns
-------
axes: matplotlib axes or bokeh figures
See Also
--------
plot_bpv: Plot Bayesian p-value for observed data and Posterior/Prior predictive.
Examples
--------
Plot the observed data KDE overlaid on posterior predictive KDEs.
.. plot::
:context: close-figs
>>> import arviz as az
>>> data = az.load_arviz_data('radon')
>>> az.plot_ppc(data, data_pairs={"y":"y"})
Plot the overlay with empirical CDFs.
.. plot::
:context: close-figs
>>> az.plot_ppc(data, kind='cumulative')
Use the ``coords`` and ``flatten`` parameters to plot selected variable dimensions
across multiple plots. We will now modify the dimension `obs_id` to contain
indicate the name of the county where the measure was taken. The change has to
be done on both ``posterior_predictive`` and ``observed_data`` groups, which is
why we will use :meth:`~arviz.InferenceData.map` to apply the same function to
both groups. Afterwards, we will select the counties to be plotted with the
``coords`` arg.
.. plot::
:context: close-figs
>>> obs_county = data.posterior["County"][data.constant_data["county_idx"]]
>>> data = data.assign_coords(obs_id=obs_county, groups="observed_vars")
>>> az.plot_ppc(data, coords={'obs_id': ['ANOKA', 'BELTRAMI']}, flatten=[])
Plot the overlay using a stacked scatter plot that is particularly useful
when the sample sizes are small.
.. plot::
:context: close-figs
>>> az.plot_ppc(data, kind='scatter', flatten=[],
>>> coords={'obs_id': ['AITKIN', 'BELTRAMI']})
Plot random posterior predictive sub-samples.
.. plot::
:context: close-figs
>>> az.plot_ppc(data, num_pp_samples=30, random_seed=7)
"""
if group not in ("posterior", "prior"):
raise TypeError("`group` argument must be either `posterior` or `prior`")
for groups in (f"{group}_predictive", "observed_data"):
if not hasattr(data, groups):
raise TypeError(f'`data` argument must have the group "{groups}" for ppcplot')
if kind.lower() not in ("kde", "cumulative", "scatter"):
raise TypeError("`kind` argument must be either `kde`, `cumulative`, or `scatter`")
if colors is None:
colors = ["C0", "k", "C1"]
if isinstance(colors, str):
raise TypeError("colors should be a list with 3 items.")
if len(colors) != 3:
raise ValueError("colors should be a list with 3 items.")
if color is not None:
warnings.warn("color has been deprecated in favor of colors", FutureWarning)
colors[0] = color
if data_pairs is None:
data_pairs = {}
if backend is None:
backend = rcParams["plot.backend"]
backend = backend.lower()
if backend == "bokeh":
if animated:
raise TypeError("Animation option is only supported with matplotlib backend.")
observed_data = data.observed_data
if group == "posterior":
predictive_dataset = data.posterior_predictive
elif group == "prior":
predictive_dataset = data.prior_predictive
if var_names is None:
var_names = list(observed_data.data_vars)
var_names = _var_names(var_names, observed_data, filter_vars)
pp_var_names = [data_pairs.get(var, var) for var in var_names]
pp_var_names = _var_names(pp_var_names, predictive_dataset, filter_vars)
if flatten_pp is None and flatten is None:
flatten_pp = list(predictive_dataset.dims.keys())
elif flatten_pp is None:
flatten_pp = flatten
if flatten is None:
flatten = list(observed_data.dims.keys())
if coords is None:
coords = {}
if labeller is None:
labeller = BaseLabeller()
if random_seed is not None:
np.random.seed(random_seed)
total_pp_samples = predictive_dataset.sizes["chain"] * predictive_dataset.sizes["draw"]
if num_pp_samples is None:
if kind == "scatter" and not animated:
num_pp_samples = min(5, total_pp_samples)
else:
num_pp_samples = total_pp_samples
if (
not isinstance(num_pp_samples, Integral)
or num_pp_samples < 1
or num_pp_samples > total_pp_samples
):
raise TypeError(
"`num_pp_samples` must be an integer between 1 and " + f"{total_pp_samples}."
)
pp_sample_ix = np.random.choice(total_pp_samples, size=num_pp_samples, replace=False)
for key in coords.keys():
coords[key] = np.where(np.in1d(observed_data[key], coords[key]))[0]
obs_plotters = filter_plotters_list(
list(
xarray_var_iter(
observed_data.isel(coords),
skip_dims=set(flatten),
var_names=var_names,
combined=True,
)
),
"plot_ppc",
)
length_plotters = len(obs_plotters)
pp_plotters = [
tup
for _, tup in zip(
range(length_plotters),
xarray_var_iter(
predictive_dataset.isel(coords),
var_names=pp_var_names,
skip_dims=set(flatten_pp),
combined=True,
),
)
]
rows, cols = default_grid(length_plotters, grid=grid)
ppcplot_kwargs = dict(
ax=ax,
length_plotters=length_plotters,
rows=rows,
cols=cols,
figsize=figsize,
animated=animated,
obs_plotters=obs_plotters,
pp_plotters=pp_plotters,
predictive_dataset=predictive_dataset,
pp_sample_ix=pp_sample_ix,
kind=kind,
alpha=alpha,
colors=colors,
jitter=jitter,
textsize=textsize,
mean=mean,
observed=observed,
total_pp_samples=total_pp_samples,
legend=legend,
labeller=labeller,
group=group,
animation_kwargs=animation_kwargs,
num_pp_samples=num_pp_samples,
backend_kwargs=backend_kwargs,
show=show,
)
# TODO: Add backend kwargs
plot = get_plotting_function("plot_ppc", "ppcplot", backend)
axes = plot(**ppcplot_kwargs)
return axes
|
23,672 |
def prilliman(temp_cell, wind_speed, unit_mass=11.1, coefficients=None):
"""
Smooth out short-term model transience using the Prilliman model [1]_.
The Prilliman et al. model applies an exponential moving average to
the output of a steady-state cell temperature model to account for a
module's thermal inertia and smooth out the cell temperature's response
to changing weather conditions.
.. warning::
This implementation requires the time series inputs to be regularly
sampled in time. Data with irregular time steps should be resampled
prior to using this function.
Parameters
----------
temp_cell : pandas Series
Cell temperature modeled with steady-state assumptions [C]
wind_speed : pandas Series
Wind speed, adjusted to correspond to array height [m/s]
unit_mass : float, default 11.1
Total mass of module divided by its one-sided surface area [kg/m^2]
coefficients : 4-element list-like, optional
Values for coefficients a_0–a_3 from [1]_
Returns
-------
temp_cell : pandas Series
Smoothed version of the input cell temperature [C]
Notes
-----
This smoothing model was developed and validated using the SAPM
model for the steady-state input.
References
----------
.. [1] M. Prilliman, J. S. Stein, D. Riley and G. Tamizhmani,
"Transient Weighted Moving-Average Model of Photovoltaic Module
Back-Surface Temperature," IEEE Journal of Photovoltaics, 2020.
:doi:`10.1109/JPHOTOV.2020.2992351`
"""
# TODO: check inputs to ensure regular spacing?
time_step = (temp_cell.index[1] - temp_cell.index[0]).total_seconds()
if time_step >= 1200:
# too coarsely sampled for smoothing to be relevant
return temp_cell
window = min(int(1200 / time_step), # time series > 20 minutes
len(temp_cell)) # time series < 20 minutes
# prefix with NaNs so that the rolling window is "full",
# even for the first actual value:
prefix = np.full(window, np.nan)
temp_cell_prefixed = np.append(prefix, temp_cell.values)
# get one row per 20-minute window
H = scipy.linalg.hankel(np.arange(window),
np.arange(window - 1, len(temp_cell_prefixed)))
subsets = temp_cell_prefixed[H].T
# calculate weights for the values in each window
if coefficients is not None:
a = coefficients
else:
# values from [1], Table II
a = [0.0046, 0.00046, -0.00023, -1.6e-5]
wind_speed = wind_speed.values
P = a[0] + a[1]*wind_speed + a[2]*unit_mass + a[3]*wind_speed*unit_mass
timedeltas = np.arange(window, 0, -1) * time_step
weights = np.exp(-P[:, np.newaxis] * timedeltas)
# set weights corresponding to the prefix values to zero; otherwise the
# denominator of the weighted average below would be wrong
mask_idx = np.triu_indices(window)
np.fliplr(weights)[mask_idx] = 0
# change the first row of weights from zero to nan -- this is a
# trick to prevent div by zero warning when dividing by summed weights
weights[0, :] = np.nan
# finally, take the weighted average of each window
numerator = np.nansum(subsets[:-1] * weights, axis=1)
denominator = np.sum(weights, axis=1)
smoothed = numerator / denominator
smoothed[0] = temp_cell.values[0]
smoothed = pd.Series(smoothed, index=temp_cell.index)
return smoothed
|
def prilliman(temp_cell, wind_speed, unit_mass=11.1, coefficients=None):
"""
Smooth out short-term model transience using the Prilliman model [1]_.
The Prilliman et al. model applies an exponential moving average to
the output of a steady-state cell temperature model to account for a
module's thermal inertia and smooth out the cell temperature's response
to changing weather conditions.
.. warning::
This implementation requires the time series inputs to be regularly
sampled in time with frequency less than 20 minutes. Data with irregular time steps should be resampled
prior to using this function.
Parameters
----------
temp_cell : pandas Series
Cell temperature modeled with steady-state assumptions [C]
wind_speed : pandas Series
Wind speed, adjusted to correspond to array height [m/s]
unit_mass : float, default 11.1
Total mass of module divided by its one-sided surface area [kg/m^2]
coefficients : 4-element list-like, optional
Values for coefficients a_0–a_3 from [1]_
Returns
-------
temp_cell : pandas Series
Smoothed version of the input cell temperature [C]
Notes
-----
This smoothing model was developed and validated using the SAPM
model for the steady-state input.
References
----------
.. [1] M. Prilliman, J. S. Stein, D. Riley and G. Tamizhmani,
"Transient Weighted Moving-Average Model of Photovoltaic Module
Back-Surface Temperature," IEEE Journal of Photovoltaics, 2020.
:doi:`10.1109/JPHOTOV.2020.2992351`
"""
# TODO: check inputs to ensure regular spacing?
time_step = (temp_cell.index[1] - temp_cell.index[0]).total_seconds()
if time_step >= 1200:
# too coarsely sampled for smoothing to be relevant
return temp_cell
window = min(int(1200 / time_step), # time series > 20 minutes
len(temp_cell)) # time series < 20 minutes
# prefix with NaNs so that the rolling window is "full",
# even for the first actual value:
prefix = np.full(window, np.nan)
temp_cell_prefixed = np.append(prefix, temp_cell.values)
# get one row per 20-minute window
H = scipy.linalg.hankel(np.arange(window),
np.arange(window - 1, len(temp_cell_prefixed)))
subsets = temp_cell_prefixed[H].T
# calculate weights for the values in each window
if coefficients is not None:
a = coefficients
else:
# values from [1], Table II
a = [0.0046, 0.00046, -0.00023, -1.6e-5]
wind_speed = wind_speed.values
P = a[0] + a[1]*wind_speed + a[2]*unit_mass + a[3]*wind_speed*unit_mass
timedeltas = np.arange(window, 0, -1) * time_step
weights = np.exp(-P[:, np.newaxis] * timedeltas)
# set weights corresponding to the prefix values to zero; otherwise the
# denominator of the weighted average below would be wrong
mask_idx = np.triu_indices(window)
np.fliplr(weights)[mask_idx] = 0
# change the first row of weights from zero to nan -- this is a
# trick to prevent div by zero warning when dividing by summed weights
weights[0, :] = np.nan
# finally, take the weighted average of each window
numerator = np.nansum(subsets[:-1] * weights, axis=1)
denominator = np.sum(weights, axis=1)
smoothed = numerator / denominator
smoothed[0] = temp_cell.values[0]
smoothed = pd.Series(smoothed, index=temp_cell.index)
return smoothed
|
30,832 |
def get_security_profiles_command():
"""
Get information about profiles.
"""
security_profile = demisto.args().get('security_profile')
if security_profile:
xpath = f'{XPATH_RULEBASE}profiles/{security_profile}'
else:
xpath = f'{XPATH_RULEBASE}profiles'
result = get_security_profile(xpath)
if security_profile:
security_profiles = result.get('response', {}).get('result', {})
else:
security_profiles = result.get('response', {}).get('result', {}).get('profiles', {})
if '@dirtyId' in security_profiles:
LOG(f'Found uncommitted item:\n{security_profiles}')
raise Exception('Please commit the instance prior to getting the security profiles.')
human_readable = ''
content: List[Dict[str, Any]] = []
context = {}
if 'spyware' in security_profiles:
profiles = security_profiles.get('spyware').get('entry', {})
if isinstance(profiles, list):
for profile in profiles:
rules = profile.get('rules', {}).get('entry', [])
spyware_rules = prettify_profiles_rules(rules)
content.append({
'Name': profile['@name'],
'Rules': spyware_rules
})
else:
rules = profiles.get('rules', {}).get('entry', [])
spyware_rules = prettify_profiles_rules(rules)
content = {
'Name': profiles['@name'],
'Rules': spyware_rules
}
human_readable = tableToMarkdown('Anti Spyware Profiles', content)
context.update({"Panorama.Spyware(val.Name == obj.Name)": content})
if 'virus' in security_profiles:
profiles = security_profiles.get('virus').get('entry', [])
if isinstance(profiles, list):
for profile in profiles:
rules = profile.get('decoder', {}).get('entry', [])
antivirus_rules = prettify_profiles_rules(rules)
content.append({
'Name': profile['@name'],
'Decoder': antivirus_rules
})
else:
rules = profiles.get('decoder', {}).get('entry', [])
antivirus_rules = prettify_profiles_rules(rules)
content = {
'Name': profiles['@name'],
'Rules': antivirus_rules
}
human_readable += tableToMarkdown('Antivirus Profiles', content)
context.update({"Panorama.Antivirus(val.Name == obj.Name)": content})
if 'file-blocking' in security_profiles:
profiles = security_profiles.get('file-blocking').get('entry', {})
if isinstance(profiles, list):
for profile in profiles:
rules = profile.get('rules', {}).get('entry', [])
file_blocking_rules = prettify_profiles_rules(rules)
content.append({
'Name': profile['@name'],
'Rules': file_blocking_rules
})
else:
rules = profiles.get('rules', {}).get('entry', [])
file_blocking_rules = prettify_profiles_rules(rules)
content = {
'Name': profiles['@name'],
'Rules': file_blocking_rules
}
human_readable += tableToMarkdown('File Blocking Profiles', content)
context.update({"Panorama.FileBlocking(val.Name == obj.Name)": content})
if 'vulnerability' in security_profiles:
profiles = security_profiles.get('vulnerability').get('entry', {})
if isinstance(profiles, list):
for profile in profiles:
rules = profile.get('rules', {}).get('entry', [])
vulnerability_rules = prettify_profiles_rules(rules)
content.append({
'Name': profile['@name'],
'Rules': vulnerability_rules
})
else:
rules = profiles.get('rules', {}).get('entry', [])
vulnerability_rules = prettify_profiles_rules(rules)
content = {
'Name': profiles['@name'],
'Rules': vulnerability_rules
}
human_readable += tableToMarkdown('vulnerability Protection Profiles', content)
context.update({"Panorama.Vulnerability(val.Name == obj.Name)": content})
if 'data-filtering' in security_profiles:
profiles = security_profiles.get('data-filtering').get('entry', {})
if isinstance(profiles, list):
for profile in profiles:
rules = profile.get('rules', {}).get('entry', [])
data_filtering_rules = prettify_data_filtering_rules(rules)
content.append({
'Name': profile['@name'],
'Rules': data_filtering_rules
})
else:
rules = profiles.get('rules', {}).get('entry', [])
data_filtering_rules = prettify_data_filtering_rules(rules)
content = {
'Name': profiles['@name'],
'Rules': data_filtering_rules
}
human_readable += tableToMarkdown('Data Filtering Profiles', content)
context.update({"Panorama.DataFiltering(val.Name == obj.Name)": content})
if 'url-filtering' in security_profiles:
profiles = security_profiles.get('url-filtering').get('entry', [])
if isinstance(profiles, list):
for profile in profiles:
url_filtering_rules = prettify_get_url_filter(profile)
content.append({
'Name': profile['@name'],
'Rules': url_filtering_rules
})
else:
url_filtering_rules = prettify_get_url_filter(profiles)
content = {
'Name': profiles['@name'],
'Rules': url_filtering_rules
}
human_readable += tableToMarkdown('URL Filtering Profiles', content)
context.update({"Panorama.URLFilter(val.Name == obj.Name)": content})
if 'wildfire-analysis' in security_profiles:
profiles = security_profiles.get('wildfire-analysis').get('entry', [])
if isinstance(profiles, list):
for profile in profiles:
rules = profile.get('rules', {}).get('entry', [])
wildfire_rules = prettify_wildfire_rules(rules)
content.append({
'Name': profile['@name'],
'Rules': wildfire_rules
})
else:
rules = profiles.get('rules', {}).get('entry', [])
wildfire_rules = prettify_wildfire_rules(rules)
content = {
'Name': profiles['@name'],
'Rules': wildfire_rules
}
human_readable += tableToMarkdown('WildFire Profiles', content)
context.update({"Panorama.WildFire(val.Name == obj.Name)": content})
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': result,
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': human_readable,
'EntryContext': context
})
|
def get_security_profiles_command():
"""
Get information about profiles.
"""
security_profile = demisto.args().get('security_profile')
if security_profile:
xpath = f'{XPATH_RULEBASE}profiles/{security_profile}'
else:
xpath = f'{XPATH_RULEBASE}profiles'
result = get_security_profile(xpath)
if security_profile:
security_profiles = result.get('response', {}).get('result', {})
else:
security_profiles = result.get('response', {}).get('result', {}).get('profiles', {})
if '@dirtyId' in security_profiles:
LOG(f'Found uncommitted item:\n{security_profiles}')
raise Exception('Please commit the instance prior to getting the security profiles.')
human_readable = ''
content: List[Dict[str, Any]] = []
context = {}
if 'spyware' in security_profiles:
profiles = security_profiles.get('spyware').get('entry', {})
if isinstance(profiles, list):
for profile in profiles:
rules = profile.get('rules', {}).get('entry', [])
spyware_rules = prettify_profiles_rules(rules)
content.append({
'Name': profile['@name'],
'Rules': spyware_rules
})
else:
rules = profiles.get('rules', {}).get('entry', [])
spyware_rules = prettify_profiles_rules(rules)
content = {
'Name': profiles['@name'],
'Rules': spyware_rules
}
human_readable = tableToMarkdown('Anti Spyware Profiles', content)
context.update({"Panorama.Spyware(val.Name == obj.Name)": content})
if 'virus' in security_profiles:
profiles = security_profiles.get('virus').get('entry', [])
if isinstance(profiles, list):
for profile in profiles:
rules = profile.get('decoder', {}).get('entry', [])
antivirus_rules = prettify_profiles_rules(rules)
content.append({
'Name': profile['@name'],
'Decoder': antivirus_rules
})
else:
rules = profiles.get('decoder', {}).get('entry', [])
antivirus_rules = prettify_profiles_rules(rules)
content = {
'Name': profiles['@name'],
'Rules': antivirus_rules
}
human_readable += tableToMarkdown('Antivirus Profiles', content)
context.update({"Panorama.Antivirus(val.Name == obj.Name)": content})
if 'file-blocking' in security_profiles:
profiles = security_profiles.get('file-blocking').get('entry', {})
if isinstance(profiles, list):
for profile in profiles:
rules = profile.get('rules', {}).get('entry', [])
file_blocking_rules = prettify_profiles_rules(rules)
content.append({
'Name': profile['@name'],
'Rules': file_blocking_rules
})
else:
rules = profiles.get('rules', {}).get('entry', [])
file_blocking_rules = prettify_profiles_rules(rules)
content = {
'Name': profiles['@name'],
'Rules': file_blocking_rules
}
human_readable += tableToMarkdown('File Blocking Profiles', content)
context.update({"Panorama.FileBlocking(val.Name == obj.Name)": content})
if 'vulnerability' in security_profiles:
profiles = security_profiles.get('vulnerability').get('entry', {})
if isinstance(profiles, list):
for profile in profiles:
rules = profile.get('rules', {}).get('entry', [])
vulnerability_rules = prettify_profiles_rules(rules)
content.append({
'Name': profile['@name'],
'Rules': vulnerability_rules
})
else:
rules = profiles.get('rules', {}).get('entry', [])
vulnerability_rules = prettify_profiles_rules(rules)
content = {
'Name': profiles['@name'],
'Rules': vulnerability_rules
}
human_readable += tableToMarkdown('vulnerability Protection Profiles', content)
context.update({"Panorama.Vulnerability(val.Name == obj.Name)": content})
if 'data-filtering' in security_profiles:
profiles = security_profiles.get('data-filtering').get('entry', {})
if isinstance(profiles, list):
for profile in profiles:
rules = profile.get('rules', {}).get('entry', [])
data_filtering_rules = prettify_data_filtering_rules(rules)
content.append({
'Name': profile['@name'],
'Rules': data_filtering_rules
})
else:
rules = profiles.get('rules', {}).get('entry', [])
data_filtering_rules = prettify_data_filtering_rules(rules)
content = {
'Name': profiles['@name'],
'Rules': data_filtering_rules
}
human_readable += tableToMarkdown('Data Filtering Profiles', content)
context.update({"Panorama.DataFiltering(val.Name == obj.Name)": content})
if 'url-filtering' in security_profiles:
profiles = security_profiles.get('url-filtering', {}).get('entry', [])
if isinstance(profiles, list):
for profile in profiles:
url_filtering_rules = prettify_get_url_filter(profile)
content.append({
'Name': profile['@name'],
'Rules': url_filtering_rules
})
else:
url_filtering_rules = prettify_get_url_filter(profiles)
content = {
'Name': profiles['@name'],
'Rules': url_filtering_rules
}
human_readable += tableToMarkdown('URL Filtering Profiles', content)
context.update({"Panorama.URLFilter(val.Name == obj.Name)": content})
if 'wildfire-analysis' in security_profiles:
profiles = security_profiles.get('wildfire-analysis').get('entry', [])
if isinstance(profiles, list):
for profile in profiles:
rules = profile.get('rules', {}).get('entry', [])
wildfire_rules = prettify_wildfire_rules(rules)
content.append({
'Name': profile['@name'],
'Rules': wildfire_rules
})
else:
rules = profiles.get('rules', {}).get('entry', [])
wildfire_rules = prettify_wildfire_rules(rules)
content = {
'Name': profiles['@name'],
'Rules': wildfire_rules
}
human_readable += tableToMarkdown('WildFire Profiles', content)
context.update({"Panorama.WildFire(val.Name == obj.Name)": content})
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': result,
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': human_readable,
'EntryContext': context
})
|
23,175 |
def test_query_with_meta(db):
from sqlalchemy import sql
data = {
"name": pd.Series([], name="name", dtype="str"),
"age": pd.Series([], name="age", dtype="int"),
}
index = pd.Index([], name="number", dtype="int")
meta = pd.DataFrame(data, index=index)
s1 = sql.select(
[sql.column("number"), sql.column("name"), sql.column("age")]
).select_from(sql.table("test"))
out = read_sql_query(s1, db, npartitions=2, index_col="number", meta=meta)
# Don't check dtype for windows https://github.com/dask/dask/issues/8620
assert_eq(out, df[["name", "age"]], check_dtype=sys.int_info.sizeof_digit != 2)
|
def test_query_with_meta(db):
from sqlalchemy import sql
data = {
"name": pd.Series([], name="name", dtype="str"),
"age": pd.Series([], name="age", dtype="int"),
}
index = pd.Index([], name="number", dtype="int")
meta = pd.DataFrame(data, index=index)
s1 = sql.select(
[sql.column("number"), sql.column("name"), sql.column("age")]
).select_from(sql.table("test"))
out = read_sql_query(s1, db, npartitions=2, index_col="number", meta=meta)
# Don't check dtype for windows https://github.com/dask/dask/issues/8620
assert_eq(out, df[["name", "age"]], check_dtype=sys.int_info.sizeof_digit != 32)
|
29,073 |
def _build_file_selector(file_extension: str):
targeted_file_extensions = TARGETED_FILE_EXTENSIONS.copy()
if file_extension:
targeted_file_extensions.discard(EXTENSION)
return ProductionSafeTargetFileSelector(targeted_file_extensions)
|
def _build_file_selector(file_extension: str):
targeted_file_extensions = TARGETED_FILE_EXTENSIONS.copy()
if file_extension:
targeted_file_extensions.discard(file_extension)
return ProductionSafeTargetFileSelector(targeted_file_extensions)
|
43,833 |
def track(dev, version="default", **kwargs):
r"""Creates a tracking context and applies it to a device.
Args:
dev (~.Device): a PennyLane-compatible device
version (str): name of tracker to use. The current options are
`default` and `timing`.
Keyword Args:
reset_on_enter=True (bool): whether or not to reset information
entering the context
**Usage Information**
Note that with backpropagation, this functions should take ``qnode.device``
instead of the device used to create the QNode.
.. code-block:: python
dev = qml.device('default.qubit', wires=1)
@qml.qnode(dev, diff_method="parameter-shift")
def circuit(x):
qml.RX(x, wires=0)
return qml.expval(qml.PauliZ(0))
With the default version, total execution information is printed on
each device execution. The printed data depends on the device and tracker version,
but for standard PennyLane devices, the object will track executions and shots.
>>> with qml.track(circuit.device) as tracker:
... qml.grad(circuit)(0.1, shots=10)
Total: executions = 1 shots = 10
Total: executions = 2 shots = 20
Total: executions = 3 shots = 30
In with the ``'timing'`` implementation, the instance also tracks the time
between entering the context and the completion of an execution.
>>> with qml.track(circuit.device, version='timing') as timing_tracker:
... circuit(0.1)
... circuit(0.2)
Total: executions = 1 time = 0.0011134147644042969
Total: executions = 2 time = 0.0027322769165039062
After completion, one can also access the recorded information:
>>> timing_tracker.totals
defaultdict(int, {'executions': 2, 'shots': 30, 'time': 0.00311279296875})
>>> timing_tracker.history
defaultdict(list,
{'executions': [1, 1],
'shots': [None, None],
'time': [0.0012764930725097656, 0.0018362998962402344]})
By specifying ``reset_on_enter=False``, you can reuse the same tracker accross
multiple runtime contexts.
>>> with qml.track(circuit.device, reset_on_enter=False) as tracker:
... circuit(0.1)
Total: executions = 1
>>> with tracker:
... circuit(0.2)
Total: executions = 2
"""
if version == "timing":
return TimingTracker(dev, **kwargs)
elif version == "default":
return DefaultTracker(dev, **kwargs)
else:
raise qml.QuantumFunctionError(
f"version {version} supplied to track. " f"Current options are `timing` and `default`."
)
|
def track(dev, version="default", **kwargs):
r"""Creates a tracking context and applies it to a device.
Args:
dev (~.Device): a PennyLane-compatible device
version (str): Name of tracker to use. The current options are
`default` and `timing`.
Keyword Args:
reset_on_enter=True (bool): whether or not to reset information
entering the context
**Usage Information**
Note that with backpropagation, this functions should take ``qnode.device``
instead of the device used to create the QNode.
.. code-block:: python
dev = qml.device('default.qubit', wires=1)
@qml.qnode(dev, diff_method="parameter-shift")
def circuit(x):
qml.RX(x, wires=0)
return qml.expval(qml.PauliZ(0))
With the default version, total execution information is printed on
each device execution. The printed data depends on the device and tracker version,
but for standard PennyLane devices, the object will track executions and shots.
>>> with qml.track(circuit.device) as tracker:
... qml.grad(circuit)(0.1, shots=10)
Total: executions = 1 shots = 10
Total: executions = 2 shots = 20
Total: executions = 3 shots = 30
In with the ``'timing'`` implementation, the instance also tracks the time
between entering the context and the completion of an execution.
>>> with qml.track(circuit.device, version='timing') as timing_tracker:
... circuit(0.1)
... circuit(0.2)
Total: executions = 1 time = 0.0011134147644042969
Total: executions = 2 time = 0.0027322769165039062
After completion, one can also access the recorded information:
>>> timing_tracker.totals
defaultdict(int, {'executions': 2, 'shots': 30, 'time': 0.00311279296875})
>>> timing_tracker.history
defaultdict(list,
{'executions': [1, 1],
'shots': [None, None],
'time': [0.0012764930725097656, 0.0018362998962402344]})
By specifying ``reset_on_enter=False``, you can reuse the same tracker accross
multiple runtime contexts.
>>> with qml.track(circuit.device, reset_on_enter=False) as tracker:
... circuit(0.1)
Total: executions = 1
>>> with tracker:
... circuit(0.2)
Total: executions = 2
"""
if version == "timing":
return TimingTracker(dev, **kwargs)
elif version == "default":
return DefaultTracker(dev, **kwargs)
else:
raise qml.QuantumFunctionError(
f"version {version} supplied to track. " f"Current options are `timing` and `default`."
)
|
31,607 |
def modify_computer_command(client: Client, computer_id: int, expand: List[str], overrides: bool,
host_name: Optional[str], display_name: Optional[str], description: Optional[str],
group_id: Optional[int], policy_id: Optional[int], asset_importance_id: Optional[int],
relay_list_id: Optional[int]) -> CommandResults:
"""
Modify an existing computer inside Trend Micro.
Args:
client (client): The Trend Micro API client.
computer_id (int): The ID of the computer to modify.
expand (List[str]): The desired information about the computers.
overrides (bool): Whether to get the overridden properties or not.
host_name (str): The hostname of the computer.
display_name (Optional[str]): The display name of the computer.
description (Optional[str]): The description about the new computer.
group_id (Optional[int]): The computer group ID of the new computer.
policy_id (Optional[int]): The ID of the desired policy to apply to new computer.
asset_importance_id (Optional[int]): The asset importance ID to assign to the new computer.
relay_list_id (Optional[int]): The ID of the relay list to assign to the new computer.
Returns:
CommandResults: Command results with raw response, outputs and readable outputs.
"""
response = client.modify_computer(computer_id=computer_id, expand=expand, overrides=overrides, host_name=host_name,
display_name=display_name, description=description, group_id=group_id,
policy_id=policy_id, asset_importance_id=asset_importance_id,
relay_list_id=relay_list_id)
markdown = tableToMarkdown(f"Details for the computer {response['hostName']}", response, removeNull=True,
headers=COMPUTER_TABLE_HEADERS, headerTransform=pascalToSpace)
return CommandResults(outputs_prefix="TrendMicro.Computers", outputs_key_field="TrendMicro", outputs=response,
readable_output=markdown, raw_response=response)
|
def modify_computer_command(client: Client, computer_id: int, expand: List[str], overrides: bool,
host_name: Optional[str], display_name: Optional[str], description: Optional[str],
group_id: Optional[int], policy_id: Optional[int], asset_importance_id: Optional[int],
relay_list_id: Optional[int]) -> CommandResults:
"""
Modify an existing computer inside Trend Micro.
Args:
client (client): The Trend Micro API client.
computer_id (int): The ID of the computer to modify.
expand (List[str]): The desired information about the computers.
overrides (bool): Whether to get the overridden properties or not.
host_name (str): The hostname of the computer.
display_name (Optional[str]): The display name of the computer.
description (Optional[str]): The description about the new computer.
group_id (Optional[int]): The computer group ID of the new computer.
policy_id (Optional[int]): The ID of the desired policy to apply to new computer.
asset_importance_id (Optional[int]): The asset importance ID to assign to the new computer.
relay_list_id (Optional[int]): The ID of the relay list to assign to the new computer.
Returns:
CommandResults: Command results with raw response, outputs and readable outputs.
"""
response = client.modify_computer(computer_id=computer_id, expand=expand, overrides=overrides, host_name=host_name,
display_name=display_name, description=description, group_id=group_id,
policy_id=policy_id, asset_importance_id=asset_importance_id,
relay_list_id=relay_list_id)
markdown = tableToMarkdown(f"Details for the computer {response.get('hostName')}", response, removeNull=True,
headers=COMPUTER_TABLE_HEADERS, headerTransform=pascalToSpace)
return CommandResults(outputs_prefix="TrendMicro.Computers", outputs_key_field="TrendMicro", outputs=response,
readable_output=markdown, raw_response=response)
|
56,040 |
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
log_level = training_args.get_process_log_level()
logger.setLevel(log_level)
datasets.utils.logging.set_verbosity(log_level)
transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
logger.info(f"Training/evaluation parameters {training_args}")
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# Set seed before initializing model.
set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use as labels the column called 'label' and as pair of sentences the
# sentences in columns called 'sentence1' and 'sentence2' if such column exists or the first two columns not named
# label if at least two columns are provided.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.task_name is not None:
# Downloading and loading a dataset from the hub.
raw_datasets = load_dataset("glue", data_args.task_name, cache_dir=model_args.cache_dir)
elif data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
raw_datasets = load_dataset(
data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir
)
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
data_files = {"train": data_args.train_file, "validation": data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
train_extension = data_args.train_file.split(".")[-1]
test_extension = data_args.test_file.split(".")[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
data_files["test"] = data_args.test_file
else:
raise ValueError("Need either a GLUE task or a test file for `do_predict`.")
for key in data_files.keys():
logger.info(f"load a local file for {key}: {data_files[key]}")
if data_args.train_file.endswith(".csv"):
# Loading a dataset from local csv files
raw_datasets = load_dataset("csv", data_files=data_files, cache_dir=model_args.cache_dir)
else:
# Loading a dataset from local json files
raw_datasets = load_dataset("json", data_files=data_files, cache_dir=model_args.cache_dir)
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
if data_args.task_name is not None:
is_regression = data_args.task_name == "stsb"
if not is_regression:
label_list = raw_datasets["train"].features["label"].names
num_labels = len(label_list)
else:
num_labels = 1
else:
# Trying to have good defaults here, don't hesitate to tweak to your needs.
is_regression = raw_datasets["train"].features["label"].dtype in ["float32", "float64"]
if is_regression:
num_labels = 1
else:
# A useful fast method:
# https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.unique
label_list = raw_datasets["train"].unique("label")
label_list.sort() # Let's sort it for determinism
num_labels = len(label_list)
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
num_labels=num_labels,
finetuning_task=data_args.task_name,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
model = AutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
# Preprocessing the raw_datasets
if data_args.task_name is not None:
sentence1_key, sentence2_key = task_to_keys[data_args.task_name]
else:
# Again, we try to have some nice defaults but don't hesitate to tweak to your use case.
non_label_column_names = [name for name in raw_datasets["train"].column_names if name != "label"]
if "sentence1" in non_label_column_names and "sentence2" in non_label_column_names:
sentence1_key, sentence2_key = "sentence1", "sentence2"
else:
if len(non_label_column_names) >= 2:
sentence1_key, sentence2_key = non_label_column_names[:2]
else:
sentence1_key, sentence2_key = non_label_column_names[0], None
# Padding strategy
if data_args.pad_to_max_length:
padding = "max_length"
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
padding = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
label_to_id = None
if (
model.config.label2id != PretrainedConfig(num_labels=num_labels).label2id
and data_args.task_name is not None
and not is_regression
):
# Some have all caps in their config, some don't.
label_name_to_id = {k.lower(): v for k, v in model.config.label2id.items()}
if list(sorted(label_name_to_id.keys())) == list(sorted(label_list)):
label_to_id = {i: int(label_name_to_id[label_list[i]]) for i in range(num_labels)}
else:
logger.warning(
"Your model seems to have been trained with labels, but they don't match the dataset: ",
f"model labels: {list(sorted(label_name_to_id.keys()))}, dataset labels: {list(sorted(label_list))}."
"\nIgnoring the model labels as a result.",
)
elif data_args.task_name is None and not is_regression:
label_to_id = {v: i for i, v in enumerate(label_list)}
if label_to_id is not None:
model.config.label2id = label_to_id
model.config.id2label = {id: label for label, id in config.label2id.items()}
elif data_args.task_name is not None and not is_regression:
model.config.label2id = {l: i for i, l in enumerate(label_list)}
model.config.id2label = {id: label for label, id in config.label2id.items()}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
)
max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
def preprocess_function(examples):
# Tokenize the texts
args = (
(examples[sentence1_key],) if sentence2_key is None else (examples[sentence1_key], examples[sentence2_key])
)
result = tokenizer(*args, padding=padding, max_length=max_seq_length, truncation=True)
# Map labels to IDs (not necessary for GLUE tasks)
if label_to_id is not None and "label" in examples:
result["label"] = [(label_to_id[l] if l != -1 else -1) for l in examples["label"]]
return result
with training_args.main_process_first(desc="dataset map pre-processing"):
raw_datasets = raw_datasets.map(
preprocess_function,
batched=True,
load_from_cache_file=not data_args.overwrite_cache,
desc="Running tokenizer on dataset",
)
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("--do_train requires a train dataset")
train_dataset = raw_datasets["train"]
if data_args.max_train_samples is not None:
train_dataset = train_dataset.select(range(data_args.max_train_samples))
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError("--do_eval requires a validation dataset")
eval_dataset = raw_datasets["validation_matched" if data_args.task_name == "mnli" else "validation"]
if data_args.max_eval_samples is not None:
eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
if training_args.do_predict or data_args.task_name is not None or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError("--do_predict requires a test dataset")
predict_dataset = raw_datasets["test_matched" if data_args.task_name == "mnli" else "test"]
if data_args.max_predict_samples is not None:
predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(train_dataset)), 3):
logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
# Get the metric function
if data_args.task_name is not None:
metric = load_metric("glue", data_args.task_name)
else:
metric = load_metric("accuracy")
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(p: EvalPrediction):
preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
preds = np.squeeze(preds) if is_regression else np.argmax(preds, axis=1)
if data_args.task_name is not None:
result = metric.compute(predictions=preds, references=p.label_ids)
if len(result) > 1:
result["combined_score"] = np.mean(list(result.values())).item()
return result
elif is_regression:
return {"mse": ((preds - p.label_ids) ** 2).mean().item()}
else:
return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}
# Data collator will default to DataCollatorWithPadding when the tokenizer is passed to Trainer, so we change it if
# we already did the padding.
if data_args.pad_to_max_length:
data_collator = default_data_collator
elif training_args.fp16:
data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
else:
data_collator = None
# Initialize our Trainer
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset if training_args.do_train else None,
eval_dataset=eval_dataset if training_args.do_eval else None,
compute_metrics=compute_metrics,
tokenizer=tokenizer,
data_collator=data_collator,
)
# Training
if training_args.do_train:
checkpoint = None
if training_args.resume_from_checkpoint is not None:
checkpoint = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
checkpoint = last_checkpoint
train_result = trainer.train(resume_from_checkpoint=checkpoint)
metrics = train_result.metrics
max_train_samples = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
)
metrics["train_samples"] = min(max_train_samples, len(train_dataset))
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics("train", metrics)
trainer.save_metrics("train", metrics)
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***")
# Loop to handle MNLI double evaluation (matched, mis-matched)
tasks = [data_args.task_name]
eval_datasets = [eval_dataset]
if data_args.task_name == "mnli":
tasks.append("mnli-mm")
eval_datasets.append(raw_datasets["validation_mismatched"])
combined = {}
for eval_dataset, task in zip(eval_datasets, tasks):
metrics = trainer.evaluate(eval_dataset=eval_dataset)
max_eval_samples = (
data_args.max_eval_samples
if data_args.max_eval_samples is not None
else len(eval_dataset)
)
metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
if task == "mnli-mm":
metrics = {k + "_mm": v for k, v in metrics.items()}
if "mnli" in task:
combined.update(metrics)
trainer.log_metrics("eval", metrics)
trainer.save_metrics("eval", combined if "mnli" in task else metrics)
if training_args.do_predict:
logger.info("*** Predict ***")
# Loop to handle MNLI double evaluation (matched, mis-matched)
tasks = [data_args.task_name]
predict_datasets = [predict_dataset]
if data_args.task_name == "mnli":
tasks.append("mnli-mm")
predict_datasets.append(raw_datasets["test_mismatched"])
for predict_dataset, task in zip(predict_datasets, tasks):
# Removing the `label` columns because it contains -1 and Trainer won't like that.
predict_dataset = predict_dataset.remove_columns("label")
predictions = trainer.predict(predict_dataset, metric_key_prefix="predict").predictions
predictions = np.squeeze(predictions) if is_regression else np.argmax(predictions, axis=1)
output_predict_file = os.path.join(training_args.output_dir, f"predict_results_{task}.txt")
if trainer.is_world_process_zero():
with open(output_predict_file, "w") as writer:
logger.info(f"***** Predict results {task} *****")
writer.write("index\tprediction\n")
for index, item in enumerate(predictions):
if is_regression:
writer.write(f"{index}\t{item:3.3f}\n")
else:
item = label_list[item]
writer.write(f"{index}\t{item}\n")
kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"}
if data_args.task_name is not None:
kwargs["language"] = "en"
kwargs["dataset_tags"] = "glue"
kwargs["dataset_args"] = data_args.task_name
kwargs["dataset"] = f"GLUE {data_args.task_name.upper()}"
if training_args.push_to_hub:
trainer.push_to_hub(**kwargs)
else:
trainer.create_model_card(**kwargs)
|
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
log_level = training_args.get_process_log_level()
logger.setLevel(log_level)
datasets.utils.logging.set_verbosity(log_level)
transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
logger.info(f"Training/evaluation parameters {training_args}")
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# Set seed before initializing model.
set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use as labels the column called 'label' and as pair of sentences the
# sentences in columns called 'sentence1' and 'sentence2' if such column exists or the first two columns not named
# label if at least two columns are provided.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.task_name is not None:
# Downloading and loading a dataset from the hub.
raw_datasets = load_dataset("glue", data_args.task_name, cache_dir=model_args.cache_dir)
elif data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
raw_datasets = load_dataset(
data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir
)
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
data_files = {"train": data_args.train_file, "validation": data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
train_extension = data_args.train_file.split(".")[-1]
test_extension = data_args.test_file.split(".")[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
data_files["test"] = data_args.test_file
else:
raise ValueError("Need either a GLUE task or a test file for `do_predict`.")
for key in data_files.keys():
logger.info(f"load a local file for {key}: {data_files[key]}")
if data_args.train_file.endswith(".csv"):
# Loading a dataset from local csv files
raw_datasets = load_dataset("csv", data_files=data_files, cache_dir=model_args.cache_dir)
else:
# Loading a dataset from local json files
raw_datasets = load_dataset("json", data_files=data_files, cache_dir=model_args.cache_dir)
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
if data_args.task_name is not None:
is_regression = data_args.task_name == "stsb"
if not is_regression:
label_list = raw_datasets["train"].features["label"].names
num_labels = len(label_list)
else:
num_labels = 1
else:
# Trying to have good defaults here, don't hesitate to tweak to your needs.
is_regression = raw_datasets["train"].features["label"].dtype in ["float32", "float64"]
if is_regression:
num_labels = 1
else:
# A useful fast method:
# https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.unique
label_list = raw_datasets["train"].unique("label")
label_list.sort() # Let's sort it for determinism
num_labels = len(label_list)
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
num_labels=num_labels,
finetuning_task=data_args.task_name,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
model = AutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
# Preprocessing the raw_datasets
if data_args.task_name is not None:
sentence1_key, sentence2_key = task_to_keys[data_args.task_name]
else:
# Again, we try to have some nice defaults but don't hesitate to tweak to your use case.
non_label_column_names = [name for name in raw_datasets["train"].column_names if name != "label"]
if "sentence1" in non_label_column_names and "sentence2" in non_label_column_names:
sentence1_key, sentence2_key = "sentence1", "sentence2"
else:
if len(non_label_column_names) >= 2:
sentence1_key, sentence2_key = non_label_column_names[:2]
else:
sentence1_key, sentence2_key = non_label_column_names[0], None
# Padding strategy
if data_args.pad_to_max_length:
padding = "max_length"
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
padding = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
label_to_id = None
if (
model.config.label2id != PretrainedConfig(num_labels=num_labels).label2id
and data_args.task_name is not None
and not is_regression
):
# Some have all caps in their config, some don't.
label_name_to_id = {k.lower(): v for k, v in model.config.label2id.items()}
if list(sorted(label_name_to_id.keys())) == list(sorted(label_list)):
label_to_id = {i: int(label_name_to_id[label_list[i]]) for i in range(num_labels)}
else:
logger.warning(
"Your model seems to have been trained with labels, but they don't match the dataset: ",
f"model labels: {list(sorted(label_name_to_id.keys()))}, dataset labels: {list(sorted(label_list))}."
"\nIgnoring the model labels as a result.",
)
elif data_args.task_name is None and not is_regression:
label_to_id = {v: i for i, v in enumerate(label_list)}
if label_to_id is not None:
model.config.label2id = label_to_id
model.config.id2label = {id: label for label, id in config.label2id.items()}
elif data_args.task_name is not None and not is_regression:
model.config.label2id = {l: i for i, l in enumerate(label_list)}
model.config.id2label = {id: label for label, id in config.label2id.items()}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
)
max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
def preprocess_function(examples):
# Tokenize the texts
args = (
(examples[sentence1_key],) if sentence2_key is None else (examples[sentence1_key], examples[sentence2_key])
)
result = tokenizer(*args, padding=padding, max_length=max_seq_length, truncation=True)
# Map labels to IDs (not necessary for GLUE tasks)
if label_to_id is not None and "label" in examples:
result["label"] = [(label_to_id[l] if l != -1 else -1) for l in examples["label"]]
return result
with training_args.main_process_first(desc="dataset map pre-processing"):
raw_datasets = raw_datasets.map(
preprocess_function,
batched=True,
load_from_cache_file=not data_args.overwrite_cache,
desc="Running tokenizer on dataset",
)
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("--do_train requires a train dataset")
train_dataset = raw_datasets["train"]
if data_args.max_train_samples is not None:
train_dataset = train_dataset.select(range(data_args.max_train_samples))
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError("--do_eval requires a validation dataset")
eval_dataset = raw_datasets["validation_matched" if data_args.task_name == "mnli" else "validation"]
if data_args.max_eval_samples is not None:
eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
if training_args.do_predict or data_args.task_name is not None or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError("--do_predict requires a test dataset")
predict_dataset = raw_datasets["test_matched" if data_args.task_name == "mnli" else "test"]
if data_args.max_predict_samples is not None:
predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(train_dataset)), 3):
logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
# Get the metric function
if data_args.task_name is not None:
metric = load_metric("glue", data_args.task_name)
else:
metric = load_metric("accuracy")
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(p: EvalPrediction):
preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
preds = np.squeeze(preds) if is_regression else np.argmax(preds, axis=1)
if data_args.task_name is not None:
result = metric.compute(predictions=preds, references=p.label_ids)
if len(result) > 1:
result["combined_score"] = np.mean(list(result.values())).item()
return result
elif is_regression:
return {"mse": ((preds - p.label_ids) ** 2).mean().item()}
else:
return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}
# Data collator will default to DataCollatorWithPadding when the tokenizer is passed to Trainer, so we change it if
# we already did the padding.
if data_args.pad_to_max_length:
data_collator = default_data_collator
elif training_args.fp16:
data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
else:
data_collator = None
# Initialize our Trainer
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset if training_args.do_train else None,
eval_dataset=eval_dataset if training_args.do_eval else None,
compute_metrics=compute_metrics,
tokenizer=tokenizer,
data_collator=data_collator,
)
# Training
if training_args.do_train:
checkpoint = None
if training_args.resume_from_checkpoint is not None:
checkpoint = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
checkpoint = last_checkpoint
train_result = trainer.train(resume_from_checkpoint=checkpoint)
metrics = train_result.metrics
max_train_samples = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
)
metrics["train_samples"] = min(max_train_samples, len(train_dataset))
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics("train", metrics)
trainer.save_metrics("train", metrics)
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***")
# Loop to handle MNLI double evaluation (matched, mis-matched)
tasks = [data_args.task_name]
eval_datasets = [eval_dataset]
if data_args.task_name == "mnli":
tasks.append("mnli-mm")
eval_datasets.append(raw_datasets["validation_mismatched"])
combined = {}
for eval_dataset, task in zip(eval_datasets, tasks):
metrics = trainer.evaluate(eval_dataset=eval_dataset)
max_eval_samples = (
data_args.max_eval_samples
if data_args.max_eval_samples is not None
else len(eval_dataset)
)
metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
if task == "mnli-mm":
combined.update({k + "_mm": v for k, v in metrics.items()})
elif task == "mnli":
combined.update(metrics)
trainer.log_metrics("eval", metrics)
trainer.save_metrics("eval", combined if "mnli" in task else metrics)
if training_args.do_predict:
logger.info("*** Predict ***")
# Loop to handle MNLI double evaluation (matched, mis-matched)
tasks = [data_args.task_name]
predict_datasets = [predict_dataset]
if data_args.task_name == "mnli":
tasks.append("mnli-mm")
predict_datasets.append(raw_datasets["test_mismatched"])
for predict_dataset, task in zip(predict_datasets, tasks):
# Removing the `label` columns because it contains -1 and Trainer won't like that.
predict_dataset = predict_dataset.remove_columns("label")
predictions = trainer.predict(predict_dataset, metric_key_prefix="predict").predictions
predictions = np.squeeze(predictions) if is_regression else np.argmax(predictions, axis=1)
output_predict_file = os.path.join(training_args.output_dir, f"predict_results_{task}.txt")
if trainer.is_world_process_zero():
with open(output_predict_file, "w") as writer:
logger.info(f"***** Predict results {task} *****")
writer.write("index\tprediction\n")
for index, item in enumerate(predictions):
if is_regression:
writer.write(f"{index}\t{item:3.3f}\n")
else:
item = label_list[item]
writer.write(f"{index}\t{item}\n")
kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"}
if data_args.task_name is not None:
kwargs["language"] = "en"
kwargs["dataset_tags"] = "glue"
kwargs["dataset_args"] = data_args.task_name
kwargs["dataset"] = f"GLUE {data_args.task_name.upper()}"
if training_args.push_to_hub:
trainer.push_to_hub(**kwargs)
else:
trainer.create_model_card(**kwargs)
|
33,725 |
def train_cifar(config, checkpoint_dir=None, data_dir=None):
net = Net(config["l1"], config["l2"])
device = "cpu"
if torch.cuda.is_available():
device = "cuda:0"
if torch.cuda.device_count() > 1:
net = nn.DataParallel(net)
net.to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=config["lr"], momentum=0.9)
# The `checkpoint_dir` parameter gets passed by Ray Tune when a checkpoint
# should be restored.
if checkpoint_dir:
checkpoint = os.path.join(checkpoint_dir, "checkpoint")
model_state, optimizer_state = torch.load(checkpoint)
net.load_state_dict(model_state)
optimizer.load_state_dict(optimizer_state)
trainset, testset = load_data(data_dir)
test_abs = int(len(trainset) * 0.8)
train_subset, val_subset = random_split(
trainset, [test_abs, len(trainset) - test_abs])
trainloader = torch.utils.data.DataLoader(
train_subset,
batch_size=int(config["batch_size"]),
shuffle=True,
num_workers=8)
valloader = torch.utils.data.DataLoader(
val_subset,
batch_size=int(config["batch_size"]),
shuffle=True,
num_workers=8)
for epoch in range(10): # loop over the dataset multiple times
running_loss = 0.0
epoch_steps = 0
for i, data in enumerate(trainloader, 0):
# get the inputs; data is a list of [inputs, labels]
inputs, labels = data
inputs, labels = inputs.to(device), labels.to(device)
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = net(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
# print statistics
running_loss += loss.item()
epoch_steps += 1
if i % 2000 == 1999: # print every 2000 mini-batches
print("[%d, %5d] loss: %.3f" % (epoch + 1, i + 1,
running_loss / epoch_steps))
running_loss = 0.0
# Validation loss
val_loss = 0.0
val_steps = 0
total = 0
correct = 0
for i, data in enumerate(valloader, 0):
with torch.no_grad():
inputs, labels = data
inputs, labels = inputs.to(device), labels.to(device)
outputs = net(inputs)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
loss = criterion(outputs, labels)
val_loss += loss.cpu().numpy()
val_steps += 1
# Here we save a checkpoint. It is automatically registered with
# Ray Tune and wil potentially be passed as the `checkpoint_dir`
# parameter in future iterations.
with tune.checkpoint_dir(step=epoch) as checkpoint_dir:
path = os.path.join(checkpoint_dir, "checkpoint")
torch.save(
(net.state_dict(), optimizer.state_dict()), path)
tune.report(loss=(val_loss / val_steps), accuracy=correct / total)
print("Finished Training")
# __train_end__
|
def train_cifar(config, checkpoint_dir=None, data_dir=None):
net = Net(config["l1"], config["l2"])
device = "cpu"
if torch.cuda.is_available():
device = "cuda:0"
if torch.cuda.device_count() > 1:
net = nn.DataParallel(net)
net.to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=config["lr"], momentum=0.9)
# The `checkpoint_dir` parameter gets passed by Ray Tune when a checkpoint
# should be restored.
if checkpoint_dir:
checkpoint = os.path.join(checkpoint_dir, "checkpoint")
model_state, optimizer_state = torch.load(checkpoint)
net.load_state_dict(model_state)
optimizer.load_state_dict(optimizer_state)
trainset, testset = load_data(data_dir)
test_abs = int(len(trainset) * 0.8)
train_subset, val_subset = random_split(
trainset, [test_abs, len(trainset) - test_abs])
trainloader = torch.utils.data.DataLoader(
train_subset,
batch_size=int(config["batch_size"]),
shuffle=True,
num_workers=8)
valloader = torch.utils.data.DataLoader(
val_subset,
batch_size=int(config["batch_size"]),
shuffle=True,
num_workers=8)
for epoch in range(10): # loop over the dataset multiple times
running_loss = 0.0
epoch_steps = 0
for i, data in enumerate(trainloader, 0):
# get the inputs; data is a list of [inputs, labels]
inputs, labels = data
inputs, labels = inputs.to(device), labels.to(device)
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = net(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
# print statistics
running_loss += loss.item()
epoch_steps += 1
if i % 2000 == 1999: # print every 2000 mini-batches
print("[%d, %5d] loss: %.3f" % (epoch + 1, i + 1,
running_loss / epoch_steps))
running_loss = 0.0
# Validation loss
val_loss = 0.0
val_steps = 0
total = 0
correct = 0
for i, data in enumerate(valloader, 0):
with torch.no_grad():
inputs, labels = data
inputs, labels = inputs.to(device), labels.to(device)
outputs = net(inputs)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
loss = criterion(outputs, labels)
val_loss += loss.cpu().numpy()
val_steps += 1
# Here we save a checkpoint. It is automatically registered with
# Ray Tune and will potentially be passed as the `checkpoint_dir`
# parameter in future iterations.
with tune.checkpoint_dir(step=epoch) as checkpoint_dir:
path = os.path.join(checkpoint_dir, "checkpoint")
torch.save(
(net.state_dict(), optimizer.state_dict()), path)
tune.report(loss=(val_loss / val_steps), accuracy=correct / total)
print("Finished Training")
# __train_end__
|
42,856 |
def graph_embed(mat, max_mean_photon=1.0, make_traceless=True, tol=6):
r""" Given an symmetric adjacency matrix (that can be complex in general),
it returns the squeezing parameter and interferometer necessary for
implementing it in GBS.
Args:
mat (array): square symmetric complex (or real or integer) array
max_mean_photon (float): threshold value. It guarantees that the mode with
the largest squeezing has ``max_mean_photon`` as the mean photon number
i.e. :math:`sinh(r_{max})^2 == max_mean_photon`
make_traceless (boolean): removes the trace of the input matrix.
tol (int): the number of decimal places to check the input matrix is symmetric
Returns:
tuple(array, array): Tuple containing the squeezing parameters of the input
state to the interferometer, and the unitary matrix representing the interferometer
"""
(m, n) = mat.shape
if m != n:
raise ValueError("The Matrix is not square")
if np.round(np.linalg.norm(mat-np.transpose(mat)), tol) != 0:
raise ValueError("The input matrix is not symmetric")
if make_traceless:
A = mat - np.trace(mat)*np.identity(n)/n
s, U = takagi(A)
sc = np.sqrt(1.0+1.0/max_mean_photon)
vals = -np.arctanh(s/(s[0]*sc))
return vals, U
|
def graph_embed(mat, max_mean_photon=1.0, make_traceless=True, tol=6):
r""" Given an symmetric adjacency matrix (that can be complex in general),
it returns the squeezing parameters and interferometer necessary for
implementing it in GBS.
Args:
mat (array): square symmetric complex (or real or integer) array
max_mean_photon (float): threshold value. It guarantees that the mode with
the largest squeezing has ``max_mean_photon`` as the mean photon number
i.e. :math:`sinh(r_{max})^2 == max_mean_photon`
make_traceless (boolean): removes the trace of the input matrix.
tol (int): the number of decimal places to check the input matrix is symmetric
Returns:
tuple(array, array): Tuple containing the squeezing parameters of the input
state to the interferometer, and the unitary matrix representing the interferometer
"""
(m, n) = mat.shape
if m != n:
raise ValueError("The Matrix is not square")
if np.round(np.linalg.norm(mat-np.transpose(mat)), tol) != 0:
raise ValueError("The input matrix is not symmetric")
if make_traceless:
A = mat - np.trace(mat)*np.identity(n)/n
s, U = takagi(A)
sc = np.sqrt(1.0+1.0/max_mean_photon)
vals = -np.arctanh(s/(s[0]*sc))
return vals, U
|
27,820 |
def is_return_code_zero(args):
"""Return true iff the given command's return code is zero.
All the messages to stdout or stderr are suppressed.
Args:
args (list of str): Command to execute.
"""
with open(os.devnull, 'wb') as FNULL:
try:
subprocess.check_call(args, stdout=FNULL, stderr=FNULL)
except subprocess.CalledProcessError:
# The given command returned an error
return False
except OSError:
# The given command was not found
return False
return True
|
def is_return_code_zero(args):
"""Return true iff the given command's return code is zero.
All the messages sent to stdout or stderr are suppressed.
Args:
args (list of str): Command to execute.
"""
with open(os.devnull, 'wb') as FNULL:
try:
subprocess.check_call(args, stdout=FNULL, stderr=FNULL)
except subprocess.CalledProcessError:
# The given command returned an error
return False
except OSError:
# The given command was not found
return False
return True
|
11,088 |
def get_child_arguments():
"""
Return the executable. This contains a workaround for Windows if the
executable is reported to not have the .exe extension which can cause bugs
on reloading.
"""
import __main__
py_script = Path(sys.argv[0])
args = [sys.executable] + ['-W%s' % o for o in sys.warnoptions]
# __spec__ is set when the server was started with the `-m` option,
# see https://docs.python.org/3/reference/import.html#main-spec
# __spec__ may not exists, e.g. when running in a Conda env.
if getattr(__main__, '__spec__', None) is not None and __main__.__spec__.parent:
args += ['-m', __main__.__spec__.parent]
args += sys.argv[1:]
elif not py_script.exists():
# sys.argv[0] may not exist for several reasons on Windows.
# It may exist with a .exe extension or have a -script.py suffix.
exe_entrypoint = py_script.with_suffix('.exe')
if exe_entrypoint.exists():
# Should be executed directly, ignoring sys.executable.
return [exe_entrypoint, *sys.argv[1:]]
script_entrypoint = py_script.with_name('%s-script.py' % py_script.name)
if script_entrypoint.exists():
# Should be executed as usual.
return [*args, script_entrypoint, *sys.argv[1:]]
raise RuntimeError('Script %s does not exist.' % py_script)
else:
args += sys.argv
return args
|
def get_child_arguments():
"""
Return the executable. This contains a workaround for Windows if the
executable is reported to not have the .exe extension which can cause bugs
on reloading.
"""
import __main__
py_script = Path(sys.argv[0])
args = [sys.executable] + ['-W%s' % o for o in sys.warnoptions]
# __spec__ is set when the server was started with the `-m` option,
# see https://docs.python.org/3/reference/import.html#main-spec
# __spec__ may not exist, e.g. when running in a Conda env.
if getattr(__main__, '__spec__', None) is not None and __main__.__spec__.parent:
args += ['-m', __main__.__spec__.parent]
args += sys.argv[1:]
elif not py_script.exists():
# sys.argv[0] may not exist for several reasons on Windows.
# It may exist with a .exe extension or have a -script.py suffix.
exe_entrypoint = py_script.with_suffix('.exe')
if exe_entrypoint.exists():
# Should be executed directly, ignoring sys.executable.
return [exe_entrypoint, *sys.argv[1:]]
script_entrypoint = py_script.with_name('%s-script.py' % py_script.name)
if script_entrypoint.exists():
# Should be executed as usual.
return [*args, script_entrypoint, *sys.argv[1:]]
raise RuntimeError('Script %s does not exist.' % py_script)
else:
args += sys.argv
return args
|
33,526 |
def lambda_function_or_layer_arn(
type, entity_name, version=None, account_id=None, region_name=None
):
pattern = "arn:([a-z-]+):lambda:.*:.*:(function|layer):.*"
if re.match(pattern, entity_name):
return entity_name
if ":" in entity_name:
client = connect_to_service("lambda")
entity_name, _, alias = entity_name.rpartition(":")
try:
alias_response = client.get_alias(FunctionName=entity_name, Name=alias)
version = alias_response["FunctionVersion"]
except Exception as e:
msg = f"Alias {alias} of {entity_name} not found"
LOG.info(f"{msg}: {e}")
raise Exception(msg)
account_id = get_account_id(account_id)
region_name = region_name or get_region()
result = f"arn:aws:lambda:{region_name}:{account_id}:function:{entity_name}"
if version:
result = f"{result}:{version}"
return result
|
def lambda_function_or_layer_arn(
type, entity_name, version=None, account_id=None, region_name=None
):
pattern = "arn:([a-z-]+):lambda:.*:.*:(function|layer):.*"
if re.match(pattern, entity_name):
return entity_name
if ":" in entity_name:
client = connect_to_service("lambda")
entity_name, _, alias = entity_name.rpartition(":")
try:
alias_response = client.get_alias(FunctionName=entity_name, Name=alias)
version = alias_response["FunctionVersion"]
except Exception as e:
msg = f"Alias {alias} of {entity_name} not found"
LOG.info(f"{msg}: {e}")
raise Exception(msg)
account_id = get_account_id(account_id)
region_name = region_name or get_region()
result = f"arn:aws:lambda:{region_name}:{account_id}:{type}:{entity_name}"
if version:
result = f"{result}:{version}"
return result
|
32,417 |
def main() -> None:
"""main function, parses params and runs command functions
:return:
:rtype:
"""
api_key = demisto.params().get("apikey")
if not api_key:
demisto.error("API Key is missing")
# get the service API url
BASE_URL = urljoin(demisto.params()["url"], "/")
verify_certificate = not demisto.params().get("insecure", False)
proxy = demisto.params().get("proxy", False)
# TODO
# Integration that implements reputation commands (e.g. url, ip, domain,..., etc) must have
# a reliability score of the source providing the intelligence data.
# reliability = demisto.params().get("integrationReliability", DBotScoreReliability.C)
demisto.debug(f"Command being called is {demisto.command()}")
try:
headers = {"Authorization": f"Bearer {api_key}"}
client = Client(
base_url=BASE_URL, verify=verify_certificate, headers=headers, proxy=proxy
)
if demisto.command() == "test-module":
# This is the call made when pressing the integration Test button.
# https://api.sekoia.io/v1/apiauth/auth/validate
result = test_module(client)
return_results(result)
elif demisto.command() == "GetObservable":
return_results(get_observable_command(client, demisto.args()))
elif demisto.command() == "GetIndicator":
return_results(get_indicator_command(client, demisto.args()))
elif demisto.command() == "GetIndicatorContext":
return_results(get_indicator_context_command(client, demisto.args()))
# Log exceptions and return errors
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(
f"Failed to execute {demisto.command()} command. "
f"\nError:\n{str(e)} please consult endpoint documentation {DOC_MAPPING.get(demisto.command())}"
)
|
def main() -> None:
"""main function, parses params and runs command functions
:return:
:rtype:
"""
api_key = demisto.params().get("apikey")
if not api_key:
demisto.error("API Key is missing")
# get the service API url
BASE_URL = urljoin(demisto.params().get("url", "https://app.sekoia.io"), "/")
verify_certificate = not demisto.params().get("insecure", False)
proxy = demisto.params().get("proxy", False)
# TODO
# Integration that implements reputation commands (e.g. url, ip, domain,..., etc) must have
# a reliability score of the source providing the intelligence data.
# reliability = demisto.params().get("integrationReliability", DBotScoreReliability.C)
demisto.debug(f"Command being called is {demisto.command()}")
try:
headers = {"Authorization": f"Bearer {api_key}"}
client = Client(
base_url=BASE_URL, verify=verify_certificate, headers=headers, proxy=proxy
)
if demisto.command() == "test-module":
# This is the call made when pressing the integration Test button.
# https://api.sekoia.io/v1/apiauth/auth/validate
result = test_module(client)
return_results(result)
elif demisto.command() == "GetObservable":
return_results(get_observable_command(client, demisto.args()))
elif demisto.command() == "GetIndicator":
return_results(get_indicator_command(client, demisto.args()))
elif demisto.command() == "GetIndicatorContext":
return_results(get_indicator_context_command(client, demisto.args()))
# Log exceptions and return errors
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(
f"Failed to execute {demisto.command()} command. "
f"\nError:\n{str(e)} please consult endpoint documentation {DOC_MAPPING.get(demisto.command())}"
)
|
30,108 |
def test_prefetch_subject_scaled_is_larger_outsigs(runtmp, linear_gather):
# test prefetch output sigs
c = runtmp
# make a query sketch with scaled=1000
fa = utils.get_test_data('genome-s10.fa.gz')
c.run_sourmash('sketch', 'dna', fa, '-o', 'query.sig')
assert os.path.exists(runtmp.output('query.sig'))
# this has a scaled of 10000, from same genome:
against1 = utils.get_test_data('scaled/genome-s10.fa.gz.sig')
against2 = utils.get_test_data('scaled/all.sbt.zip')
against3 = utils.get_test_data('scaled/all.lca.json')
# run against large scaled, then small (self)
c.run_sourmash('prefetch', 'query.sig', against1, against2, against3,
'query.sig', linear_gather, '--save-matches', 'matches.sig')
print(c.last_result.status)
print(c.last_result.out)
print(c.last_result.err)
assert c.last_result.status == 0
assert 'total of 8 matching signatures.' in c.last_result.err
assert 'of 48 distinct query hashes, 48 were found in matches above threshold.' in c.last_result.err
assert 'final scaled value (max across query and all matches) is 10000' in c.last_result.err
# make sure non-downsampled sketches were saved.
matches = sourmash.load_file_as_signatures(runtmp.output('matches.sig'))
scaled_vals = set([ match.minhash.scaled for match in matches ])
assert 1000 in scaled_vals
assert 10000 in scaled_vals
assert len(scaled_vals) == 2
|
def test_prefetch_subject_scaled_is_larger_outsigs(runtmp, linear_gather):
# test prefetch where subject scaled is larger -- output sigs
c = runtmp
# make a query sketch with scaled=1000
fa = utils.get_test_data('genome-s10.fa.gz')
c.run_sourmash('sketch', 'dna', fa, '-o', 'query.sig')
assert os.path.exists(runtmp.output('query.sig'))
# this has a scaled of 10000, from same genome:
against1 = utils.get_test_data('scaled/genome-s10.fa.gz.sig')
against2 = utils.get_test_data('scaled/all.sbt.zip')
against3 = utils.get_test_data('scaled/all.lca.json')
# run against large scaled, then small (self)
c.run_sourmash('prefetch', 'query.sig', against1, against2, against3,
'query.sig', linear_gather, '--save-matches', 'matches.sig')
print(c.last_result.status)
print(c.last_result.out)
print(c.last_result.err)
assert c.last_result.status == 0
assert 'total of 8 matching signatures.' in c.last_result.err
assert 'of 48 distinct query hashes, 48 were found in matches above threshold.' in c.last_result.err
assert 'final scaled value (max across query and all matches) is 10000' in c.last_result.err
# make sure non-downsampled sketches were saved.
matches = sourmash.load_file_as_signatures(runtmp.output('matches.sig'))
scaled_vals = set([ match.minhash.scaled for match in matches ])
assert 1000 in scaled_vals
assert 10000 in scaled_vals
assert len(scaled_vals) == 2
|
12,157 |
def pack(obj, compress=True):
if config['blob.encode_bypass'] is True:
return obj
blob = b"mYm\0"
blob += pack_obj(obj)
if compress:
compressed = b'ZL123\0' + np.uint64(len(blob)).tostring() + zlib.compress(blob)
if len(compressed) < len(blob):
blob = compressed
return blob
|
def pack(obj, compress=True):
if config['blob.encode_bypass']:
return obj
blob = b"mYm\0"
blob += pack_obj(obj)
if compress:
compressed = b'ZL123\0' + np.uint64(len(blob)).tostring() + zlib.compress(blob)
if len(compressed) < len(blob):
blob = compressed
return blob
|
10,232 |
def main():
result = {}
module = AnsibleModule(
argument_spec=dict(
category=dict(required=True),
command=dict(required=True, type='list'),
baseuri=dict(required=True),
username=dict(required=True),
password=dict(required=True, no_log=True),
bios_attribute_name=dict(default='null'),
bios_attribute_value=dict(default='null'),
timeout=dict(type='int', default=10),
manager_services=dict()
),
supports_check_mode=False
)
category = module.params['category']
command_list = module.params['command']
# admin credentials used for authentication
creds = {'user': module.params['username'],
'pswd': module.params['password']}
# timeout
timeout = module.params['timeout']
# BIOS attributes to update
bios_attributes = {'bios_attr_name': module.params['bios_attribute_name'],
'bios_attr_value': module.params['bios_attribute_value']}
# Build root URI
root_uri = "https://" + module.params['baseuri']
rf_utils = RedfishUtils(creds, root_uri, timeout, module)
# Check that Category is valid
if category not in CATEGORY_COMMANDS_ALL:
module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, CATEGORY_COMMANDS_ALL.keys())))
# Check that all commands are valid
for cmd in command_list:
# Fail if even one command given is invalid
if cmd not in CATEGORY_COMMANDS_ALL[category]:
module.fail_json(msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category])))
# Organize by Categories / Commands
if category == "Systems":
# execute only if we find a System resource
result = rf_utils._find_systems_resource()
if result['ret'] is False:
module.fail_json(msg=to_native(result['msg']))
for command in command_list:
if command == "SetBiosDefaultSettings":
result = rf_utils.set_bios_default_settings()
elif command == "SetBiosAttributes":
result = rf_utils.set_bios_attributes(bios_attributes)
elif category == "Manager":
# execute only if we find a Manager service resource
result = rf_utils._find_managers_resource()
if result['ret'] is False:
module.fail_json(msg=to_native(result['msg']))
for command in command_list:
if command == "SetManagerServices":
result = rf_utils.set_manager_services(ast.literal_eval(module.params['manager_services']))
# Return data back or fail with proper message
if result['ret'] is True:
module.exit_json(changed=result['changed'], msg=to_native(result['msg']))
else:
module.fail_json(msg=to_native(result['msg']))
|
def main():
result = {}
module = AnsibleModule(
argument_spec=dict(
category=dict(required=True),
command=dict(required=True, type='list'),
baseuri=dict(required=True),
username=dict(required=True),
password=dict(required=True, no_log=True),
bios_attribute_name=dict(default='null'),
bios_attribute_value=dict(default='null'),
timeout=dict(type='int', default=10),
manager_services=dict()
),
supports_check_mode=False
)
category = module.params['category']
command_list = module.params['command']
# admin credentials used for authentication
creds = {'user': module.params['username'],
'pswd': module.params['password']}
# timeout
timeout = module.params['timeout']
# BIOS attributes to update
bios_attributes = {'bios_attr_name': module.params['bios_attribute_name'],
'bios_attr_value': module.params['bios_attribute_value']}
# Build root URI
root_uri = "https://" + module.params['baseuri']
rf_utils = RedfishUtils(creds, root_uri, timeout, module)
# Check that Category is valid
if category not in CATEGORY_COMMANDS_ALL:
module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, CATEGORY_COMMANDS_ALL.keys())))
# Check that all commands are valid
for cmd in command_list:
# Fail if even one command given is invalid
if cmd not in CATEGORY_COMMANDS_ALL[category]:
module.fail_json(msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category])))
# Organize by Categories / Commands
if category == "Systems":
# execute only if we find a System resource
result = rf_utils._find_systems_resource()
if result['ret'] is False:
module.fail_json(msg=to_native(result['msg']))
for command in command_list:
if command == "SetBiosDefaultSettings":
result = rf_utils.set_bios_default_settings()
elif command == "SetBiosAttributes":
result = rf_utils.set_bios_attributes(bios_attributes)
elif category == "Manager":
# execute only if we find a Manager service resource
result = rf_utils._find_managers_resource()
if result['ret'] is False:
module.fail_json(msg=to_native(result['msg']))
for command in command_list:
if command == "SetManagerServices":
result = rf_utils.set_network_protocols(module.params['network_protocols'])
# Return data back or fail with proper message
if result['ret'] is True:
module.exit_json(changed=result['changed'], msg=to_native(result['msg']))
else:
module.fail_json(msg=to_native(result['msg']))
|
5,467 |
def global_contribution_form(request):
"""Adds contribution form to the context."""
if enabled(request):
initial_data = {}
if hasattr(request, 'user'):
initial_data = {
'name': request.user.get_full_name() or request.user.username,
'email': request.user.email,
} if request.user.is_authenticated else {}
return {
'contribution_enabled': True,
'contribution_recurring_payment_enabled': recurring_payment_enabled(request),
'contribution_popup': popup_enabled(request),
'contribution_form': ContributionForm(initial=initial_data),
'contribution_recurring_payment_form': ContributionRecurringPaymentForm(initial=initial_data),
'hide_cta': True,
}
return {'contribution_enabled': False}
|
def global_contribution_form(request):
"""Adds contribution form to the context."""
if enabled(request):
initial_data = {}
if hasattr(request, 'user'):
initial_data = {
'name': request.user.get_full_name() or request.user.username,
'email': request.user.email,
} if request.user.is_authenticated else {}
return {
'contribution_enabled': True,
'recurring_payment_enabled': recurring_payment_enabled(request),
'contribution_popup': popup_enabled(request),
'contribution_form': ContributionForm(initial=initial_data),
'contribution_recurring_payment_form': ContributionRecurringPaymentForm(initial=initial_data),
'hide_cta': True,
}
return {'contribution_enabled': False}
|
46,347 |
def _daal_roc_auc_score(y_true, y_score, *, average="macro", sample_weight=None,
max_fpr=None, multi_class="raise", labels=None):
y_type = _daal_type_of_target(y_true)
y_true = check_array(y_true, ensure_2d=False, dtype=None)
y_score = check_array(y_score, ensure_2d=False)
if y_type[0] == "multiclass" or (y_type[0] == "binary" and
y_score.ndim == 2 and
y_score.shape[1] > 2):
# do not support partial ROC computation for multiclass
if max_fpr is not None and max_fpr != 1.:
raise ValueError("Partial AUC computation not available in "
"multiclass setting, 'max_fpr' must be"
" set to `None`, received `max_fpr={0}` "
"instead".format(max_fpr))
if multi_class == 'raise':
raise ValueError("multi_class must be in ('ovo', 'ovr')")
logging.info("sklearn.metrics.roc_auc_score: " + get_patch_message("sklearn"))
result = _multiclass_roc_auc_score(y_true, y_score, labels,
multi_class, average, sample_weight)
elif y_type[0] == "binary":
labels = y_type[1]
condition = max_fpr is None and sample_weight is None and len(labels) == 2
if condition:
logging.info("sklearn.metrics.roc_auc_score: " + get_patch_message("daal"))
if not np.array_equal(labels, [0, 1]):
y_true = label_binarize(y_true, classes=labels)[:, 0]
result = d4p.daal_roc_auc_score(y_true.reshape(-1, 1), y_score.reshape(-1, 1))
if not condition or result == -1:
y_true = label_binarize(y_true, classes=labels)[:, 0]
logging.info("sklearn.metrics.roc_auc_score: " + get_patch_message("sklearn"))
result = _average_binary_score(partial(_binary_roc_auc_score,
max_fpr=max_fpr),
y_true, y_score, average,
sample_weight=sample_weight)
else:
logging.info("sklearn.metrics.roc_auc_score: " + get_patch_message("sklearn"))
result = _average_binary_score(partial(_binary_roc_auc_score,
max_fpr=max_fpr),
y_true, y_score, average,
sample_weight=sample_weight)
return result
|
def _daal_roc_auc_score(y_true, y_score, *, average="macro", sample_weight=None,
max_fpr=None, multi_class="raise", labels=None):
y_type = _daal_type_of_target(y_true)
y_true = check_array(y_true, ensure_2d=False, dtype=None)
y_score = check_array(y_score, ensure_2d=False)
if y_type[0] == "multiclass" or (y_type[0] == "binary" and
y_score.ndim == 2 and
y_score.shape[1] > 2):
# do not support partial ROC computation for multiclass
if max_fpr is not None and max_fpr != 1.:
raise ValueError("Partial AUC computation not available in "
"multiclass setting, 'max_fpr' must be"
" set to `None`, received `max_fpr={0}` "
"instead".format(max_fpr))
if multi_class == 'raise':
raise ValueError("multi_class must be in ('ovo', 'ovr')")
logging.info("sklearn.metrics.roc_auc_score: " + get_patch_message("sklearn"))
result = _multiclass_roc_auc_score(y_true, y_score, labels,
multi_class, average, sample_weight)
elif y_type[0] == "binary":
labels = y_type[1]
daal_use = max_fpr is None and sample_weight is None and len(labels) == 2
if condition:
logging.info("sklearn.metrics.roc_auc_score: " + get_patch_message("daal"))
if not np.array_equal(labels, [0, 1]):
y_true = label_binarize(y_true, classes=labels)[:, 0]
result = d4p.daal_roc_auc_score(y_true.reshape(-1, 1), y_score.reshape(-1, 1))
if not condition or result == -1:
y_true = label_binarize(y_true, classes=labels)[:, 0]
logging.info("sklearn.metrics.roc_auc_score: " + get_patch_message("sklearn"))
result = _average_binary_score(partial(_binary_roc_auc_score,
max_fpr=max_fpr),
y_true, y_score, average,
sample_weight=sample_weight)
else:
logging.info("sklearn.metrics.roc_auc_score: " + get_patch_message("sklearn"))
result = _average_binary_score(partial(_binary_roc_auc_score,
max_fpr=max_fpr),
y_true, y_score, average,
sample_weight=sample_weight)
return result
|
53,272 |
def get_paschen_constants (gas,electrode):
r"""
Function to get the constants A and B and the second Townsend coefficient to calculate the Paschen breakdown voltage
Parameters
----------
gas : 'str'
electrode : 'str'
String representing the gas and electrode material
Return
------
dictionary containing the constants A, B and townsend_gamma for calculation of the breakdwn voltage
References
---------
Paschen_constants contains the coefficents A and B for the estimation of the
First Townsend Ionization Coefficent
(exponential fit to the First Townsend Ionization coefficient)
as adapted from
E.Nasser, Fundamentals of Gaseous Ionization and Plasma Electronics,
Wiley-Interscience, New York 1971
format: paschen_constants dir {"gas":[A,B]}
units: A in [Ionisation/(Pa m)] and B in [V/(Pa m)]
Townsend_gamma is the Second Townsend Ionization coefficient as given by
A.Beroual and I. Fonfana, Discharge in Long Air Gap Modeling and Application
IOP Publishing Ltd 2016
ISBN 978-0-7503-1236-3 (ebook)
ISBN 978-0-7503-1237-0 (print)
Examples
--------
c=def get_paschen_constants ("Ar","Ni):
c={'A': 11, 'B': 135, 'gam': 0.058}
c=def get_paschen_constants ("Ar","zz"):
c={'A': 11, 'B': 135, 'gam': 0.01}
If electrode material is not found a default value of 0.01 is taken
c=def get_paschen_constants ("Zz","Ni"):
c=None
If gas is not found, c is set to None
"""
# Supported gases
gases=["Air","N2","H2","He","Ne","Ar","Kr","Xe"]
paschen_constants={"Air":[11,274],
"N2":[9.0, 257],
"H2":[3.8,104],
"He":[2.3,26],
"Ne":[3.0, 75],
"Ar":[11,135],
"Kr":[13,180],
"Xe":[20,263]}
# Supported electrode materials
materials=["Al","Cu","Ni","Pt","C","W","Fe"]
townsend_gamma={"Air":{"Al":0.035,"Cu":0.025,"Ni":0.036,"Pt":0.017,"C":None,"W":None,"Fe":0.02},
"N2":{"Al":0.1,"Cu":0.066,"Ni":0.077,"Pt":0.59,"C":None,"W":None,"Fe":0.059},
"H2":{"Al":0.095,"Cu":0.05,"Ni":0.053,"Pt":0.02,"C":0.014,"W":None,"Fe":0.061},
"He":{"Al":0.021,"Cu":None,"Ni":0.015,"Pt":0.01,"C":None,"W":None,"Fe":0.015},
"Ne":{"Al":0.053,"Cu":0.02,"Ni":0.031,"Pt":0.023,"C":None,"W":0.045,"Fe":0.022},
"Ar":{"Al":0.12,"Cu":0.058,"Ni":0.058,"Pt":0.058,"C":None,"W":None,"Fe":0.058},
"Kr":{"Al":None,"Cu":None,"Ni":None,"Pt":None,"C":None,"W":None,"Fe":None},
"Xe":{"Al":None,"Cu":None,"Ni":None,"Pt":None,"C":None,"W":None,"Fe":None}}
# Check if the asked gas and electrode material is supported
resg= gas in gases
rese=electrode in materials
# If the gas is supported get the constants A and B
print(resg,rese)
if resg==True :
print(gas)
A=paschen_constants[gas][0]
B=paschen_constants[gas][1]
print(A,B)
# Get the townsend_gamma coefficient for the the gas/electrode combination
if rese==True:
gam=townsend_gamma[gas]
print(gam)
gn=gam[electrode]
print (gn)
# Test if townsend_gamma exists for the demanded gas/electrode configuration
# If not a default townsend_gamma value of 0.01 is taken
if gn is None:
gn=0.01
print("default")
print(gn)
else:
# If the electrode material is not supportes set townsend_gamma to default = 0.01
gn=0.01
print("default")
# Create output dir {const}
const={"A":A,"B":B,"gam":gn}
print(const)
return const
# If gas is not supported set const=None
else :
const=None
print("No constants for this gas available",const)
return const
|
def get_paschen_constants (gas,electrode):
r"""
Function to get the constants A and B and the second Townsend coefficient to calculate the Paschen breakdown voltage
Parameters
----------
gas : 'str'
electrode : 'str'
String representing the gas and electrode material
Returns
-------
Dictionary containing the constants ``A``, ``B`` and ``townsend_gamma`` for calculation
of the breakdown voltage.
References
---------
Paschen_constants contains the coefficents A and B for the estimation of the
First Townsend Ionization Coefficent
(exponential fit to the First Townsend Ionization coefficient)
as adapted from
E.Nasser, Fundamentals of Gaseous Ionization and Plasma Electronics,
Wiley-Interscience, New York 1971
format: paschen_constants dir {"gas":[A,B]}
units: A in [Ionisation/(Pa m)] and B in [V/(Pa m)]
Townsend_gamma is the Second Townsend Ionization coefficient as given by
A.Beroual and I. Fonfana, Discharge in Long Air Gap Modeling and Application
IOP Publishing Ltd 2016
ISBN 978-0-7503-1236-3 (ebook)
ISBN 978-0-7503-1237-0 (print)
Examples
--------
c=def get_paschen_constants ("Ar","Ni):
c={'A': 11, 'B': 135, 'gam': 0.058}
c=def get_paschen_constants ("Ar","zz"):
c={'A': 11, 'B': 135, 'gam': 0.01}
If electrode material is not found a default value of 0.01 is taken
c=def get_paschen_constants ("Zz","Ni"):
c=None
If gas is not found, c is set to None
"""
# Supported gases
gases=["Air","N2","H2","He","Ne","Ar","Kr","Xe"]
paschen_constants={"Air":[11,274],
"N2":[9.0, 257],
"H2":[3.8,104],
"He":[2.3,26],
"Ne":[3.0, 75],
"Ar":[11,135],
"Kr":[13,180],
"Xe":[20,263]}
# Supported electrode materials
materials=["Al","Cu","Ni","Pt","C","W","Fe"]
townsend_gamma={"Air":{"Al":0.035,"Cu":0.025,"Ni":0.036,"Pt":0.017,"C":None,"W":None,"Fe":0.02},
"N2":{"Al":0.1,"Cu":0.066,"Ni":0.077,"Pt":0.59,"C":None,"W":None,"Fe":0.059},
"H2":{"Al":0.095,"Cu":0.05,"Ni":0.053,"Pt":0.02,"C":0.014,"W":None,"Fe":0.061},
"He":{"Al":0.021,"Cu":None,"Ni":0.015,"Pt":0.01,"C":None,"W":None,"Fe":0.015},
"Ne":{"Al":0.053,"Cu":0.02,"Ni":0.031,"Pt":0.023,"C":None,"W":0.045,"Fe":0.022},
"Ar":{"Al":0.12,"Cu":0.058,"Ni":0.058,"Pt":0.058,"C":None,"W":None,"Fe":0.058},
"Kr":{"Al":None,"Cu":None,"Ni":None,"Pt":None,"C":None,"W":None,"Fe":None},
"Xe":{"Al":None,"Cu":None,"Ni":None,"Pt":None,"C":None,"W":None,"Fe":None}}
# Check if the asked gas and electrode material is supported
resg= gas in gases
rese=electrode in materials
# If the gas is supported get the constants A and B
print(resg,rese)
if resg==True :
print(gas)
A=paschen_constants[gas][0]
B=paschen_constants[gas][1]
print(A,B)
# Get the townsend_gamma coefficient for the the gas/electrode combination
if rese==True:
gam=townsend_gamma[gas]
print(gam)
gn=gam[electrode]
print (gn)
# Test if townsend_gamma exists for the demanded gas/electrode configuration
# If not a default townsend_gamma value of 0.01 is taken
if gn is None:
gn=0.01
print("default")
print(gn)
else:
# If the electrode material is not supportes set townsend_gamma to default = 0.01
gn=0.01
print("default")
# Create output dir {const}
const={"A":A,"B":B,"gam":gn}
print(const)
return const
# If gas is not supported set const=None
else :
const=None
print("No constants for this gas available",const)
return const
|
33,034 |
def run_workspace_command(
args: CommandLineArguments,
root: str,
cmd: List[str],
network: bool = False,
env: Mapping[str, str] = MappingProxyType({}),
nspawn_params: Optional[List[str]] = None,
) -> None:
cmdline = [
"systemd-nspawn",
"--quiet",
"--directory=" + root,
"--uuid=" + args.machine_id,
"--machine=mkosi-" + uuid.uuid4().hex,
"--as-pid2",
"--register=no",
"--bind=" + var_tmp(root) + ":/var/tmp",
"--setenv=SYSTEMD_OFFLINE=1",
]
if network:
# If we're using the host network namespace, use the same resolver
cmdline += ["--bind-ro=/etc/resolv.conf"]
else:
cmdline += ["--private-network"]
cmdline += [f"--setenv={k}={v}" for k, v in env.items()]
if nspawn_params:
cmdline += nspawn_params
result = run(cmdline + ["--"] + cmd, check=False)
if result.returncode != 0:
if "workspace-command" in ARG_DEBUG:
run(cmdline, check=False)
die(f"Workspace command `{' '.join(cmd)}` returned non-zero exit code {result.returncode}.")
|
def run_workspace_command(
args: CommandLineArguments,
root: str,
cmd: List[str],
network: bool = False,
env: Optional[Dict[str, str]] = None,
nspawn_params: Optional[List[str]] = None,
) -> None:
cmdline = [
"systemd-nspawn",
"--quiet",
"--directory=" + root,
"--uuid=" + args.machine_id,
"--machine=mkosi-" + uuid.uuid4().hex,
"--as-pid2",
"--register=no",
"--bind=" + var_tmp(root) + ":/var/tmp",
"--setenv=SYSTEMD_OFFLINE=1",
]
if network:
# If we're using the host network namespace, use the same resolver
cmdline += ["--bind-ro=/etc/resolv.conf"]
else:
cmdline += ["--private-network"]
cmdline += [f"--setenv={k}={v}" for k, v in env.items()]
if nspawn_params:
cmdline += nspawn_params
result = run(cmdline + ["--"] + cmd, check=False)
if result.returncode != 0:
if "workspace-command" in ARG_DEBUG:
run(cmdline, check=False)
die(f"Workspace command `{' '.join(cmd)}` returned non-zero exit code {result.returncode}.")
|
45,566 |
def ReminderCancelled(name, timestamp=None):
return {
"event": "cancel",
"timestamp": timestamp,
"name": name
}
|
def ReminderCancelled(action_name, name=None, timestamp=None):
return {
"event": "cancel",
"timestamp": timestamp,
"name": name
}
|
57,910 |
def redlock_list_scans():
"""
List DevOps Scans
"""
group_by = demisto.args().get('group_by', 'scanId')
page_size = demisto.args().get('page_size', 25)
page_number = demisto.args().get('page_number', 1)
sort = demisto.args().get('sort', None)
filter_type = demisto.args().get('filter_type', 'relative')
filter_time_amount = demisto.args().get('filter_time_amount', 1)
filter_time_unit = demisto.args().get('filter_time_unit', 'day')
filter_user = demisto.args().get('filter_user', None)
filter_status = demisto.args().get('filter_status', None)
filter_asset_type = demisto.args().get('filter_asset_type', None)
filter_asset_name = demisto.args().get('filter_asset_name', None)
filter_start_time = demisto.args().get('filter_start_time', None)
filter_end_time = demisto.args().get('filter_end_time', None)
list_filter = {
'groupBy': group_by,
'page[size]': page_size,
'page[number]': page_number,
'filter[timeType]': filter_type
}
if sort:
list_filter['sort'] = sort
if filter_type == 'relative':
if filter_time_unit and filter_time_amount:
list_filter['filter[timeUnit]'] = filter_time_unit
list_filter['filter[timeAmount]'] = filter_time_amount
else:
return_error('You must specify a filter_time_unit and filter_time_amount with relative type filter')
elif filter_type == 'to_now':
if filter_start_time:
list_filter['filter[startTime]'] = convert_date_to_unix(filter_start_time, date_format="%m/%d/%Y %H:%M:%S")
else:
return_error('You must specify filter_start_time with to_now type filter')
elif filter_type == 'absolute':
if filter_start_time and filter_end_time:
list_filter['filter[startTime]'] = convert_date_to_unix(filter_start_time, date_format="%m/%d/%Y %H:%M:%S")
list_filter['filter[endTime]'] = convert_date_to_unix(filter_end_time, date_format="%m/%d/%Y %H:%M:%S")
else:
return_error('You must specify a filter_start_time and filter_end_time with absolute type filter')
if filter_user:
list_filter['filter[user]'] = filter_user
if filter_status:
list_filter['filter[status]'] = filter_status
if filter_asset_type:
list_filter['filter[assetType]'] = filter_asset_type
if filter_asset_name:
list_filter['filter[assetName]'] = filter_asset_name
response = req('GET', 'iac/v2/scans', param_data=list_filter, data={})
if (
not response
or 'data' not in response
or not isinstance(response['data'], list)
):
demisto.results('No results found')
else:
items = response['data']
readable_output = []
for item in items:
readable_output.append({
"ID": item.get('id'),
"Name": item.get('attributes')['name'],
"Type": item.get('attributes')['type'],
"Scan Time": item.get('attributes')['scanTime'],
"User": item.get('attributes')['user']
})
md = tableToMarkdown("Scans List:", readable_output)
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': items,
'EntryContext': {'Redlock.Scans(val.id == obj.id)': items},
'HumanReadable': md
})
|
def redlock_list_scans():
"""
List DevOps Scans
"""
group_by = demisto.args().get('group_by', 'scanId')
page_size = demisto.args().get('page_size', 25)
page_number = demisto.args().get('page_number', 1)
sort = demisto.args().get('sort', None)
filter_type = demisto.args().get('filter_type', 'relative')
filter_time_amount = demisto.args().get('filter_time_amount', 1)
filter_time_unit = demisto.args().get('filter_time_unit', 'day')
filter_user = demisto.args().get('filter_user', None)
filter_status = demisto.args().get('filter_status', None)
filter_asset_type = demisto.args().get('filter_asset_type', None)
filter_asset_name = demisto.args().get('filter_asset_name', None)
filter_start_time = demisto.args().get('filter_start_time', None)
filter_end_time = demisto.args().get('filter_end_time', None)
list_filter = {
'groupBy': group_by,
'page[size]': page_size,
'page[number]': page_number,
'filter[timeType]': filter_type
}
if sort:
list_filter['sort'] = sort
if filter_type == 'relative':
if filter_time_unit and filter_time_amount:
list_filter['filter[timeUnit]'] = filter_time_unit
list_filter['filter[timeAmount]'] = filter_time_amount
else:
return_error('You must specify a filter_time_unit and filter_time_amount with relative type filter')
elif filter_type == 'to_now':
if filter_time_unit:
list_filter['filter[timeUnit]'] = filter_time_unit
else:
return_error('You must specify filter_time_unit with to_now type filter')
elif filter_type == 'absolute':
if filter_start_time and filter_end_time:
list_filter['filter[startTime]'] = convert_date_to_unix(filter_start_time, date_format="%m/%d/%Y %H:%M:%S")
list_filter['filter[endTime]'] = convert_date_to_unix(filter_end_time, date_format="%m/%d/%Y %H:%M:%S")
else:
return_error('You must specify a filter_start_time and filter_end_time with absolute type filter')
if filter_user:
list_filter['filter[user]'] = filter_user
if filter_status:
list_filter['filter[status]'] = filter_status
if filter_asset_type:
list_filter['filter[assetType]'] = filter_asset_type
if filter_asset_name:
list_filter['filter[assetName]'] = filter_asset_name
response = req('GET', 'iac/v2/scans', param_data=list_filter, data={})
if (
not response
or 'data' not in response
or not isinstance(response['data'], list)
):
demisto.results('No results found')
else:
items = response['data']
readable_output = []
for item in items:
readable_output.append({
"ID": item.get('id'),
"Name": item.get('attributes')['name'],
"Type": item.get('attributes')['type'],
"Scan Time": item.get('attributes')['scanTime'],
"User": item.get('attributes')['user']
})
md = tableToMarkdown("Scans List:", readable_output)
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': items,
'EntryContext': {'Redlock.Scans(val.id == obj.id)': items},
'HumanReadable': md
})
|
41,683 |
def build_from_graph(pkg_map: Dict[str, BasePackage], outputdir: Path, args) -> None:
"""
This builds packages in pkg_map in parallel, building at most args.n_jobs
packages at once.
We have a priority queue of packages we are ready to build (build_queue),
where a package is ready to build if all its dependencies are built. The
priority is based on the number of dependents --- we prefer to build
packages with more dependents first.
To build packages in parallel, we use a thread pool of args.n_jobs many
threads listening to build_queue. When the thread is free, it takes an
item off build_queue and builds it. Once the package is built, it sends the
package to the built_queue. The main thread listens to the built_queue and
checks if any of the dependents are ready to be built. If so, it add the
package to the build queue.
"""
# Insert packages into build_queue. We *must* do this after counting
# dependents, because the ordering ought not to change after insertion.
build_queue: PriorityQueue = PriorityQueue()
print("Packages that would be built: " + ", ".join(pkg_map))
for pkg in pkg_map.values():
if len(pkg.dependencies) == 0:
build_queue.put(pkg)
built_queue: Queue = Queue()
thread_lock = Lock()
queue_idx = 0
def builder(n):
print(f"Starting thread {n}")
nonlocal queue_idx
while True:
pkg = build_queue.get()
with thread_lock:
pkg._queue_idx = queue_idx
queue_idx += 1
print(f"[{pkg._queue_idx}/{len(pkg_map)}] (thread {n}) building {pkg.name}")
t0 = perf_counter()
try:
pkg.build(outputdir, args)
except Exception as e:
built_queue.put(e)
return
print(
f"[{pkg._queue_idx}/{len(pkg_map)}] (thread {n}) built {pkg.name} in {perf_counter() - t0:.1f} s"
)
built_queue.put(pkg)
# Release the GIL so new packages get queued
sleep(0.01)
for n in range(0, args.n_jobs):
Thread(target=builder, args=(n + 1,), daemon=True).start()
num_built = 0
while num_built < len(pkg_map):
pkg = built_queue.get()
if isinstance(pkg, Exception):
raise pkg
num_built += 1
for _dependent in pkg.dependents:
dependent = pkg_map[_dependent]
dependent.unbuilt_dependencies.remove(pkg.name)
if len(dependent.unbuilt_dependencies) == 0:
build_queue.put(dependent)
|
def build_from_graph(pkg_map: Dict[str, BasePackage], outputdir: Path, args) -> None:
"""
This builds packages in pkg_map in parallel, building at most args.n_jobs
packages at once.
We have a priority queue of packages we are ready to build (build_queue),
where a package is ready to build if all its dependencies are built. The
priority is based on the number of dependents --- we prefer to build
packages with more dependents first.
To build packages in parallel, we use a thread pool of args.n_jobs many
threads listening to build_queue. When the thread is free, it takes an
item off build_queue and builds it. Once the package is built, it sends the
package to the built_queue. The main thread listens to the built_queue and
checks if any of the dependents are ready to be built. If so, it add the
package to the build queue.
"""
# Insert packages into build_queue. We *must* do this after counting
# dependents, because the ordering ought not to change after insertion.
build_queue: PriorityQueue = PriorityQueue()
print("Building the following packages: " + ", ".join(pkg_map))
for pkg in pkg_map.values():
if len(pkg.dependencies) == 0:
build_queue.put(pkg)
built_queue: Queue = Queue()
thread_lock = Lock()
queue_idx = 0
def builder(n):
print(f"Starting thread {n}")
nonlocal queue_idx
while True:
pkg = build_queue.get()
with thread_lock:
pkg._queue_idx = queue_idx
queue_idx += 1
print(f"[{pkg._queue_idx}/{len(pkg_map)}] (thread {n}) building {pkg.name}")
t0 = perf_counter()
try:
pkg.build(outputdir, args)
except Exception as e:
built_queue.put(e)
return
print(
f"[{pkg._queue_idx}/{len(pkg_map)}] (thread {n}) built {pkg.name} in {perf_counter() - t0:.1f} s"
)
built_queue.put(pkg)
# Release the GIL so new packages get queued
sleep(0.01)
for n in range(0, args.n_jobs):
Thread(target=builder, args=(n + 1,), daemon=True).start()
num_built = 0
while num_built < len(pkg_map):
pkg = built_queue.get()
if isinstance(pkg, Exception):
raise pkg
num_built += 1
for _dependent in pkg.dependents:
dependent = pkg_map[_dependent]
dependent.unbuilt_dependencies.remove(pkg.name)
if len(dependent.unbuilt_dependencies) == 0:
build_queue.put(dependent)
|
58,857 |
def eye(N, M=None, k=0, dtype=float, order='C'):
"""Returns a 2-D array with ones on the diagonals and zeros elsewhere.
Args:
N (int): Number of rows.
M (int): Number of columns. ``M == N`` by default.
k (int): Index of the diagonal. Zero indicates the main diagonal,
a positive index an upper diagonal, and a negative index a lower
diagonal.
dtype: Data type specifier.
order ({'C', 'F'}): Row-major (C-style) or column-major
(Fortran-style) order.
Returns:
cupy.ndarray: A 2-D array with given diagonals filled with ones and
zeros elsewhere.
.. seealso:: :func:`numpy.eye`
"""
if M is None:
M = N
ret = zeros((N, M), dtype=dtype, order=order)
if k >= M:
return ret
if k <= -N:
return ret
ret.diagonal(k)[:] = 1
return ret
|
def eye(N, M=None, k=0, dtype=float, order='C'):
"""Returns a 2-D array with ones on the diagonals and zeros elsewhere.
Args:
N (int): Number of rows.
M (int): Number of columns. ``M == N`` by default.
k (int): Index of the diagonal. Zero indicates the main diagonal,
a positive index an upper diagonal, and a negative index a lower
diagonal.
dtype: Data type specifier.
order ({'C', 'F'}): Row-major (C-style) or column-major
(Fortran-style) order.
Returns:
cupy.ndarray: A 2-D array with given diagonals filled with ones and
zeros elsewhere.
.. seealso:: :func:`numpy.eye`
"""
if M is None:
M = N
ret = zeros((N, M), dtype=dtype, order=order)
if k <= -N or k >= M:
return ret
ret.diagonal(k)[:] = 1
return ret
|
14,350 |
def parseArgs(args):
"""Parse argv.
Returns:
3-tuple (infile, axisLimits, options)
axisLimits is either a Dict[str, Optional[float]], for pinning variation axes
to specific coordinates along those axes (with `None` as a placeholder for an
axis' default value); or a Dict[str, Tuple(float, float)], meaning limit this
axis to min/max range.
Axes locations are in user-space coordinates, as defined in the "fvar" table.
"""
from fontTools import configLogger
import argparse
parser = argparse.ArgumentParser(
"fonttools varLib.instancer",
description="Partially instantiate a variable font",
)
parser.add_argument("input", metavar="INPUT.ttf", help="Input variable TTF file.")
parser.add_argument(
"locargs",
metavar="AXIS=LOC",
nargs="*",
help="List of space separated locations. A location consists of "
"the tag of a variation axis, followed by '=' and one of number, "
"number:number or the literal string 'drop'. "
"E.g.: wdth=100 or wght=75.0:125.0 or wght=drop",
)
parser.add_argument(
"-o",
"--output",
metavar="OUTPUT.ttf",
default=None,
help="Output instance TTF file (default: INPUT-instance.ttf).",
)
parser.add_argument(
"--no-optimize",
dest="optimize",
action="store_false",
help="Don't perform IUP optimization on the remaining gvar TupleVariations",
)
parser.add_argument(
"--no-overlap-flag",
dest="overlap",
action="store_false",
help="Don't set OVERLAP_SIMPLE/OVERLAP_COMPOUND glyf flags (only applicable "
"when generating a full instance)",
)
parser.add_argument(
"--remove-overlaps",
dest="remove_overlaps",
action="store_true",
help="Merge overlapping contours and components (only applicable "
"when generating a full instance). Requires skia-pathops",
)
parser.add_argument(
"--ignore-overlap-errors",
dest="ignore_overlap_errors",
action="store_true",
help="Don't crash if the remove-overlaps operation fails for some glyphs.",
)
parser.add_argument(
"--update-name-table",
action="store_true",
help="Update the instantiated font's `name` table. Input font must have "
"a STAT table with Axis Value Tables",
)
parser.add_argument(
"--named-instances",
"-n",
nargs="?",
const=".*",
help="Create static instances for each instance in the variable font's "
"fvar table matching the given regular expression",
)
loggingGroup = parser.add_mutually_exclusive_group(required=False)
loggingGroup.add_argument(
"-v", "--verbose", action="store_true", help="Run more verbosely."
)
loggingGroup.add_argument(
"-q", "--quiet", action="store_true", help="Turn verbosity off."
)
options = parser.parse_args(args)
if options.remove_overlaps:
if options.ignore_overlap_errors:
options.overlap = OverlapMode.REMOVE_AND_IGNORE_ERRORS
else:
options.overlap = OverlapMode.REMOVE
else:
options.overlap = OverlapMode(int(options.overlap))
infile = options.input
if not os.path.isfile(infile):
parser.error("No such file '{}'".format(infile))
configLogger(
level=("DEBUG" if options.verbose else "ERROR" if options.quiet else "INFO")
)
try:
axisLimits = parseLimits(options.locargs)
except ValueError as e:
parser.error(str(e))
if len(axisLimits) != len(options.locargs):
parser.error("Specified multiple limits for the same axis")
if axisLimits and options.named_instances:
parser.error("Specified limits and --fvar")
return (infile, axisLimits, options)
|
def parseArgs(args):
"""Parse argv.
Returns:
3-tuple (infile, axisLimits, options)
axisLimits is either a Dict[str, Optional[float]], for pinning variation axes
to specific coordinates along those axes (with `None` as a placeholder for an
axis' default value); or a Dict[str, Tuple(float, float)], meaning limit this
axis to min/max range.
Axes locations are in user-space coordinates, as defined in the "fvar" table.
"""
from fontTools import configLogger
import argparse
parser = argparse.ArgumentParser(
"fonttools varLib.instancer",
description="Partially instantiate a variable font",
)
parser.add_argument("input", metavar="INPUT.ttf", help="Input variable TTF file.")
parser.add_argument(
"locargs",
metavar="AXIS=LOC",
nargs="*",
help="List of space separated locations. A location consists of "
"the tag of a variation axis, followed by '=' and one of number, "
"number:number or the literal string 'drop'. "
"E.g.: wdth=100 or wght=75.0:125.0 or wght=drop",
)
parser.add_argument(
"-o",
"--output",
metavar="OUTPUT.ttf",
default=None,
help="Output instance TTF file (default: INPUT-instance.ttf).",
)
parser.add_argument(
"--no-optimize",
dest="optimize",
action="store_false",
help="Don't perform IUP optimization on the remaining gvar TupleVariations",
)
parser.add_argument(
"--no-overlap-flag",
dest="overlap",
action="store_false",
help="Don't set OVERLAP_SIMPLE/OVERLAP_COMPOUND glyf flags (only applicable "
"when generating a full instance)",
)
parser.add_argument(
"--remove-overlaps",
dest="remove_overlaps",
action="store_true",
help="Merge overlapping contours and components (only applicable "
"when generating a full instance). Requires skia-pathops",
)
parser.add_argument(
"--ignore-overlap-errors",
dest="ignore_overlap_errors",
action="store_true",
help="Don't crash if the remove-overlaps operation fails for some glyphs.",
)
parser.add_argument(
"--update-name-table",
action="store_true",
help="Update the instantiated font's `name` table. Input font must have "
"a STAT table with Axis Value Tables",
)
parser.add_argument(
"--named-instances",
"-n",
nargs="?",
const=".*",
help="Create static instances for each instance in the variable font's "
"fvar table matching the given regular expression",
)
loggingGroup = parser.add_mutually_exclusive_group(required=False)
loggingGroup.add_argument(
"-v", "--verbose", action="store_true", help="Run more verbosely."
)
loggingGroup.add_argument(
"-q", "--quiet", action="store_true", help="Turn verbosity off."
)
options = parser.parse_args(args)
if options.remove_overlaps:
if options.ignore_overlap_errors:
options.overlap = OverlapMode.REMOVE_AND_IGNORE_ERRORS
else:
options.overlap = OverlapMode.REMOVE
else:
options.overlap = OverlapMode(int(options.overlap))
infile = options.input
if not os.path.isfile(infile):
parser.error("No such file '{}'".format(infile))
configLogger(
level=("DEBUG" if options.verbose else "ERROR" if options.quiet else "INFO")
)
try:
axisLimits = parseLimits(options.locargs)
except ValueError as e:
parser.error(str(e))
if len(axisLimits) != len(options.locargs):
parser.error("Specified multiple limits for the same axis")
if axisLimits and options.named_instances:
parser.error("Specified limits and --named-instances")
return (infile, axisLimits, options)
|
40,106 |
def find_edges(data_graph, edge_id, lib, file_object):
target_id = None
for node in data_graph['nodes']:
if node['label'] == lib:
target_id = node['id']
if target_id is not None:
edge = {'source': file_object['_id'], 'target': target_id, 'id': edge_id}
data_graph['edges'].append(edge)
edge_id += 1
return data_graph, edge_id
|
def find_edges(data_graph, edge_id, lib, file_object):
target_id = None
for node in data_graph['nodes']:
if node['label'] == lib:
target_id = node['id']
if target_id is not None:
edge = {'source': file_object['_id'], 'target': target_id, 'id': edge_id}
data_graph['edges'].append(edge)
edge_id += 1
return edge_id
|
7,348 |
def daisy(image, step=4, radius=15, rings=3, histograms=8, orientations=8,
normalization='l1', sigmas=None, ring_radii=None, visualize=False):
'''Extract DAISY feature descriptors densely for the given image.
DAISY is a feature descriptor similar to SIFT formulated in a way that
allows for fast dense extraction. Typically, this is practical for
bag-of-features image representations.
The implementation follows Tola et al. [1]_ but deviate on the following
points:
* Histogram bin contribution are smoothed with a circular Gaussian
window over the tonal range (the angular range).
* The sigma values of the spatial Gaussian smoothing in this code do not
match the sigma values in the original code by Tola et al. [2]_. In
their code, spatial smoothing is applied to both the input image and
the center histogram. However, this smoothing is not documented in [1]_
and, therefore, it is omitted.
Parameters
----------
image : (M, N) array
Input image (grayscale).
step : int, optional
Distance between descriptor sampling points.
radius : int, optional
Radius (in pixels) of the outermost ring.
rings : int, optional
Number of rings.
histograms : int, optional
Number of histograms sampled per ring.
orientations : int, optional
Number of orientations (bins) per histogram.
normalization : [ 'l1' | 'l2' | 'daisy' | 'off' ], optional
How to normalize the descriptors
* 'l1': L1-normalization of each descriptor.
* 'l2': L2-normalization of each descriptor.
* 'daisy': L2-normalization of individual histograms.
* 'off': Disable normalization.
sigmas : 1D array of float, optional
Standard deviation of spatial Gaussian smoothing for the center
histogram and for each ring of histograms. The array of sigmas should
be sorted from the center and out. I.e. the first sigma value defines
the spatial smoothing of the center histogram and the last sigma value
defines the spatial smoothing of the outermost ring. Specifying sigmas
overrides the following parameter.
``rings = len(sigmas) - 1``
ring_radii : 1D array of int, optional
Radius (in pixels) for each ring. Specifying ring_radii overrides the
following two parameters.
``rings = len(ring_radii)``
``radius = ring_radii[-1]``
If both sigmas and ring_radii are given, they must satisfy the
following predicate since no radius is needed for the center
histogram.
``len(ring_radii) == len(sigmas) + 1``
visualize : bool, optional
Generate a visualization of the DAISY descriptors
Returns
-------
descs : array
Grid of DAISY descriptors for the given image as an array
dimensionality (P, Q, R) where
``P = ceil((M - radius*2) / step)``
``Q = ceil((N - radius*2) / step)``
``R = (rings * histograms + 1) * orientations``
descs_img : (M, N, 3) array (only if visualize==True)
Visualization of the DAISY descriptors.
References
----------
.. [1] Tola et al. "Daisy: An efficient dense descriptor applied to wide-
baseline stereo." Pattern Analysis and Machine Intelligence, IEEE
Transactions on 32.5 (2010): 815-830.
.. [2] http://cvlab.epfl.ch/software/daisy
'''
check_nD(image, 2, 'img')
image = img_as_float(image)
float_dtype = image.dtype
# Validate parameters.
if sigmas is not None and ring_radii is not None \
and len(sigmas) - 1 != len(ring_radii):
raise ValueError('`len(sigmas)-1 != len(ring_radii)`')
if ring_radii is not None:
rings = len(ring_radii)
radius = ring_radii[-1]
if sigmas is not None:
rings = len(sigmas) - 1
if sigmas is None:
sigmas = [radius * (i + 1) / float(2 * rings) for i in range(rings)]
if ring_radii is None:
ring_radii = [radius * (i + 1) / float(rings) for i in range(rings)]
if normalization not in ['l1', 'l2', 'daisy', 'off']:
raise ValueError('Invalid normalization method.')
# Compute image derivatives.
dx = np.zeros(image.shape, dtype=float_dtype)
dy = np.zeros(image.shape, dtype=float_dtype)
dx[:, :-1] = np.diff(image, n=1, axis=1)
dy[:-1, :] = np.diff(image, n=1, axis=0)
# Compute gradient orientation and magnitude and their contribution
# to the histograms.
grad_mag = sqrt(dx ** 2 + dy ** 2)
grad_ori = arctan2(dy, dx)
orientation_kappa = orientations / pi
orientation_angles = [2 * o * pi / orientations - pi
for o in range(orientations)]
hist = np.empty((orientations,) + image.shape, dtype=float_dtype)
for i, o in enumerate(orientation_angles):
# Weigh bin contribution by the circular normal distribution
hist[i, :, :] = exp(orientation_kappa * np.cos(grad_ori - o))
# Weigh bin contribution by the gradient magnitude
hist[i, :, :] = np.multiply(hist[i, :, :], grad_mag)
# Smooth orientation histograms for the center and all rings.
sigmas = [sigmas[0]] + sigmas
hist_smooth = np.empty((rings + 1,) + hist.shape, dtype=float_dtype)
for i in range(rings + 1):
for j in range(orientations):
hist_smooth[i, j, :, :] = gaussian(hist[j, :, :], sigma=sigmas[i])
# Assemble descriptor grid.
theta = [2 * pi * j / histograms for j in range(histograms)]
desc_dims = (rings * histograms + 1) * orientations
descs = np.empty((desc_dims, image.shape[0] - 2 * radius,
image.shape[1] - 2 * radius),
dtype=float_dtype)
descs[:orientations, :, :] = hist_smooth[0, :, radius:-radius,
radius:-radius]
idx = orientations
for i in range(rings):
for j in range(histograms):
y_min = radius + int(round(ring_radii[i] * math.sin(theta[j])))
y_max = descs.shape[1] + y_min
x_min = radius + int(round(ring_radii[i] * math.cos(theta[j])))
x_max = descs.shape[2] + x_min
descs[idx:idx + orientations, :, :] = hist_smooth[i + 1, :,
y_min:y_max,
x_min:x_max]
idx += orientations
descs = descs[:, ::step, ::step]
descs = descs.swapaxes(0, 1).swapaxes(1, 2)
# Normalize descriptors.
if normalization != 'off':
descs += 1e-10
if normalization == 'l1':
descs /= np.sum(descs, axis=2)[:, :, np.newaxis]
elif normalization == 'l2':
descs /= sqrt(np.sum(descs ** 2, axis=2))[:, :, np.newaxis]
elif normalization == 'daisy':
for i in range(0, desc_dims, orientations):
norms = sqrt(np.sum(descs[:, :, i:i + orientations] ** 2,
axis=2))
descs[:, :, i:i + orientations] /= norms[:, :, np.newaxis]
if visualize:
descs_img = gray2rgb(image)
for i in range(descs.shape[0]):
for j in range(descs.shape[1]):
# Draw center histogram sigma
color = [1, 0, 0]
desc_y = i * step + radius
desc_x = j * step + radius
rows, cols, val = draw.circle_perimeter_aa(desc_y, desc_x, int(sigmas[0]))
draw.set_color(descs_img, (rows, cols), color, alpha=val)
max_bin = np.max(descs[i, j, :])
for o_num, o in enumerate(orientation_angles):
# Draw center histogram bins
bin_size = descs[i, j, o_num] / max_bin
dy = sigmas[0] * bin_size * math.sin(o)
dx = sigmas[0] * bin_size * math.cos(o)
rows, cols, val = draw.line_aa(desc_y, desc_x, int(desc_y + dy),
int(desc_x + dx))
draw.set_color(descs_img, (rows, cols), color, alpha=val)
for r_num, r in enumerate(ring_radii):
color_offset = float(1 + r_num) / rings
color = (1 - color_offset, 1, color_offset)
for t_num, t in enumerate(theta):
# Draw ring histogram sigmas
hist_y = desc_y + int(round(r * math.sin(t)))
hist_x = desc_x + int(round(r * math.cos(t)))
rows, cols, val = draw.circle_perimeter_aa(hist_y, hist_x,
int(sigmas[r_num + 1]))
draw.set_color(descs_img, (rows, cols), color, alpha=val)
for o_num, o in enumerate(orientation_angles):
# Draw histogram bins
bin_size = descs[i, j, orientations + r_num *
histograms * orientations +
t_num * orientations + o_num]
bin_size /= max_bin
dy = sigmas[r_num + 1] * bin_size * math.sin(o)
dx = sigmas[r_num + 1] * bin_size * math.cos(o)
rows, cols, val = draw.line_aa(hist_y, hist_x,
int(hist_y + dy),
int(hist_x + dx))
draw.set_color(descs_img, (rows, cols), color, alpha=val)
return descs, descs_img
else:
return descs
|
def daisy(image, step=4, radius=15, rings=3, histograms=8, orientations=8,
normalization='l1', sigmas=None, ring_radii=None, visualize=False):
'''Extract DAISY feature descriptors densely for the given image.
DAISY is a feature descriptor similar to SIFT formulated in a way that
allows for fast dense extraction. Typically, this is practical for
bag-of-features image representations.
The implementation follows Tola et al. [1]_ but deviate on the following
points:
* Histogram bin contribution are smoothed with a circular Gaussian
window over the tonal range (the angular range).
* The sigma values of the spatial Gaussian smoothing in this code do not
match the sigma values in the original code by Tola et al. [2]_. In
their code, spatial smoothing is applied to both the input image and
the center histogram. However, this smoothing is not documented in [1]_
and, therefore, it is omitted.
Parameters
----------
image : (M, N) array
Input image (grayscale).
step : int, optional
Distance between descriptor sampling points.
radius : int, optional
Radius (in pixels) of the outermost ring.
rings : int, optional
Number of rings.
histograms : int, optional
Number of histograms sampled per ring.
orientations : int, optional
Number of orientations (bins) per histogram.
normalization : [ 'l1' | 'l2' | 'daisy' | 'off' ], optional
How to normalize the descriptors
* 'l1': L1-normalization of each descriptor.
* 'l2': L2-normalization of each descriptor.
* 'daisy': L2-normalization of individual histograms.
* 'off': Disable normalization.
sigmas : 1D array of float, optional
Standard deviation of spatial Gaussian smoothing for the center
histogram and for each ring of histograms. The array of sigmas should
be sorted from the center and out. I.e. the first sigma value defines
the spatial smoothing of the center histogram and the last sigma value
defines the spatial smoothing of the outermost ring. Specifying sigmas
overrides the following parameter.
``rings = len(sigmas) - 1``
ring_radii : 1D array of int, optional
Radius (in pixels) for each ring. Specifying ring_radii overrides the
following two parameters.
``rings = len(ring_radii)``
``radius = ring_radii[-1]``
If both sigmas and ring_radii are given, they must satisfy the
following predicate since no radius is needed for the center
histogram.
``len(ring_radii) == len(sigmas) + 1``
visualize : bool, optional
Generate a visualization of the DAISY descriptors
Returns
-------
descs : array
Grid of DAISY descriptors for the given image as an array
dimensionality (P, Q, R) where
``P = ceil((M - radius*2) / step)``
``Q = ceil((N - radius*2) / step)``
``R = (rings * histograms + 1) * orientations``
descs_img : (M, N, 3) array (only if visualize==True)
Visualization of the DAISY descriptors.
References
----------
.. [1] Tola et al. "Daisy: An efficient dense descriptor applied to wide-
baseline stereo." Pattern Analysis and Machine Intelligence, IEEE
Transactions on 32.5 (2010): 815-830.
.. [2] http://cvlab.epfl.ch/software/daisy
'''
check_nD(image, 2, 'img')
image = img_as_float(image)
float_dtype = image.dtype
# Validate parameters.
if sigmas is not None and ring_radii is not None \
and len(sigmas) - 1 != len(ring_radii):
raise ValueError('`len(sigmas)-1 != len(ring_radii)`')
if ring_radii is not None:
rings = len(ring_radii)
radius = ring_radii[-1]
if sigmas is not None:
rings = len(sigmas) - 1
if sigmas is None:
sigmas = [radius * (i + 1) / float(2 * rings) for i in range(rings)]
if ring_radii is None:
ring_radii = [radius * (i + 1) / float(rings) for i in range(rings)]
if normalization not in ['l1', 'l2', 'daisy', 'off']:
raise ValueError('Invalid normalization method.')
# Compute image derivatives.
dx = np.zeros(image.shape, dtype=float_dtype)
dy = np.zeros(image.shape, dtype=float_dtype)
dx[:, :-1] = np.diff(image, n=1, axis=1)
dy[:-1, :] = np.diff(image, n=1, axis=0)
# Compute gradient orientation and magnitude and their contribution
# to the histograms.
grad_mag = sqrt(dx ** 2 + dy ** 2)
grad_ori = arctan2(dy, dx)
orientation_kappa = orientations / pi
orientation_angles = [2 * o * pi / orientations - pi
for o in range(orientations)]
hist = np.empty((orientations,) + image.shape, dtype=float_dtype)
for i, o in enumerate(orientation_angles):
# Weigh bin contribution by the circular normal distribution
hist[i, :, :] = exp(orientation_kappa * math.cos(grad_ori - o))
# Weigh bin contribution by the gradient magnitude
hist[i, :, :] = np.multiply(hist[i, :, :], grad_mag)
# Smooth orientation histograms for the center and all rings.
sigmas = [sigmas[0]] + sigmas
hist_smooth = np.empty((rings + 1,) + hist.shape, dtype=float_dtype)
for i in range(rings + 1):
for j in range(orientations):
hist_smooth[i, j, :, :] = gaussian(hist[j, :, :], sigma=sigmas[i])
# Assemble descriptor grid.
theta = [2 * pi * j / histograms for j in range(histograms)]
desc_dims = (rings * histograms + 1) * orientations
descs = np.empty((desc_dims, image.shape[0] - 2 * radius,
image.shape[1] - 2 * radius),
dtype=float_dtype)
descs[:orientations, :, :] = hist_smooth[0, :, radius:-radius,
radius:-radius]
idx = orientations
for i in range(rings):
for j in range(histograms):
y_min = radius + int(round(ring_radii[i] * math.sin(theta[j])))
y_max = descs.shape[1] + y_min
x_min = radius + int(round(ring_radii[i] * math.cos(theta[j])))
x_max = descs.shape[2] + x_min
descs[idx:idx + orientations, :, :] = hist_smooth[i + 1, :,
y_min:y_max,
x_min:x_max]
idx += orientations
descs = descs[:, ::step, ::step]
descs = descs.swapaxes(0, 1).swapaxes(1, 2)
# Normalize descriptors.
if normalization != 'off':
descs += 1e-10
if normalization == 'l1':
descs /= np.sum(descs, axis=2)[:, :, np.newaxis]
elif normalization == 'l2':
descs /= sqrt(np.sum(descs ** 2, axis=2))[:, :, np.newaxis]
elif normalization == 'daisy':
for i in range(0, desc_dims, orientations):
norms = sqrt(np.sum(descs[:, :, i:i + orientations] ** 2,
axis=2))
descs[:, :, i:i + orientations] /= norms[:, :, np.newaxis]
if visualize:
descs_img = gray2rgb(image)
for i in range(descs.shape[0]):
for j in range(descs.shape[1]):
# Draw center histogram sigma
color = [1, 0, 0]
desc_y = i * step + radius
desc_x = j * step + radius
rows, cols, val = draw.circle_perimeter_aa(desc_y, desc_x, int(sigmas[0]))
draw.set_color(descs_img, (rows, cols), color, alpha=val)
max_bin = np.max(descs[i, j, :])
for o_num, o in enumerate(orientation_angles):
# Draw center histogram bins
bin_size = descs[i, j, o_num] / max_bin
dy = sigmas[0] * bin_size * math.sin(o)
dx = sigmas[0] * bin_size * math.cos(o)
rows, cols, val = draw.line_aa(desc_y, desc_x, int(desc_y + dy),
int(desc_x + dx))
draw.set_color(descs_img, (rows, cols), color, alpha=val)
for r_num, r in enumerate(ring_radii):
color_offset = float(1 + r_num) / rings
color = (1 - color_offset, 1, color_offset)
for t_num, t in enumerate(theta):
# Draw ring histogram sigmas
hist_y = desc_y + int(round(r * math.sin(t)))
hist_x = desc_x + int(round(r * math.cos(t)))
rows, cols, val = draw.circle_perimeter_aa(hist_y, hist_x,
int(sigmas[r_num + 1]))
draw.set_color(descs_img, (rows, cols), color, alpha=val)
for o_num, o in enumerate(orientation_angles):
# Draw histogram bins
bin_size = descs[i, j, orientations + r_num *
histograms * orientations +
t_num * orientations + o_num]
bin_size /= max_bin
dy = sigmas[r_num + 1] * bin_size * math.sin(o)
dx = sigmas[r_num + 1] * bin_size * math.cos(o)
rows, cols, val = draw.line_aa(hist_y, hist_x,
int(hist_y + dy),
int(hist_x + dx))
draw.set_color(descs_img, (rows, cols), color, alpha=val)
return descs, descs_img
else:
return descs
|
55,593 |
def _remove_breaks_for_special_dates(
midnight_utcs, break_start_or_end, special_opens_or_closes
):
"""
Overwrite breaks in break_start_or_end with corresponding dates in
special_opens_or_closes, using midnight_utcs for alignment.
"""
# Short circuit when we have no breaks
if break_start_or_end is None:
return
# Short circuit when nothing to apply.
if not len(special_opens_or_closes):
return
len_m, len_oc = len(midnight_utcs), len(break_start_or_end)
if len_m != len_oc:
raise ValueError(
"Found misaligned dates while building calendar.\n"
"Expected midnight_utcs to be the same length as break_starts,\n"
"but len(midnight_utcs)=%d, len(break_start_or_end)=%d"
% len_m, len_oc
)
# Find the array indices corresponding to each special date.
indexer = midnight_utcs.get_indexer(special_opens_or_closes.index)
# -1 indicates that no corresponding entry was found. If any -1s are
# present, then we have special dates that doesn't correspond to any
# trading day.
if -1 in indexer:
bad_dates = list(special_opens_or_closes[indexer == -1])
raise ValueError("Special dates %s are not trading days." % bad_dates)
# NOTE: This is a slightly dirty hack. We're in-place overwriting the
# internal data of an Index, which is conceptually immutable. Since we're
# maintaining sorting, this should be ok, but this is a good place to
# sanity check if things start going haywire with calendar computations.
break_start_or_end.values[indexer] = np.int64(pd.NaT)
|
def _remove_breaks_for_special_dates(
midnight_utcs, break_start_or_end, special_opens_or_closes
):
"""
Overwrite breaks in break_start_or_end on corresponding dates in
special_opens_or_closes with NaT, using midnight_utcs for alignment.
"""
# Short circuit when we have no breaks
if break_start_or_end is None:
return
# Short circuit when nothing to apply.
if not len(special_opens_or_closes):
return
len_m, len_oc = len(midnight_utcs), len(break_start_or_end)
if len_m != len_oc:
raise ValueError(
"Found misaligned dates while building calendar.\n"
"Expected midnight_utcs to be the same length as break_starts,\n"
"but len(midnight_utcs)=%d, len(break_start_or_end)=%d"
% len_m, len_oc
)
# Find the array indices corresponding to each special date.
indexer = midnight_utcs.get_indexer(special_opens_or_closes.index)
# -1 indicates that no corresponding entry was found. If any -1s are
# present, then we have special dates that doesn't correspond to any
# trading day.
if -1 in indexer:
bad_dates = list(special_opens_or_closes[indexer == -1])
raise ValueError("Special dates %s are not trading days." % bad_dates)
# NOTE: This is a slightly dirty hack. We're in-place overwriting the
# internal data of an Index, which is conceptually immutable. Since we're
# maintaining sorting, this should be ok, but this is a good place to
# sanity check if things start going haywire with calendar computations.
break_start_or_end.values[indexer] = np.int64(pd.NaT)
|
19,976 |
def get_var(times_ranked, cache):
while times_ranked >= len(cache):
next_var = 1. / (1. / (cache[-1] + VAR_PER_CONTEST) + 1. / BETA2)
cache.append(next_var)
return cache[times_ranked]
|
def get_var(times_ranked, cache=[VAR_INIT]):
while times_ranked >= len(cache):
next_var = 1. / (1. / (cache[-1] + VAR_PER_CONTEST) + 1. / BETA2)
cache.append(next_var)
return cache[times_ranked]
|
38,359 |
def verify_yt_installation(binary_yt):
yt_dir = glob.glob("yt-*")
ndirs = len(yt_dir)
if ndirs != 1:
raise RuntimeError("A yt installation was not properly cleaned up, exiting")
yt_dir = yt_dir[0]
python_path = os.sep.join([yt_dir, "bin", "python"])
for dep in OPTIONAL_DEPS + REQUIRED_DEPS:
if binary_yt and dep in YT_SOURCE_ONLY_DEPS:
continue
if dep == "git":
git_path = os.sep.join([yt_dir, "bin", "git"])
call_unix_command(f"{git_path} --version")
if dep in DEPENDENCY_IMPORT_TESTS:
cmd = "{} -c '{}'"
if dep == "rockstar":
# compiling rockstar is broken on newer MacOS releases
if platform.mac_ver()[0].startswith(("10.12", "10.13")):
continue
cmd = (
f"LD_LIBRARY_PATH={os.sep.join([os.curdir, yt_dir, 'lib'])} " + cmd
)
if sys.platform == "darwin":
cmd = "DY" + cmd
call_unix_command(cmd.format(python_path, DEPENDENCY_IMPORT_TESTS[dep]))
else:
call_unix_command(f"{python_path} -c 'import {dep}'")
return yt_dir
|
def verify_yt_installation(binary_yt):
yt_dir = glob.glob("yt-*")
ndirs = len(yt_dir)
if ndirs != 1:
raise RuntimeError("A yt installation was not properly cleaned up, exiting")
yt_dir = yt_dir[0]
python_path = os.sep.join([yt_dir, "bin", "python"])
for dep in OPTIONAL_DEPS + REQUIRED_DEPS:
if binary_yt and dep in YT_SOURCE_ONLY_DEPS:
continue
if dep == "git":
git_path = os.sep.join([yt_dir, "bin", "git"])
call_unix_command(f"{git_path} --version")
if dep in DEPENDENCY_IMPORT_TESTS:
cmd = "{} -c '{}'"
if dep == "rockstar":
# compiling rockstar is broken on newer MacOS releases
if platform.mac_ver()[0].startswith(("10.12", "10.13")):
continue
cmd = (
f"LD_LIBRARY_PATH={os.path.join(os.curdir, yt_dir, 'lib')} " + cmd
)
if sys.platform == "darwin":
cmd = "DY" + cmd
call_unix_command(cmd.format(python_path, DEPENDENCY_IMPORT_TESTS[dep]))
else:
call_unix_command(f"{python_path} -c 'import {dep}'")
return yt_dir
|
41,495 |
def extract_index_access(
baseviewer, subviewer, indices,
):
tensorlib, _ = get_backend()
index_selection = []
stitched = None
indices_concatenated = None
if subviewer:
index_selection = baseviewer.split(indices, selection=subviewer.names)
stitched = subviewer.stitch(index_selection)
# LH: the transpose is here so that modifier code doesn't have to do it
indices_concatenated = tensorlib.astensor(
tensorlib.einsum('ij->ji', stitched)
if len(tensorlib.shape(stitched)) > 1
else stitched,
dtype='int',
)
return index_selection, stitched, indices_concatenated
|
def extract_index_access(
baseviewer, subviewer, indices,
):
tensorlib, _ = get_backend()
index_selection = []
stitched = None
indices_concatenated = None
if subviewer:
index_selection = baseviewer.split(indices, selection=subviewer.names)
stitched = subviewer.stitch(index_selection)
# the transpose is here so that modifier code doesn't have to do it
indices_concatenated = tensorlib.astensor(
tensorlib.einsum('ij->ji', stitched)
if len(tensorlib.shape(stitched)) > 1
else stitched,
dtype='int',
)
return index_selection, stitched, indices_concatenated
|
32,088 |
def run_polling_command(client: MsClient, args: dict, cmd: str, action_func: Callable,
results_function: Callable, post_polling_process: Callable):
"""
This function is generically handling the polling flow. In the polling flow, there is always an initial call that
starts the uploading to the API (referred here as the 'upload' function) and another call that retrieves the status
of that upload (referred here as the 'results' function).
The run_polling_command function runs the 'upload' function and returns a ScheduledCommand object that schedules
the next 'results' function, until the polling is complete.
Args:
args: the arguments required to the command being called, under cmd
cmd: the command to schedule by after the current command
upload_function: the function that initiates the uploading to the API
results_function: the function that retrieves the status of the previously initiated upload process
uploaded_item: the type of item being uploaded
Returns:
"""
ScheduledCommand.raise_error_if_not_supported()
interval_in_secs = int(args.get('interval_in_seconds', 5))
# distinguish between the initial run, which is the upload run, and the results run
is_first_run = 'machine_action_id' not in args
demisto.debug(f'polling args: {args}')
if is_first_run:
command_results = action_func(client, args)
outputs = command_results.outputs
# schedule next poll
polling_args = {
'machine_action_id': outputs.get('action_id'),
'interval_in_seconds': interval_in_secs,
'polling': True,
**args,
}
scheduled_command = ScheduledCommand(
command=cmd,
next_run_in_seconds=interval_in_secs,
args=polling_args,
timeout_in_seconds=600)
command_results.scheduled_command = scheduled_command
return command_results
# not a first run
command_result = results_function(client, args)
action_status = command_result.outputs.get("status")
command_status = command_result.outputs.get("commands", [])[0].get("commandStatus")
if action_status == 'Failed' or command_status == 'Failed':
raise Exception(f'Command failed to get results. {command_result.outputs.get("commands", [])[0].get("errors")}')
elif command_status != 'Completed' or action_status == 'InProgress':
# schedule next poll
polling_args = {
'interval_in_seconds': interval_in_secs,
'polling': True,
**args
}
scheduled_command = ScheduledCommand(
command=cmd,
next_run_in_seconds=interval_in_secs,
args=polling_args,
timeout_in_seconds=600)
command_result = CommandResults(scheduled_command=scheduled_command)
return command_result
# action was completed
else:
return post_polling_process(client, command_result.outputs)
|
def run_polling_command(client: MsClient, args: dict, cmd: str, action_func: Callable,
results_function: Callable, post_polling_process: Callable):
"""
This function is generically handling the polling flow. In the polling flow, there is always an initial call that
starts the uploading to the API (referred here as the 'upload' function) and another call that retrieves the status
of that upload (referred here as the 'results' function).
The run_polling_command function runs the 'upload' function and returns a ScheduledCommand object that schedules
the next 'results' function, until the polling is complete.
Args:
args: the arguments required to the command being called, under cmd
cmd: the command to schedule by after the current command
upload_function: the function that initiates the uploading to the API
results_function: the function that retrieves the status of the previously initiated upload process
uploaded_item: the type of item being uploaded
Returns:
"""
ScheduledCommand.raise_error_if_not_supported()
interval_in_secs = int(args.get('interval_in_seconds', 10))
# distinguish between the initial run, which is the upload run, and the results run
is_first_run = 'machine_action_id' not in args
demisto.debug(f'polling args: {args}')
if is_first_run:
command_results = action_func(client, args)
outputs = command_results.outputs
# schedule next poll
polling_args = {
'machine_action_id': outputs.get('action_id'),
'interval_in_seconds': interval_in_secs,
'polling': True,
**args,
}
scheduled_command = ScheduledCommand(
command=cmd,
next_run_in_seconds=interval_in_secs,
args=polling_args,
timeout_in_seconds=600)
command_results.scheduled_command = scheduled_command
return command_results
# not a first run
command_result = results_function(client, args)
action_status = command_result.outputs.get("status")
command_status = command_result.outputs.get("commands", [])[0].get("commandStatus")
if action_status == 'Failed' or command_status == 'Failed':
raise Exception(f'Command failed to get results. {command_result.outputs.get("commands", [])[0].get("errors")}')
elif command_status != 'Completed' or action_status == 'InProgress':
# schedule next poll
polling_args = {
'interval_in_seconds': interval_in_secs,
'polling': True,
**args
}
scheduled_command = ScheduledCommand(
command=cmd,
next_run_in_seconds=interval_in_secs,
args=polling_args,
timeout_in_seconds=600)
command_result = CommandResults(scheduled_command=scheduled_command)
return command_result
# action was completed
else:
return post_polling_process(client, command_result.outputs)
|
5,868 |
def _dict_formatter(d, n=0, mplus=1, sorter=None):
"""
Pretty printer for dictionaries
`n` keeps track of the starting indentation;
lines are indented by this much after a line break.
`mplus` is additional left padding applied to keys
"""
if isinstance(d, dict):
m = max(map(len, list(d.keys()))) + mplus # width to print keys
s = '\n'.join([k.rjust(m) + ': ' + # right justified, width m
_indenter(_dict_formatter(v, m+n+2, 0, sorter), m+2)
for k, v in sorter(d)]) # +2 for ': '
else:
# By default, NumPy arrays print with linewidth is 76; `n` is
# the indent at which they begin printing, so it is subtracted
# from the default to avoid exceeding 76 characters total.
# `edgeitems` is the number of elements to include before and after
# ellipses when arrays are not shown in full.
# `threshold` is the maximum number of elements for which an
# array is shown in full.
# These values tend to work well for use with OptimizeResult.
with np.printoptions(linewidth=76-n, edgeitems=2, threshold=12,
formatter={'float_kind': _float_formatter_10}):
s = str(d)
return s
|
def _dict_formatter(d, n=0, mplus=1, sorter=None):
"""
Pretty printer for dictionaries
`n` keeps track of the starting indentation;
lines are indented by this much after a line break.
`mplus` is additional left padding applied to keys
"""
if isinstance(d, dict):
m = max(map(len, list(d.keys()))) + mplus # width to print keys
s = '\n'.join([k.rjust(m) + ': ' + # right justified, width m
_indenter(_dict_formatter(v, m+n+2, 0, sorter), m+2)
for k, v in sorter(d)]) # +2 for ': '
else:
# By default, NumPy arrays print with linewidth=76. `n` is
# the indent at which they begin printing, so it is subtracted
# from the default to avoid exceeding 76 characters total.
# `edgeitems` is the number of elements to include before and after
# ellipses when arrays are not shown in full.
# `threshold` is the maximum number of elements for which an
# array is shown in full.
# These values tend to work well for use with OptimizeResult.
with np.printoptions(linewidth=76-n, edgeitems=2, threshold=12,
formatter={'float_kind': _float_formatter_10}):
s = str(d)
return s
|
2,335 |
def permutation_importance(
estimator,
X,
y,
*,
scoring=None,
n_repeats=5,
n_jobs=None,
random_state=None,
sample_weight=None,
max_samples=1.0,
):
"""Permutation importance for feature evaluation [BRE]_.
The :term:`estimator` is required to be a fitted estimator. `X` can be the
data set used to train the estimator or a hold-out set. The permutation
importance of a feature is calculated as follows. First, a baseline metric,
defined by :term:`scoring`, is evaluated on a (potentially different)
dataset defined by the `X`. Next, a feature column from the validation set
is permuted and the metric is evaluated again. The permutation importance
is defined to be the difference between the baseline metric and metric from
permutating the feature column.
Read more in the :ref:`User Guide <permutation_importance>`.
Parameters
----------
estimator : object
An estimator that has already been :term:`fitted` and is compatible
with :term:`scorer`.
X : ndarray or DataFrame, shape (n_samples, n_features)
Data on which permutation importance will be computed.
y : array-like or None, shape (n_samples, ) or (n_samples, n_classes)
Targets for supervised or `None` for unsupervised.
scoring : str, callable, list, tuple, or dict, default=None
Scorer to use.
If `scoring` represents a single score, one can use:
- a single string (see :ref:`scoring_parameter`);
- a callable (see :ref:`scoring`) that returns a single value.
If `scoring` represents multiple scores, one can use:
- a list or tuple of unique strings;
- a callable returning a dictionary where the keys are the metric
names and the values are the metric scores;
- a dictionary with metric names as keys and callables a values.
Passing multiple scores to `scoring` is more efficient than calling
`permutation_importance` for each of the scores as it reuses
predictions to avoid redundant computation.
If None, the estimator's default scorer is used.
n_repeats : int, default=5
Number of times to permute a feature.
n_jobs : int or None, default=None
Number of jobs to run in parallel. The computation is done by computing
permutation score for each columns and parallelized over the columns.
`None` means 1 unless in a :obj:`joblib.parallel_backend` context.
`-1` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
random_state : int, RandomState instance, default=None
Pseudo-random number generator to control the permutations of each
feature.
Pass an int to get reproducible results across function calls.
See :term: `Glossary <random_state>`.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights used in scoring.
max_samples : int or float, default=1.0
The number of samples to draw from X to compute feature importance in each repeat (without
replacement).
- If int, then draw `max_samples` samples.
- If float, then draw `max_samples * X.shape[0]` samples.
No effect if max_samples is equal to 1.0 or X.shape[0].
.. versionadded:: 1.0.dev0
Returns
-------
result : :class:`~sklearn.utils.Bunch` or dict of such instances
Dictionary-like object, with the following attributes.
importances_mean : ndarray of shape (n_features, )
Mean of feature importance over `n_repeats`.
importances_std : ndarray of shape (n_features, )
Standard deviation over `n_repeats`.
importances : ndarray of shape (n_features, n_repeats)
Raw permutation importance scores.
If there are multiple scoring metrics in the scoring parameter
`result` is a dict with scorer names as keys (e.g. 'roc_auc') and
`Bunch` objects like above as values.
References
----------
.. [BRE] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32,
2001. https://doi.org/10.1023/A:1010933404324
Examples
--------
>>> from sklearn.linear_model import LogisticRegression
>>> from sklearn.inspection import permutation_importance
>>> X = [[1, 9, 9],[1, 9, 9],[1, 9, 9],
... [0, 9, 9],[0, 9, 9],[0, 9, 9]]
>>> y = [1, 1, 1, 0, 0, 0]
>>> clf = LogisticRegression().fit(X, y)
>>> result = permutation_importance(clf, X, y, n_repeats=10,
... random_state=0)
>>> result.importances_mean
array([0.4666..., 0. , 0. ])
>>> result.importances_std
array([0.2211..., 0. , 0. ])
"""
if not hasattr(X, "iloc"):
X = check_array(X, force_all_finite="allow-nan", dtype=None)
# Precompute random seed from the random state to be used
# to get a fresh independent RandomState instance for each
# parallel call to _calculate_permutation_scores, irrespective of
# the fact that variables are shared or not depending on the active
# joblib backend (sequential, thread-based or process-based).
random_state = check_random_state(random_state)
random_seed = random_state.randint(np.iinfo(np.int32).max + 1)
# Validate max_samples
if not isinstance(max_samples, numbers.Integral):
max_samples = int(max_samples * X.shape[0])
elif not (0 < max_samples <= X.shape[0]):
raise ValueError("max_samples must be in (0, n_samples]")
if callable(scoring):
scorer = scoring
elif scoring is None or isinstance(scoring, str):
scorer = check_scoring(estimator, scoring=scoring)
else:
scorers_dict = _check_multimetric_scoring(estimator, scoring)
scorer = _MultimetricScorer(**scorers_dict)
baseline_score = _weights_scorer(scorer, estimator, X, y, sample_weight)
scores = Parallel(n_jobs=n_jobs)(
delayed(_calculate_permutation_scores)(
estimator,
X,
y,
sample_weight,
col_idx,
random_seed,
n_repeats,
scorer,
max_samples,
)
for col_idx in range(X.shape[1])
)
if isinstance(baseline_score, dict):
return {
name: _create_importances_bunch(
baseline_score[name],
# unpack the permuted scores
np.array([scores[col_idx][name] for col_idx in range(X.shape[1])]),
)
for name in baseline_score
}
else:
return _create_importances_bunch(baseline_score, np.array(scores))
|
def permutation_importance(
estimator,
X,
y,
*,
scoring=None,
n_repeats=5,
n_jobs=None,
random_state=None,
sample_weight=None,
max_samples=1.0,
):
"""Permutation importance for feature evaluation [BRE]_.
The :term:`estimator` is required to be a fitted estimator. `X` can be the
data set used to train the estimator or a hold-out set. The permutation
importance of a feature is calculated as follows. First, a baseline metric,
defined by :term:`scoring`, is evaluated on a (potentially different)
dataset defined by the `X`. Next, a feature column from the validation set
is permuted and the metric is evaluated again. The permutation importance
is defined to be the difference between the baseline metric and metric from
permutating the feature column.
Read more in the :ref:`User Guide <permutation_importance>`.
Parameters
----------
estimator : object
An estimator that has already been :term:`fitted` and is compatible
with :term:`scorer`.
X : ndarray or DataFrame, shape (n_samples, n_features)
Data on which permutation importance will be computed.
y : array-like or None, shape (n_samples, ) or (n_samples, n_classes)
Targets for supervised or `None` for unsupervised.
scoring : str, callable, list, tuple, or dict, default=None
Scorer to use.
If `scoring` represents a single score, one can use:
- a single string (see :ref:`scoring_parameter`);
- a callable (see :ref:`scoring`) that returns a single value.
If `scoring` represents multiple scores, one can use:
- a list or tuple of unique strings;
- a callable returning a dictionary where the keys are the metric
names and the values are the metric scores;
- a dictionary with metric names as keys and callables a values.
Passing multiple scores to `scoring` is more efficient than calling
`permutation_importance` for each of the scores as it reuses
predictions to avoid redundant computation.
If None, the estimator's default scorer is used.
n_repeats : int, default=5
Number of times to permute a feature.
n_jobs : int or None, default=None
Number of jobs to run in parallel. The computation is done by computing
permutation score for each columns and parallelized over the columns.
`None` means 1 unless in a :obj:`joblib.parallel_backend` context.
`-1` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
random_state : int, RandomState instance, default=None
Pseudo-random number generator to control the permutations of each
feature.
Pass an int to get reproducible results across function calls.
See :term: `Glossary <random_state>`.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights used in scoring.
max_samples : int or float, default=1.0
The number of samples to draw from X to compute feature importance in each repeat (without
replacement).
- If int, then draw `max_samples` samples.
- If float, then draw `max_samples * X.shape[0]` samples.
No effect if max_samples is equal to 1.0 or X.shape[0].
.. versionadded:: 1.0
Returns
-------
result : :class:`~sklearn.utils.Bunch` or dict of such instances
Dictionary-like object, with the following attributes.
importances_mean : ndarray of shape (n_features, )
Mean of feature importance over `n_repeats`.
importances_std : ndarray of shape (n_features, )
Standard deviation over `n_repeats`.
importances : ndarray of shape (n_features, n_repeats)
Raw permutation importance scores.
If there are multiple scoring metrics in the scoring parameter
`result` is a dict with scorer names as keys (e.g. 'roc_auc') and
`Bunch` objects like above as values.
References
----------
.. [BRE] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32,
2001. https://doi.org/10.1023/A:1010933404324
Examples
--------
>>> from sklearn.linear_model import LogisticRegression
>>> from sklearn.inspection import permutation_importance
>>> X = [[1, 9, 9],[1, 9, 9],[1, 9, 9],
... [0, 9, 9],[0, 9, 9],[0, 9, 9]]
>>> y = [1, 1, 1, 0, 0, 0]
>>> clf = LogisticRegression().fit(X, y)
>>> result = permutation_importance(clf, X, y, n_repeats=10,
... random_state=0)
>>> result.importances_mean
array([0.4666..., 0. , 0. ])
>>> result.importances_std
array([0.2211..., 0. , 0. ])
"""
if not hasattr(X, "iloc"):
X = check_array(X, force_all_finite="allow-nan", dtype=None)
# Precompute random seed from the random state to be used
# to get a fresh independent RandomState instance for each
# parallel call to _calculate_permutation_scores, irrespective of
# the fact that variables are shared or not depending on the active
# joblib backend (sequential, thread-based or process-based).
random_state = check_random_state(random_state)
random_seed = random_state.randint(np.iinfo(np.int32).max + 1)
# Validate max_samples
if not isinstance(max_samples, numbers.Integral):
max_samples = int(max_samples * X.shape[0])
elif not (0 < max_samples <= X.shape[0]):
raise ValueError("max_samples must be in (0, n_samples]")
if callable(scoring):
scorer = scoring
elif scoring is None or isinstance(scoring, str):
scorer = check_scoring(estimator, scoring=scoring)
else:
scorers_dict = _check_multimetric_scoring(estimator, scoring)
scorer = _MultimetricScorer(**scorers_dict)
baseline_score = _weights_scorer(scorer, estimator, X, y, sample_weight)
scores = Parallel(n_jobs=n_jobs)(
delayed(_calculate_permutation_scores)(
estimator,
X,
y,
sample_weight,
col_idx,
random_seed,
n_repeats,
scorer,
max_samples,
)
for col_idx in range(X.shape[1])
)
if isinstance(baseline_score, dict):
return {
name: _create_importances_bunch(
baseline_score[name],
# unpack the permuted scores
np.array([scores[col_idx][name] for col_idx in range(X.shape[1])]),
)
for name in baseline_score
}
else:
return _create_importances_bunch(baseline_score, np.array(scores))
|
42,783 |
def test_dataframe_multiIndex_index_column():
data = {
"x":
pd.DataFrame([[2, 3], [6, 7]],
index=pd.MultiIndex.from_arrays([['a', 'b'], ['y', 'z']]),
columns=pd.MultiIndex.from_arrays([['m', 'n'], ['p',
'q']]))
}
with pytest.raises(ValueError):
assert expand_grid(others=data)
|
def test_dataframe_multi_index_index_column():
data = {
"x":
pd.DataFrame([[2, 3], [6, 7]],
index=pd.MultiIndex.from_arrays([['a', 'b'], ['y', 'z']]),
columns=pd.MultiIndex.from_arrays([['m', 'n'], ['p',
'q']]))
}
with pytest.raises(ValueError):
assert expand_grid(others=data)
|
28,418 |
def set_new_attribute_deprecated(self: object, key: str, value: object) -> None:
org = len(self.__dict__)
object.__setattr__(self, key, value)
new = len(self.__dict__)
if new > org:
warnings.warn(
"Setting custom attributes is deprecated and will be removed in the next" " version",
TelegramDeprecationWarning,
)
|
def set_new_attribute_deprecated(self: object, key: str, value: object) -> None:
org = len(self.__dict__)
object.__setattr__(self, key, value)
new = len(self.__dict__)
if new > org:
warnings.warn(
"Setting custom attributes on objects of the PTB library is deprecated.",
TelegramDeprecationWarning,
)
|
23,622 |
def fit_sandia(curves, p_ac_0, p_nt):
r'''
Determine parameters for the Sandia inverter model from efficiency
curves.
Parameters
----------
curves : DataFrame
Columns must be ``'fraction_of_rated_power'``, ``'dc_voltage_level'``,
``'dc_voltage'``, ``'ac_power'``, ``'efficiency'``. See notes for the
definition and unit for each column.
p_ac_0 : numeric
Rated AC power of the inverter [W].
p_nt : numeric
Night tare, i.e., power consumed while inverter is not delivering
AC power. [W]
Returns
-------
dict with parameters for the Sandia inverter model. See
:py:func:`snl_inverter` for a description of entries in the returned dict.
See Also
--------
snlinverter
Notes
-----
An inverter efficiency curve comprises a series of pairs
('fraction_of_rated_power', 'efficiency'), e.g. (0.1, 0.5), (0.2, 0.7),
etc. at a specified DC voltage level and AC power level. The DataFrame
`curves` must contain at least one efficiency curve for each combination
of DC voltage level and AC power level. Columns in `curves` must be the
following:
================ ========================================
Column name Description
================ ========================================
'fraction_of_rated_power' Fraction of rated AC power `p_ac_0`. The
CEC inverter test protocol specifies values
of 0.1, 0.2, 0.3, 0.5, 0.75 and 1.0. [unitless]
'dc_voltage_level' Must be 'Vmin', 'Vnom', or 'Vmax'. Curves must
be provided for all three voltage levels. At
least one curve must be provided for each
combination of fraction_of_rated_power and
dc_voltage_level.
'dc_voltage' DC input voltage. [V]
'ac_power' Output AC power. [W]
'efficiency' Ratio of AC output power to DC input power.
[unitless]
For each curve, DC input power is calculated from AC power and efficiency.
References
----------
.. [1] SAND2007-5036, "Performance Model for Grid-Connected
Photovoltaic Inverters by D. King, S. Gonzalez, G. Galbraith, W.
Boyson
.. [2] Sandia Inverter Model page, PV Performance Modeling Collaborative
https://pvpmc.sandia.gov/modeling-steps/dc-to-ac-conversion/sandia-inverter-model/ # noqa: E501
'''
voltage_levels = ['Vmin', 'Vnom', 'Vmax']
# average dc input voltage at each voltage level
v_d = np.array(
[curves['dc_voltage'][curves['dc_voltage_level'] == 'Vmin'].mean(),
curves['dc_voltage'][curves['dc_voltage_level'] == 'Vnom'].mean(),
curves['dc_voltage'][curves['dc_voltage_level'] == 'Vmax'].mean()])
v_nom = v_d[1] # model parameter
# independent variable for regressions, x_d
x_d = v_d - v_nom
curves['dc_power'] = curves['ac_power'] / curves['efficiency']
# empty dataframe to contain intermediate variables
coeffs = pd.DataFrame(index=voltage_levels,
columns=['a', 'b', 'c', 'p_dc', 'p_s0'], data=np.nan)
def solve_quad(a, b, c):
return (-b + (b**2 - 4 * a * c)**.5) / (2 * a)
# [2] STEP 3E, fit a line to (DC voltage, model_coefficient)
def extract_c(x_d, add):
test = polyfit(x_d, add, 1)
beta0, beta1 = test
c = beta1 / beta0
return beta0, beta1, c
for d in voltage_levels:
x = curves['dc_power'][curves['dc_voltage_level'] == d]
y = curves['ac_power'][curves['dc_voltage_level'] == d]
# [2] STEP 3B
# fit a quadratic to (DC power, AC power)
c, b, a = polyfit(x, y, 2)
# [2] STEP 3D, solve for p_dc and p_s0
p_dc = solve_quad(a, b, (c - p_ac_0))
p_s0 = solve_quad(a, b, c)
# Add values to dataframe at index d
coeffs['a'][d] = a
coeffs['b'][d] = b
coeffs['c'][d] = c
coeffs['p_dc'][d] = p_dc
coeffs['p_s0'][d] = p_s0
b_dc0, b_dc1, c1 = extract_c(x_d, coeffs['p_dc'])
b_s0, b_s1, c2 = extract_c(x_d, coeffs['p_s0'])
b_c0, b_c1, c3 = extract_c(x_d, coeffs['a'])
p_dc0 = b_dc0
p_s0 = b_s0
c0 = b_c0
# prepare dict and return
return {'Paco': p_ac_0, 'Pdco': p_dc0, 'Vdco': v_nom, 'Pso': p_s0,
'C0': c0, 'C1': c1, 'C2': c2, 'C3': c3, 'Pnt': p_nt}
|
def fit_sandia(curves, p_ac_0, p_nt):
r'''
Determine parameters for the Sandia inverter model from efficiency
curves.
Parameters
----------
curves : DataFrame
Columns must be ``'fraction_of_rated_power'``, ``'dc_voltage_level'``,
``'dc_voltage'``, ``'ac_power'``, ``'efficiency'``. See notes for the
definition and unit for each column.
p_ac_0 : numeric
Rated AC power of the inverter [W].
p_nt : numeric
Night tare, i.e., power consumed while inverter is not delivering
AC power. [W]
Returns
-------
dict with parameters for the Sandia inverter model. See
:py:func:`snl_inverter` for a description of entries in the returned dict.
See Also
--------
snlinverter
Notes
-----
An inverter efficiency curve comprises a series of pairs
('fraction_of_rated_power', 'efficiency'), e.g. (0.1, 0.5), (0.2, 0.7),
etc. at a specified DC voltage level and AC power level. The DataFrame
`curves` must contain at least one efficiency curve for each combination
of DC voltage level and AC power level. Columns in `curves` must be the
following:
================ ========================================
Column name Description
================ ========================================
'fraction_of_rated_power' Fraction of rated AC power `p_ac_0`. The
CEC inverter test protocol specifies values
of 0.1, 0.2, 0.3, 0.5, 0.75 and 1.0. [unitless]
'dc_voltage_level' Must be 'Vmin', 'Vnom', or 'Vmax'. Curves must
be provided for all three voltage levels. At
least one curve must be provided for each
combination of fraction_of_rated_power and
dc_voltage_level.
'dc_voltage' DC input voltage. [V]
'ac_power' Output AC power. [W]
'efficiency' Ratio of AC output power to DC input power.
[unitless]
For each curve, DC input power is calculated from AC power and efficiency.
References
----------
.. [1] SAND2007-5036, "Performance Model for Grid-Connected
Photovoltaic Inverters by D. King, S. Gonzalez, G. Galbraith, W.
Boyson
.. [2] Sandia Inverter Model page, PV Performance Modeling Collaborative
https://pvpmc.sandia.gov/modeling-steps/dc-to-ac-conversion/sandia-inverter-model/ # noqa: E501
'''
voltage_levels = ['Vmin', 'Vnom', 'Vmax']
# average dc input voltage at each voltage level
v_d = np.array(
[curves['dc_voltage'][curves['dc_voltage_level'] == 'Vmin'].mean(),
curves['dc_voltage'][curves['dc_voltage_level'] == 'Vnom'].mean(),
curves['dc_voltage'][curves['dc_voltage_level'] == 'Vmax'].mean()])
v_nom = v_d[1] # model parameter
# independent variable for regressions, x_d
x_d = v_d - v_nom
curves['dc_power'] = curves['ac_power'] / curves['efficiency']
# empty dataframe to contain intermediate variables
coeffs = pd.DataFrame(index=voltage_levels,
columns=['a', 'b', 'c', 'p_dc', 'p_s0'], data=np.nan)
def solve_quad(a, b, c):
return (-b + (b**2 - 4 * a * c)**.5) / (2 * a)
# [2] STEP 3E, fit a line to (DC voltage, model_coefficient)
def extract_c(x_d, add):
test = polyfit(x_d, add, 1)
beta0, beta1 = test
c = beta1 / beta0
return beta0, beta1, c
for d in voltage_levels:
x = curves['dc_power'][curves['dc_voltage_level'] == d]
y = curves['ac_power'][curves['dc_voltage_level'] == d]
# [2] STEP 3B
# fit a quadratic to (DC power, AC power)
c, b, a = polyfit(x, y, 2)
# [2] STEP 3D, solve for p_dc and p_s0
p_dc = solve_quad(a, b, (c - p_ac_0))
p_s0 = solve_quad(a, b, c)
# Add values to dataframe at index d
coeffs['a'][d] = a
coeffs['b'][d] = b
coeffs['c'][d] = c
coeffs['p_dc'][d] = p_dc
coeffs['p_s0'][d] = p_s0
p_dc0, _, c1 = extract_c(x_d, coeffs['p_dc'])
b_s0, b_s1, c2 = extract_c(x_d, coeffs['p_s0'])
b_c0, b_c1, c3 = extract_c(x_d, coeffs['a'])
p_dc0 = b_dc0
p_s0 = b_s0
c0 = b_c0
# prepare dict and return
return {'Paco': p_ac_0, 'Pdco': p_dc0, 'Vdco': v_nom, 'Pso': p_s0,
'C0': c0, 'C1': c1, 'C2': c2, 'C3': c3, 'Pnt': p_nt}
|
57,845 |
def filter_tests(tests: set, id_set: json, is_nightly=False) -> set:
"""
Filter tests out from the test set if they are:
a.Ignored
b.Non XSOAR or non-supported packs
c. tests of deprecated packs.
d. tests of private packs (optional)
Args:
tests (set): Set of tests collected so far.
id_set (dict): The ID set.
remove_private_packs (bool): Whether to remove private packs
Returns:
(set): Set of tests without ignored, non supported and deprecated-packs tests.
"""
tests_with_no_dummy_strings = {test for test in tests if 'no test' not in test.lower()}
tests_without_ignored = remove_ignored_tests(tests_with_no_dummy_strings, id_set)
tests_without_non_supported = remove_tests_for_non_supported_packs(tests_without_ignored, id_set)
if is_nightly:
# Removing private packs' tests from nightly, since they aren't runnable in nightly
# due to the fact they aren't in stored in the content repository.
remove_private_tests(tests_without_non_supported)
return tests_without_non_supported
|
def filter_tests(tests: set, id_set: json, is_nightly=False) -> set:
"""
Filter tests out from the test set if they are:
a. Ignored
b. Non-XSOAR or non-supported packs
c. tests of deprecated packs.
d. tests of private packs (optional)
Args:
tests (set): Set of tests collected so far.
id_set (dict): The ID set.
remove_private_packs (bool): Whether to remove private packs
Returns:
(set): Set of tests without ignored, non supported and deprecated-packs tests.
"""
tests_with_no_dummy_strings = {test for test in tests if 'no test' not in test.lower()}
tests_without_ignored = remove_ignored_tests(tests_with_no_dummy_strings, id_set)
tests_without_non_supported = remove_tests_for_non_supported_packs(tests_without_ignored, id_set)
if is_nightly:
# Removing private packs' tests from nightly, since they aren't runnable in nightly
# due to the fact they aren't in stored in the content repository.
remove_private_tests(tests_without_non_supported)
return tests_without_non_supported
|
12,958 |
def get_valid_shipping_methods_for_order(order: Order):
return ShippingMethod.objects.applicable_shipping_methods_for_instance(
order, channel=order.channel, price=order.get_subtotal().gross
)
|
def get_valid_shipping_methods_for_order(order: Order):
return ShippingMethod.objects.applicable_shipping_methods_for_instance(
order, channel_id=order.channel_id, price=order.get_subtotal().gross
)
|
54,671 |
def test_reverse_ens(rotkehlchen_api_server):
"""Test that we can reverse resolve ENS names"""
db = DBEns(rotkehlchen_api_server.rest_api.rotkehlchen.data.db)
db_conn = rotkehlchen_api_server.rest_api.rotkehlchen.data.db.conn
addrs_1 = [
to_checksum_address('0x9531c059098e3d194ff87febb587ab07b30b1306'),
to_checksum_address('0x2b888954421b424c5d3d9ce9bb67c9bd47537d12'),
]
response = requests.post(
api_url_for(
rotkehlchen_api_server,
'reverseensresource',
),
json={'ethereum_addresses': addrs_1},
)
result = assert_proper_response_with_result(response)
expected_resp_1 = {
addrs_1[0]: 'rotki.eth',
addrs_1[1]: 'lefteris.eth',
}
assert result == expected_resp_1
addrs_2 = [
to_checksum_address('0x9531c059098e3d194ff87febb587ab07b30b1306'),
to_checksum_address('0xa4b73b39f73f73655e9fdc5d167c21b3fa4a1ed6'),
to_checksum_address('0x71C7656EC7ab88b098defB751B7401B5f6d8976F'),
]
timestamps_before_request = _get_timestamps(db, addrs_1)
response = requests.post(
api_url_for(
rotkehlchen_api_server,
'reverseensresource',
),
json={'ethereum_addresses': addrs_2},
)
result = assert_proper_response_with_result(response)
all_addrs = list(set(addrs_1) | set(addrs_2))
expected_resp_2 = {
addrs_2[0]: 'rotki.eth',
addrs_2[1]: 'abc.eth',
}
assert result == expected_resp_2
timestamps_after_request = _get_timestamps(db, addrs_1)
assert timestamps_before_request == timestamps_after_request
# Going to check that after request with ignore_cache ens_mappings will be updated
db_changes_before = db_conn.total_changes
response = requests.post(
api_url_for(
rotkehlchen_api_server,
'reverseensresource',
),
json={'ethereum_addresses': ['0xqwerty']},
)
assert_error_response(
response=response,
contained_in_msg='Given value 0xqwerty is not an ethereum address',
status_code=HTTPStatus.BAD_REQUEST,
)
requests.post(
api_url_for(
rotkehlchen_api_server,
'reverseensresource',
),
json={'ethereum_addresses': all_addrs, 'ignore_cache': True},
)
db_changes_after = db_conn.total_changes
# Check that we have 5 updates because we have 5 rows in ens_mappings table
assert db_changes_after == 5 + db_changes_before
|
def test_reverse_ens(rotkehlchen_api_server):
"""Test that we can reverse resolve ENS names"""
db = DBEns(rotkehlchen_api_server.rest_api.rotkehlchen.data.db)
db_conn = rotkehlchen_api_server.rest_api.rotkehlchen.data.db.conn
addrs_1 = [
to_checksum_address('0x9531c059098e3d194ff87febb587ab07b30b1306'),
to_checksum_address('0x2b888954421b424c5d3d9ce9bb67c9bd47537d12'),
]
response = requests.post(
api_url_for(
rotkehlchen_api_server,
'reverseensresource',
),
json={'ethereum_addresses': addrs_1},
)
result = assert_proper_response_with_result(response)
expected_resp_1 = {
addrs_1[0]: 'rotki.eth',
addrs_1[1]: 'lefteris.eth',
}
assert result == expected_resp_1
addrs_2 = [
to_checksum_address('0x9531c059098e3d194ff87febb587ab07b30b1306'),
to_checksum_address('0xa4b73b39f73f73655e9fdc5d167c21b3fa4a1ed6'),
to_checksum_address('0x71C7656EC7ab88b098defB751B7401B5f6d8976F'),
]
timestamps_before_request = _get_timestamps(db, addrs_1)
response = requests.post(
api_url_for(
rotkehlchen_api_server,
'reverseensresource',
),
json={'ethereum_addresses': addrs_2},
)
result = assert_proper_response_with_result(response)
all_addrs = list(set(addrs_1) | set(addrs_2))
expected_resp_2 = {
addrs_2[0]: 'rotki.eth',
addrs_2[1]: 'abc.eth',
}
assert result == expected_resp_2
timestamps_after_request = _get_timestamps(db, addrs_1)
assert timestamps_before_request == timestamps_after_request
# Going to check that after request with ignore_cache ens_mappings will be updated
db_changes_before = db_conn.total_changes
response = requests.post(
api_url_for(
rotkehlchen_api_server,
'reverseensresource',
),
json={'ethereum_addresses': ['0xqwerty']},
)
assert_error_response(
response=response,
contained_in_msg='Given value 0xqwerty is not an ethereum address',
status_code=HTTPStatus.BAD_REQUEST,
)
requests.post(
api_url_for(
rotkehlchen_api_server,
'reverseensresource',
),
json={'ethereum_addresses': all_addrs, 'ignore_cache': True},
)
db_changes_after = db_conn.total_changes
# Check that we have 5 updates because we have 5 rows in ens_mappings table
assert db_changes_after == 5 + db_changes_before, 'should have made 5 DB changes'
|
55,358 |
def parse_tracking_example(example, dataset_ndims,
dtype=tf.float32):
X_names = ['app', 'cent', 'morph', 'adj']
y_names = ['temp_adj']
sparse_names = ['adj', 'temp_adj']
full_name_dict = {'app': 'appearances',
'cent': 'centroids',
'morph': 'morphologies',
'adj': 'adj_matrices',
'temp_adj': 'temporal_adj_matrices'}
# Recreate the example structure
data = {}
shape_strings_dict = {}
shapes_dict = {}
for key in dataset_ndims:
if 'shape' in key:
new_key = '_'.join(key.split('_')[0:-1])
shapes_dict[new_key] = dataset_ndims[key]
for key in shapes_dict:
dataset_ndims.pop(key + '_shape')
for key in dataset_ndims:
if key in sparse_names:
data[key] = tf.io.SparseFeature(value_key=key + '_val',
index_key=[key + '_ind_' + str(i)
for i in range(dataset_ndims[key])],
size=shapes_dict[key],
dtype=tf.float32)
else:
data[key] = tf.io.FixedLenFeature([], tf.string)
shape_strings = [key + '_shape_' + str(i)
for i in range(dataset_ndims[key])]
shape_strings_dict[key] = shape_strings
for ss in shape_strings:
data[ss] = tf.io.FixedLenFeature([], tf.int64)
# Get data
content = tf.io.parse_single_example(example, data)
X_dict = {}
y_dict = {}
for key in dataset_ndims:
# Get the feature and reshape
if key in sparse_names:
value = content[key]
else:
shape = [content[ss] for ss in shape_strings_dict[key]]
value = content[key]
value = tf.io.parse_tensor(value, out_type=dtype)
value = tf.reshape(value, shape=shape)
if key in X_names:
X_dict[full_name_dict[key]] = value
else:
y_dict[full_name_dict[key]] = value
return X_dict, y_dict
|
def parse_tracking_example(example, dataset_ndims,
dtype=tf.float32):
X_names = ['app', 'cent', 'morph', 'adj']
y_names = ['temp_adj']
sparse_names = ['adj', 'temp_adj']
full_name_dict = {'app': 'appearances',
'cent': 'centroids',
'morph': 'morphologies',
'adj': 'adj_matrices',
'temp_adj': 'temporal_adj_matrices'}
# Recreate the example structure
data = {}
shape_strings_dict = {}
shapes_dict = {}
for key in dataset_ndims:
if 'shape' in key:
new_key = '_'.join(key.split('_')[0:-1])
shapes_dict[new_key] = dataset_ndims[key]
for key in shapes_dict:
dataset_ndims.pop(key + '_shape')
for key in dataset_ndims:
if key in sparse_names:
data[key] = tf.io.SparseFeature(value_key='{}_val'.format(key),
index_key=['{}_ind_{}'.format(key, i)
for i in range(dataset_ndims[key])],
size=shapes_dict[key],
dtype=tf.float32)
else:
data[key] = tf.io.FixedLenFeature([], tf.string)
shape_strings = [key + '_shape_' + str(i)
for i in range(dataset_ndims[key])]
shape_strings_dict[key] = shape_strings
for ss in shape_strings:
data[ss] = tf.io.FixedLenFeature([], tf.int64)
# Get data
content = tf.io.parse_single_example(example, data)
X_dict = {}
y_dict = {}
for key in dataset_ndims:
# Get the feature and reshape
if key in sparse_names:
value = content[key]
else:
shape = [content[ss] for ss in shape_strings_dict[key]]
value = content[key]
value = tf.io.parse_tensor(value, out_type=dtype)
value = tf.reshape(value, shape=shape)
if key in X_names:
X_dict[full_name_dict[key]] = value
else:
y_dict[full_name_dict[key]] = value
return X_dict, y_dict
|
14,610 |
def main(argv=None):
"""
Handles command line arguments and gets things started.
Parameters
----------
argv : list of str
List of arguments, as if specified on the command-line.
If None, ``sys.argv[1:]`` is used instead.
"""
# Get command line arguments
parser = argparse.ArgumentParser(
description="Loads a trained model and outputs predictions based \
on input feature files.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
conflict_handler='resolve')
parser.add_argument('model_file',
help='Model file to load and use for generating \
predictions.')
parser.add_argument('input_file',
help='A csv file, json file, or megam file \
(with or without the label column), \
with the appropriate suffix.',
nargs='+')
parser.add_argument('-i', '--id_col',
help='Name of the column which contains the instance \
IDs in ARFF, CSV, or TSV files.',
default='id')
parser.add_argument('-l', '--label_col',
help='Name of the column which contains the labels\
in ARFF, CSV, or TSV files. For ARFF files, this\
must be the final column to count as the label.',
default='y')
parser.add_argument('-p', '--positive_label',
help="If the model is only being used to predict the \
probability of a particular label, this \
specifies the index of the label we're \
predicting. 1 = second label, which is default \
for binary classification. Keep in mind that \
labels are sorted lexicographically.",
default=1, type=int)
parser.add_argument('-q', '--quiet',
help='Suppress printing of "Loading..." messages.',
action='store_true')
parser.add_argument('--output_file', '-o',
help="Path to output tsv file. If not specified, "
"predictions will be printed to stdout.")
parser.add_argument('--version', action='version',
version='%(prog)s {0}'.format(__version__))
probability_handling = parser.add_mutually_exclusive_group()
probability_handling.add_argument('-t', '--threshold',
help="If the model we're using is "
"generating probabilities of the "
"positive label, return 1 if it "
"meets/exceeds the given threshold "
"and 0 otherwise.", type=float)
probability_handling.add_argument('--all_probabilities', '-a',
action='store_true',
help="Flag indicating whether to output "
"the probabilities of all labels "
"instead of just the probability "
"of the positive label.")
args = parser.parse_args(argv)
# Make warnings from built-in warnings module get formatted more nicely
logging.captureWarnings(True)
logging.basicConfig(format=('%(asctime)s - %(name)s - %(levelname)s - ' +
'%(message)s'))
logger = logging.getLogger(__name__)
# Create the classifier and load the model
predictor = Predictor(args.model_file,
positive_label=args.positive_label,
threshold=args.threshold,
return_all_probabilities=args.all_probabilities,
logger=logger)
# Iterate over all the specified input files
for i, input_file in enumerate(args.input_file):
# make sure each file extension is one we can process
input_extension = os.path.splitext(input_file)[1].lower()
if input_extension not in EXT_TO_READER:
logger.error(('Input file must be in either .arff, .csv, '
'.jsonlines, .libsvm, .megam, .ndj, or .tsv format. '
' Skipping file {}').format(input_file))
continue
else:
# Iterate through input file and collect the information we need
reader = EXT_TO_READER[input_extension](input_file,
quiet=args.quiet,
label_col=args.label_col,
id_col=args.id_col)
feature_set = reader.read()
preds = predictor.predict(feature_set)
header = predictor.output_file_header
if args.output_file is not None:
with open(args.output_file, "a") as fout:
if i == 0: # Only write header once per set of input files
print("\t".join(header), file=fout)
if args.all_probabilities:
for i, probabilities in enumerate(preds):
id_ = feature_set.ids[i]
probs_str = "\t".join([str(p) for p in probabilities])
print("{}\t{}".format(id_, probs_str), file=fout)
else:
for i, pred in enumerate(preds):
id_ = feature_set.ids[i]
print("{}\t{}".format(id_, pred), file=fout)
else:
if i == 0: # Only write header once per set of input files
print("\t".join(header))
if args.all_probabilities:
for i, probabilities in enumerate(preds):
id_ = feature_set.ids[i]
probs_str = "\t".join([str(p) for p in probabilities])
print("{}\t{}".format(id_, probs_str))
else:
for i, pred in enumerate(preds):
id_ = feature_set.ids[i]
print("{}\t{}".format(id_, pred))
|
def main(argv=None):
"""
Handles command line arguments and gets things started.
Parameters
----------
argv : list of str
List of arguments, as if specified on the command-line.
If None, ``sys.argv[1:]`` is used instead.
"""
# Get command line arguments
parser = argparse.ArgumentParser(
description="Loads a trained model and outputs predictions based \
on input feature files.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
conflict_handler='resolve')
parser.add_argument('model_file',
help='Model file to load and use for generating \
predictions.')
parser.add_argument('input_file',
help='A csv file, json file, or megam file \
(with or without the label column), \
with the appropriate suffix.',
nargs='+')
parser.add_argument('-i', '--id_col',
help='Name of the column which contains the instance \
IDs in ARFF, CSV, or TSV files.',
default='id')
parser.add_argument('-l', '--label_col',
help='Name of the column which contains the labels\
in ARFF, CSV, or TSV files. For ARFF files, this\
must be the final column to count as the label.',
default='y')
parser.add_argument('-p', '--positive_label',
help="If the model is only being used to predict the \
probability of a particular label, this \
specifies the index of the label we're \
predicting. 1 = second label, which is default \
for binary classification. Keep in mind that \
labels are sorted lexicographically.",
default=1, type=int)
parser.add_argument('-q', '--quiet',
help='Suppress printing of "Loading..." messages.',
action='store_true')
parser.add_argument('--output_file', '-o',
help="Path to output tsv file. If not specified, "
"predictions will be printed to stdout.")
parser.add_argument('--version', action='version',
version='%(prog)s {0}'.format(__version__))
probability_handling = parser.add_mutually_exclusive_group()
probability_handling.add_argument('-t', '--threshold',
help="If the model we're using is "
"generating probabilities of the "
"positive label, return 1 if it "
"meets/exceeds the given threshold "
"and 0 otherwise.", type=float)
probability_handling.add_argument('--all_probabilities', '-a',
action='store_true',
help="Flag indicating whether to output "
"the probabilities of all labels "
"instead of just the probability "
"of the positive label.")
args = parser.parse_args(argv)
# Make warnings from built-in warnings module get formatted more nicely
logging.captureWarnings(True)
logging.basicConfig(format=('%(asctime)s - %(name)s - %(levelname)s - ' +
'%(message)s'))
logger = logging.getLogger(__name__)
# Create the classifier and load the model
predictor = Predictor(args.model_file,
positive_label=args.positive_label,
threshold=args.threshold,
return_all_probabilities=args.all_probabilities,
logger=logger)
# Iterate over all the specified input files
for i, input_file in enumerate(args.input_file):
# make sure each file extension is one we can process
input_extension = os.path.splitext(input_file)[1].lower()
if input_extension not in EXT_TO_READER:
logger.error(('Input file must be in either .arff, .csv, '
'.jsonlines, .libsvm, .megam, .ndj, or .tsv format. '
' Skipping file {}').format(input_file))
continue
else:
# Iterate through input file and collect the information we need
reader = EXT_TO_READER[input_extension](input_file,
quiet=args.quiet,
label_col=args.label_col,
id_col=args.id_col)
feature_set = reader.read()
preds = predictor.predict(feature_set)
header = predictor.output_file_header
if args.output_file is not None:
with open(args.output_file, "a") as outputfh:
if i == 0: # Only write header once per set of input files
print("\t".join(header), file=fout)
if args.all_probabilities:
for i, probabilities in enumerate(preds):
id_ = feature_set.ids[i]
probs_str = "\t".join([str(p) for p in probabilities])
print("{}\t{}".format(id_, probs_str), file=fout)
else:
for i, pred in enumerate(preds):
id_ = feature_set.ids[i]
print("{}\t{}".format(id_, pred), file=fout)
else:
if i == 0: # Only write header once per set of input files
print("\t".join(header))
if args.all_probabilities:
for i, probabilities in enumerate(preds):
id_ = feature_set.ids[i]
probs_str = "\t".join([str(p) for p in probabilities])
print("{}\t{}".format(id_, probs_str))
else:
for i, pred in enumerate(preds):
id_ = feature_set.ids[i]
print("{}\t{}".format(id_, pred))
|
32,390 |
def main():
try:
files = demisto.getArg('files')
branch_name = demisto.getArg('branch')
pack_name = demisto.getArg('pack')
user = demisto.getArg('user')
comment = demisto.getArg('comment')
if not comment:
comment = ''
username = user.get('username')
if user.get('email'):
username = f'{username} ({user.get("email")})'
# commit the files from the input
for file in files:
if file.get('Unzipped'):
continue
# create ContentFile item
content_file = ContentFile(pack_name=pack_name, file=file)
if content_file.content_type in ('automation', 'integration'):
# split automation file to yml and script files
yml_file, script_file = split_yml_file(content_file)
commit_content_item(branch_name, yml_file)
commit_content_item(branch_name, script_file)
else:
commit_content_item(branch_name, content_file)
inciden_url = demisto.demistoUrls().get('investigation')
# create the PR text
pr_body = PR_TEMPLATE.format(username, pack_name, branch_name, inciden_url, comment)
if new_files:
pr_body = f'{pr_body}\n\n### New files\n- '
pr_body = pr_body + '\n- '.join(new_files)
if modified_files:
pr_body = f'{pr_body}\n\n### Modified files\n- '
pr_body = pr_body + '\n- '.join(modified_files)
return_results(CommandResults(
readable_output=pr_body,
outputs_prefix='PR_text',
outputs=pr_body
))
except Exception as ex:
demisto.error(str(ex)) # print the traceback
return_error(f'Failed to execute CommitFiles script. Error: {str(traceback.format_exc())}')
|
def main():
try:
files = demisto.getArg('files')
branch_name = demisto.getArg('branch')
pack_name = demisto.getArg('pack')
user = demisto.getArg('user')
comment = demisto.getArg('comment')
if not comment:
comment = ''
username = user.get('username')
if user.get('email'):
username = f'{username} ({user.get("email")})'
# commit the files from the input
for file in files:
if file.get('Unzipped'):
continue
# create ContentFile item
content_file = ContentFile(pack_name=pack_name, file=file)
if content_file.content_type in ('automation', 'integration'):
# split automation file to yml and script files
yml_file, script_file = split_yml_file(content_file)
commit_content_item(branch_name, yml_file)
commit_content_item(branch_name, script_file)
else:
commit_content_item(branch_name, content_file)
incident_url = demisto.demistoUrls().get('investigation')
# create the PR text
pr_body = PR_TEMPLATE.format(username, pack_name, branch_name, inciden_url, comment)
if new_files:
pr_body = f'{pr_body}\n\n### New files\n- '
pr_body = pr_body + '\n- '.join(new_files)
if modified_files:
pr_body = f'{pr_body}\n\n### Modified files\n- '
pr_body = pr_body + '\n- '.join(modified_files)
return_results(CommandResults(
readable_output=pr_body,
outputs_prefix='PR_text',
outputs=pr_body
))
except Exception as ex:
demisto.error(str(ex)) # print the traceback
return_error(f'Failed to execute CommitFiles script. Error: {str(traceback.format_exc())}')
|
43,681 |
def bit_driver(wires, n):
r"""Returns the bit-driver cost Hamiltonian component.
This Hamiltonian is defined as:
.. math:: H \ = \ (-1)^{n + 1} \displaystyle\sum_{i} Z_i
where :math:`Z_i` is the Pauli-Z operator acting on the
:math:`i`-th wire and :math:`n \ \in \ \{0, \ 1\}`. This Hamiltonian is often used as a term when
constructing larger QAOA cost Hamiltonians.
Args:
wires (Iterable or Wires): The wires on which the returned Hamiltonian acts
n (int): Either :math:`0` or :math:`1`. Determines whether the Hamiltonian assigns
lower energies to bitstrings with more :math:`0`s or :math:`1`s, respectively.
Returns:
.Hamiltonian
**Example**
>>> wires = range(3)
>>> hamiltonian = qaoa.pauli_driver(wires, 1)
>>> print(hamiltonian)
(1.0) [Z0] + (1.0) [Z1] + (1.0) [Z2]
"""
if n == 0:
coeffs = [-1 for _ in wires]
elif n == 1:
coeffs = [1 for _ in wires]
else:
raise ValueError("'state' must be either 0 or 1, got {}".format(n))
ops = [qml.PauliZ(w) for w in wires]
return qml.Hamiltonian(coeffs, ops)
|
def bit_driver(wires, n):
r"""Returns the bit-driver cost Hamiltonian component.
This Hamiltonian is defined as:
.. math:: H \ = \ (-1)^{n + 1} \displaystyle\sum_{i} Z_i
where :math:`Z_i` is the Pauli-Z operator acting on the
:math:`i`-th wire and :math:`n \ \in \ \{0, \ 1\}`. This Hamiltonian is often used as a term when
constructing larger QAOA cost Hamiltonians.
Args:
wires (Iterable or Wires): The wires on which the returned Hamiltonian acts
n (int): Either :math:`0` or :math:`1`. Determines whether the Hamiltonian assigns
lower energies to bitstrings with more :math:`0` or :math:`1` terms, respectively.
Returns:
.Hamiltonian
**Example**
>>> wires = range(3)
>>> hamiltonian = qaoa.pauli_driver(wires, 1)
>>> print(hamiltonian)
(1.0) [Z0] + (1.0) [Z1] + (1.0) [Z2]
"""
if n == 0:
coeffs = [-1 for _ in wires]
elif n == 1:
coeffs = [1 for _ in wires]
else:
raise ValueError("'state' must be either 0 or 1, got {}".format(n))
ops = [qml.PauliZ(w) for w in wires]
return qml.Hamiltonian(coeffs, ops)
|
57,767 |
def apply_security_profile_command(profile_name: str, profile_type: str, rule_name: str, pre_post: str = None):
if DEVICE_GROUP: # Panorama instance
if not pre_post:
raise Exception('Please provide the pre_post argument when applying profiles to rules in '
'Panorama instance.')
panorama_xpath = f"{XPATH_RULEBASE}{pre_post}/security/rules/entry[@name='{rule_name}']/profile-setting/"\
f"profiles/{profile_type}",
apply_security_profile(panorama_xpath, profile_name)
return_results(f'The profile {profile_name} has been applied to the rule {rule_name}')
else: # firewall instance
firewall_xpath = f"{XPATH_RULEBASE}rulebase/security/rules/entry[@name='{rule_name}']/profile-setting/"\
f"profiles/{profile_type}"
apply_security_profile(firewall_xpath, profile_name)
return_results(f'The profile {profile_name} has been applied to the rule {rule_name}')
|
def apply_security_profile_command(profile_name: str, profile_type: str, rule_name: str, pre_post: str = None):
if DEVICE_GROUP: # Panorama instance
if not pre_post:
raise Exception('Please provide the pre_post argument when applying profiles to rules in '
'Panorama instance.')
panorama_xpath = f"{XPATH_RULEBASE}{pre_post}/security/rules/entry[@name='{rule_name}']/profile-setting/"\
f"profiles/{profile_type}",
apply_security_profile(panorama_xpath, profile_name)
return_results(f'The profile {profile_name} has been applied to the rule {rule_name}')
else: # firewall instance
firewall_xpath = f"{XPATH_RULEBASE}rulebase/security/rules/entry[@name='{rule_name}']/profile-setting/"\
f"profiles/{profile_type}"
apply_security_profile(firewall_xpath, profile_name)
return_results(f'The profile {profile_name} has been applied to the rule {rule_name}')
|
31,967 |
def list_groups_command(client: MsGraphClient, args: Dict) -> Tuple[str, Dict, Dict]:
"""Lists all groups and return outputs in Demisto's format.
Args:
client: Client object with request
args: Usually demisto.args()
Returns:
Outputs.
"""
order_by = args.get('order_by')
next_link = args.get('next_link')
top = args.get('top')
filter_ = args.get('filter')
groups = client.list_groups(order_by, next_link, top, filter_)
groups_readable, groups_outputs = parse_outputs(groups['value'])
next_link_response = ''
if '@odata.nextLink' in groups:
next_link_response = groups['@odata.nextLink']
if next_link_response:
entry_context = {f'{INTEGRATION_CONTEXT_NAME}NextLink:': {'GroupsNextLink': next_link_response},
f'{INTEGRATION_CONTEXT_NAME}(val.ID === obj.ID)': groups_outputs}
title = 'Groups (Note that there are more results. Please use the GroupsNextLink argument to see them.):'
else:
entry_context = {f'{INTEGRATION_CONTEXT_NAME}(val.ID === obj.ID)': groups_outputs}
title = 'Groups:'
human_readable = tableToMarkdown(name=title, t=groups_readable,
headers=['ID', 'Display Name', 'Description', 'Created Date Time', 'Mail'],
removeNull=True)
return human_readable, entry_context, groups
|
def list_groups_command(client: MsGraphClient, args: Dict) -> Tuple[str, Dict, Dict]:
"""Lists all groups and return outputs in Demisto's format.
Args:
client: Client object with request
args: Usually demisto.args()
Returns:
Outputs.
"""
order_by = args.get('order_by')
next_link = args.get('next_link')
top = args.get('top')
filter_ = args.get('filter')
groups = client.list_groups(order_by, next_link, top, filter_)
groups_readable, groups_outputs = parse_outputs(groups['value'])
next_link_response = ''
if '@odata.nextLink' in groups:
next_link_response = groups['@odata.nextLink']
if next_link_response:
entry_context = {f'{INTEGRATION_CONTEXT_NAME}NextLink': {'GroupsNextLink': next_link_response},
f'{INTEGRATION_CONTEXT_NAME}(val.ID === obj.ID)': groups_outputs}
title = 'Groups (Note that there are more results. Please use the GroupsNextLink argument to see them.):'
else:
entry_context = {f'{INTEGRATION_CONTEXT_NAME}(val.ID === obj.ID)': groups_outputs}
title = 'Groups:'
human_readable = tableToMarkdown(name=title, t=groups_readable,
headers=['ID', 'Display Name', 'Description', 'Created Date Time', 'Mail'],
removeNull=True)
return human_readable, entry_context, groups
|
7,953 |
def test_external_dagmc(cpp_driver, model):
harness = ExternalDAGMCTest(cpp_driver,'statepoint.5.h5',model)
harness.main()
|
def test_external_dagmc(cpp_driver, model):
harness = ExternalDAGMCTest(cpp_driver, 'statepoint.5.h5', model)
harness.main()
|
55,387 |
def test_parse_tf_serving_input():
# instances are correctly aggregated to dict of input name -> tensor
tfserving_input = {
"instances": [
{"a": "s1", "b": 1, "c": [1, 2, 3]},
{"a": "s2", "b": 2, "c": [4, 5, 6]},
{"a": "s3", "b": 3, "c": [7, 8, 9]},
]
}
result = pyfunc_scoring_server.parse_tf_serving_input(tfserving_input)
assert (result["a"] == np.array(["s1", "s2", "s3"])).all()
assert (result["b"] == np.array([1, 2, 3])).all()
assert (result["c"] == np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])).all()
# input is bad if a column value is missing for a row/instance
tfserving_input = {
"instances": [
{"a": "s1", "b": 1},
{"a": "s2", "b": 2, "c": [4, 5, 6]},
{"a": "s3", "b": 3, "c": [7, 8, 9]},
]
}
with pytest.raises(MlflowException) as ex:
pyfunc_scoring_server.parse_tf_serving_input(tfserving_input)
assert "The length of values for each input/column name are not the same" in str(ex)
# values for each column are properly converted to a tensor
arr = [
[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
[[3, 2, 1], [6, 5, 4], [9, 8, 7]],
]
tfserving_input = {"instances": arr}
result = pyfunc_scoring_server.parse_tf_serving_input(tfserving_input)
assert result.shape == (2, 3, 3)
assert (result == np.array(arr)).all()
# input data specified via "inputs" must be a dictionary
tfserving_input = {"inputs": arr}
with pytest.raises(MlflowException) as ex:
pyfunc_scoring_server.parse_tf_serving_input(tfserving_input)
assert "Failed to parse data as TF serving input." in str(ex)
# input can be provided in column format
tfserving_input = {
"inputs": {"a": ["s1", "s2", "s3"], "b": [1, 2, 3], "c": [[1, 2, 3], [4, 5, 6], [7, 8, 9]]}
}
result = pyfunc_scoring_server.parse_tf_serving_input(tfserving_input)
assert (result["a"] == np.array(["s1", "s2", "s3"])).all()
assert (result["b"] == np.array([1, 2, 3])).all()
assert (result["c"] == np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])).all()
# cannot specify both instance and inputs
tfserving_input = {
"instances": arr,
"inputs": {"a": ["s1", "s2", "s3"], "b": [1, 2, 3], "c": [[1, 2, 3], [4, 5, 6], [7, 8, 9]]},
}
with pytest.raises(MlflowException) as ex:
pyfunc_scoring_server.parse_tf_serving_input(tfserving_input)
assert (
'Both "instances" and "inputs" were specified. A request can have either but not both'
in str(ex)
)
|
def test_parse_tf_serving_input():
# instances are correctly aggregated to dict of input name -> tensor
tfserving_input = {
"instances": [
{"a": "s1", "b": 1, "c": [1, 2, 3]},
{"a": "s2", "b": 2, "c": [4, 5, 6]},
{"a": "s3", "b": 3, "c": [7, 8, 9]},
]
}
result = pyfunc_scoring_server.parse_tf_serving_input(tfserving_input)
assert (result["a"] == np.array(["s1", "s2", "s3"])).all()
assert (result["b"] == np.array([1, 2, 3])).all()
assert (result["c"] == np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])).all()
# input is bad if a column value is missing for a row/instance
tfserving_input = {
"instances": [
{"a": "s1", "b": 1},
{"a": "s2", "b": 2, "c": [4, 5, 6]},
{"a": "s3", "b": 3, "c": [7, 8, 9]},
]
}
with pytest.raises(MlflowException, match="The length of values for each input/column name are not the same"):
pyfunc_scoring_server.parse_tf_serving_input(tfserving_input)
# values for each column are properly converted to a tensor
arr = [
[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
[[3, 2, 1], [6, 5, 4], [9, 8, 7]],
]
tfserving_input = {"instances": arr}
result = pyfunc_scoring_server.parse_tf_serving_input(tfserving_input)
assert result.shape == (2, 3, 3)
assert (result == np.array(arr)).all()
# input data specified via "inputs" must be a dictionary
tfserving_input = {"inputs": arr}
with pytest.raises(MlflowException) as ex:
pyfunc_scoring_server.parse_tf_serving_input(tfserving_input)
assert "Failed to parse data as TF serving input." in str(ex)
# input can be provided in column format
tfserving_input = {
"inputs": {"a": ["s1", "s2", "s3"], "b": [1, 2, 3], "c": [[1, 2, 3], [4, 5, 6], [7, 8, 9]]}
}
result = pyfunc_scoring_server.parse_tf_serving_input(tfserving_input)
assert (result["a"] == np.array(["s1", "s2", "s3"])).all()
assert (result["b"] == np.array([1, 2, 3])).all()
assert (result["c"] == np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])).all()
# cannot specify both instance and inputs
tfserving_input = {
"instances": arr,
"inputs": {"a": ["s1", "s2", "s3"], "b": [1, 2, 3], "c": [[1, 2, 3], [4, 5, 6], [7, 8, 9]]},
}
with pytest.raises(MlflowException) as ex:
pyfunc_scoring_server.parse_tf_serving_input(tfserving_input)
assert (
'Both "instances" and "inputs" were specified. A request can have either but not both'
in str(ex)
)
|
30,785 |
def map_changes_to_existing_user(existing_user, new_json):
# if existing_user is not None:
for k, v in new_json.items():
if type(v) == list:
# handle in specific way
# as of now only emails needs to be handled
if k == 'emails':
existing_email_list = existing_user.get(k)
# update
for i in v:
for j in existing_email_list:
if j.get('type') == i.get('type'):
if j.get('value') != i.get('value'):
j['value'] = i.get('value')
if i.get('primary', None) is not None:
j['primary'] = i.get('primary')
else:
if j.get('primary', None) is not None:
j['primary'] = j.get('primary')
break
# add
new_email_list = []
for i in v:
exist = False
for j in existing_email_list:
if i.get('type') == j.get('type', ''):
exist = True
break
if not exist:
new_email = {'type': i.get('type'),
'value': i.get('value')}
if i.get('primary', None) is not None:
new_email.update({'primary': i.get('primary')})
new_email_list.append(new_email)
existing_email_list.extend(new_email_list)
elif type(v) == dict:
if k != SCIM_EXTENSION_SCHEMA:
map_changes_to_existing_user(existing_user.get(k), v)
else:
existing_user[k] = v
|
def map_changes_to_existing_user(existing_user, new_json):
# if existing_user is not None:
for k, v in new_json.items():
if type(v) == list:
# handle in specific way
# as of now only emails needs to be handled
if k == 'emails':
existing_email_list = existing_user.get(k)
# update
for i in v:
for j in existing_email_list:
if j.get('type') == i.get('type'):
if j.get('value') != i.get('value'):
j['value'] = i.get('value')
if i.get('primary', None) is not None:
j['primary'] = i.get('primary')
else:
if j.get('primary', None) is not None:
j['primary'] = j.get('primary')
break
# add
new_email_list = []
for i in v:
exist = False
for existing_email in existing_email_list:
if i.get('type') == j.get('type', ''):
exist = True
break
if not exist:
new_email = {'type': i.get('type'),
'value': i.get('value')}
if i.get('primary', None) is not None:
new_email.update({'primary': i.get('primary')})
new_email_list.append(new_email)
existing_email_list.extend(new_email_list)
elif type(v) == dict:
if k != SCIM_EXTENSION_SCHEMA:
map_changes_to_existing_user(existing_user.get(k), v)
else:
existing_user[k] = v
|
3,895 |
def bellman_ford_predecessor_and_distance(
G, source, target=None, weight="weight", heuristic=False
):
"""Compute shortest path lengths and predecessors on shortest paths
in weighted graphs.
The algorithm has a running time of $O(mn)$ where $n$ is the number of
nodes and $m$ is the number of edges. It is slower than Dijkstra but
can handle negative edge weights.
If a negative cycle is detected, you can use :func:`find_negative_cycle`
to return the cycle and examine it. Shoftest paths are not defined when
a negative cycle exists because once reached, the path can cycle forever
to build up arbitrarily low weights.
Parameters
----------
G : NetworkX graph
The algorithm works for all types of graphs, including directed
graphs and multigraphs.
source: node label
Starting node for path
target : node label, optional
Ending node for path
weight : string or function
If this is a string, then edge weights will be accessed via the
edge attribute with this key (that is, the weight of the edge
joining `u` to `v` will be ``G.edges[u, v][weight]``). If no
such edge attribute exists, the weight of the edge is assumed to
be one.
If this is a function, the weight of an edge is the value
returned by the function. The function must accept exactly three
positional arguments: the two endpoints of an edge and the
dictionary of edge attributes for that edge. The function must
return a number.
heuristic : bool
Determines whether to use a heuristic to early detect negative
cycles at a hopefully negligible cost.
Returns
-------
pred, dist : dictionaries
Returns two dictionaries keyed by node to predecessor in the
path and to the distance from the source respectively.
Raises
------
NodeNotFound
If `source` is not in `G`.
NetworkXUnbounded
If the (di)graph contains a negative (di)cycle, the
algorithm raises an exception to indicate the presence of the
negative (di)cycle. Note: any negative weight edge in an
undirected graph is a negative cycle.
Examples
--------
>>> G = nx.path_graph(5, create_using=nx.DiGraph())
>>> pred, dist = nx.bellman_ford_predecessor_and_distance(G, 0)
>>> sorted(pred.items())
[(0, []), (1, [0]), (2, [1]), (3, [2]), (4, [3])]
>>> sorted(dist.items())
[(0, 0), (1, 1), (2, 2), (3, 3), (4, 4)]
>>> pred, dist = nx.bellman_ford_predecessor_and_distance(G, 0, 1)
>>> sorted(pred.items())
[(0, []), (1, [0]), (2, [1]), (3, [2]), (4, [3])]
>>> sorted(dist.items())
[(0, 0), (1, 1), (2, 2), (3, 3), (4, 4)]
>>> G = nx.cycle_graph(5, create_using=nx.DiGraph())
>>> G[1][2]["weight"] = -7
>>> nx.bellman_ford_predecessor_and_distance(G, 0)
Traceback (most recent call last):
...
networkx.exception.NetworkXUnbounded: Negative cycle detected.
See Also
--------
find_negative_cycle
Notes
-----
Edge weight attributes must be numerical.
Distances are calculated as sums of weighted edges traversed.
The dictionaries returned only have keys for nodes reachable from
the source.
In the case where the (di)graph is not connected, if a component
not containing the source contains a negative (di)cycle, it
will not be detected.
In NetworkX v2.1 and prior, the source node had predecessor `[None]`.
In NetworkX v2.2 this changed to the source node having predecessor `[]`
"""
if source not in G:
raise nx.NodeNotFound(f"Node {source} is not found in the graph")
weight = _weight_function(G, weight)
if any(weight(u, v, d) < 0 for u, v, d in nx.selfloop_edges(G, data=True)):
raise nx.NetworkXUnbounded("Negative cycle detected.")
dist = {source: 0}
pred = {source: []}
if len(G) == 1:
return pred, dist
weight = _weight_function(G, weight)
dist = _bellman_ford(
G, [source], weight, pred=pred, dist=dist, target=target, heuristic=heuristic
)
return (pred, dist)
|
def bellman_ford_predecessor_and_distance(
G, source, target=None, weight="weight", heuristic=False
):
"""Compute shortest path lengths and predecessors on shortest paths
in weighted graphs.
The algorithm has a running time of $O(mn)$ where $n$ is the number of
nodes and $m$ is the number of edges. It is slower than Dijkstra but
can handle negative edge weights.
If a negative cycle is detected, you can use :func:`find_negative_cycle`
to return the cycle and examine it. Shortest paths are not defined when
a negative cycle exists because once reached, the path can cycle forever
to build up arbitrarily low weights.
Parameters
----------
G : NetworkX graph
The algorithm works for all types of graphs, including directed
graphs and multigraphs.
source: node label
Starting node for path
target : node label, optional
Ending node for path
weight : string or function
If this is a string, then edge weights will be accessed via the
edge attribute with this key (that is, the weight of the edge
joining `u` to `v` will be ``G.edges[u, v][weight]``). If no
such edge attribute exists, the weight of the edge is assumed to
be one.
If this is a function, the weight of an edge is the value
returned by the function. The function must accept exactly three
positional arguments: the two endpoints of an edge and the
dictionary of edge attributes for that edge. The function must
return a number.
heuristic : bool
Determines whether to use a heuristic to early detect negative
cycles at a hopefully negligible cost.
Returns
-------
pred, dist : dictionaries
Returns two dictionaries keyed by node to predecessor in the
path and to the distance from the source respectively.
Raises
------
NodeNotFound
If `source` is not in `G`.
NetworkXUnbounded
If the (di)graph contains a negative (di)cycle, the
algorithm raises an exception to indicate the presence of the
negative (di)cycle. Note: any negative weight edge in an
undirected graph is a negative cycle.
Examples
--------
>>> G = nx.path_graph(5, create_using=nx.DiGraph())
>>> pred, dist = nx.bellman_ford_predecessor_and_distance(G, 0)
>>> sorted(pred.items())
[(0, []), (1, [0]), (2, [1]), (3, [2]), (4, [3])]
>>> sorted(dist.items())
[(0, 0), (1, 1), (2, 2), (3, 3), (4, 4)]
>>> pred, dist = nx.bellman_ford_predecessor_and_distance(G, 0, 1)
>>> sorted(pred.items())
[(0, []), (1, [0]), (2, [1]), (3, [2]), (4, [3])]
>>> sorted(dist.items())
[(0, 0), (1, 1), (2, 2), (3, 3), (4, 4)]
>>> G = nx.cycle_graph(5, create_using=nx.DiGraph())
>>> G[1][2]["weight"] = -7
>>> nx.bellman_ford_predecessor_and_distance(G, 0)
Traceback (most recent call last):
...
networkx.exception.NetworkXUnbounded: Negative cycle detected.
See Also
--------
find_negative_cycle
Notes
-----
Edge weight attributes must be numerical.
Distances are calculated as sums of weighted edges traversed.
The dictionaries returned only have keys for nodes reachable from
the source.
In the case where the (di)graph is not connected, if a component
not containing the source contains a negative (di)cycle, it
will not be detected.
In NetworkX v2.1 and prior, the source node had predecessor `[None]`.
In NetworkX v2.2 this changed to the source node having predecessor `[]`
"""
if source not in G:
raise nx.NodeNotFound(f"Node {source} is not found in the graph")
weight = _weight_function(G, weight)
if any(weight(u, v, d) < 0 for u, v, d in nx.selfloop_edges(G, data=True)):
raise nx.NetworkXUnbounded("Negative cycle detected.")
dist = {source: 0}
pred = {source: []}
if len(G) == 1:
return pred, dist
weight = _weight_function(G, weight)
dist = _bellman_ford(
G, [source], weight, pred=pred, dist=dist, target=target, heuristic=heuristic
)
return (pred, dist)
|
32,556 |
def securerank_to_dbotscore(sr):
# converts cisco umbrella score to dbotscore
DBotScore = 0
if sr is not None:
if SUSPICOUS_THRESHOLD < sr <= 100:
DBotScore = 1
elif MALICIOUS_THRESHOLD < sr <= SUSPICOUS_THRESHOLD:
DBotScore = 2
elif sr <= MALICIOUS_THRESHOLD:
DBotScore = 3
return DBotScore
|
def securerank_to_dbotscore(sr):
# converts cisco umbrella score to dbotscore
DBotScore = 0
if sr is not None:
if SUSPICOUS_THRESHOLD < sr <= 100:
DBotScore = 1
elif MALICIOUS_THRESHOLD < sr <= SUSPICIOUS_THRESHOLD:
DBotScore = 2
elif sr <= MALICIOUS_THRESHOLD:
DBotScore = 3
return DBotScore
|
31,709 |
def initialize_server(host, port, secure_connection, unsecure):
"""
uses the instance configuration to initialize the LDAP server
:param host: host or ip
:type host: string
:param port: port or None
:type port: number
:param secure_connection: SSL or None
:type secure_connection: string
:param unsecure: trust any cert
:type unsecure: boolean
:return: ldap3 Server
:rtype: Server
"""
if secure_connection == "TLS":
demisto.debug("initializing sever with TLS (unsecure: {}). port: {}". format(unsecure, port or 'default(636)'))
tls = Tls(validate=ssl.CERT_NONE)
if port:
return Server(host, port=port, use_ssl=unsecure, tls=tls)
return Server(host, use_ssl=unsecure, tls=tls)
if secure_connection == "SSL":
# intialize server with ssl
# port is configured by default as 389 or as 636 for LDAPS if not specified in configuration
demisto.debug("initializing sever with ssl (unsecure: {}). port: {}". format(unsecure, port or 'default(636)'))
if not unsecure:
demisto.debug("will require server certificate.")
tls = Tls(validate=ssl.CERT_REQUIRED, ca_certs_file=os.environ.get('SSL_CERT_FILE'))
if port:
return Server(host, port=port, use_ssl=True, tls=tls)
return Server(host, use_ssl=True, tls=tls)
if port:
return Server(host, port=port, use_ssl=True)
return Server(host, use_ssl=True)
demisto.debug("initializing server without secure connection. port: {}". format(port or 'default(389)'))
if port:
return Server(host, port=port)
return Server(host)
|
def initialize_server(host, port, secure_connection, unsecure):
"""
uses the instance configuration to initialize the LDAP server
:param host: host or ip
:type host: string
:param port: port or None
:type port: number
:param secure_connection: SSL or None
:type secure_connection: string
:param unsecure: trust any cert
:type unsecure: boolean
:return: ldap3 Server
:rtype: Server
"""
if secure_connection == "TLS":
demisto.debug(f"initializing server with TLS (unsecure: {unsecure}). port: {port or 'default(636)'}")
tls = Tls(validate=ssl.CERT_NONE)
if port:
return Server(host, port=port, use_ssl=unsecure, tls=tls)
return Server(host, use_ssl=unsecure, tls=tls)
if secure_connection == "SSL":
# intialize server with ssl
# port is configured by default as 389 or as 636 for LDAPS if not specified in configuration
demisto.debug("initializing sever with ssl (unsecure: {}). port: {}". format(unsecure, port or 'default(636)'))
if not unsecure:
demisto.debug("will require server certificate.")
tls = Tls(validate=ssl.CERT_REQUIRED, ca_certs_file=os.environ.get('SSL_CERT_FILE'))
if port:
return Server(host, port=port, use_ssl=True, tls=tls)
return Server(host, use_ssl=True, tls=tls)
if port:
return Server(host, port=port, use_ssl=True)
return Server(host, use_ssl=True)
demisto.debug("initializing server without secure connection. port: {}". format(port or 'default(389)'))
if port:
return Server(host, port=port)
return Server(host)
|
35,475 |
def create_ui_commands(packer, CP, pcm_speed, hud, is_metric, idx, stock_hud):
commands = []
bus_pt = get_pt_bus(CP.carFingerprint)
radar_disabled = CP.carFingerprint in HONDA_BOSCH and CP.openpilotLongitudinalControl
bus_lkas = get_lkas_cmd_bus(CP.carFingerprint, radar_disabled)
if CP.openpilotLongitudinalControl:
if CP.carFingerprint in HONDA_BOSCH:
acc_hud_values = {
'CRUISE_SPEED': hud.v_cruise,
'ENABLE_MINI_CAR': 1,
'SET_TO_1': 1,
'HUD_LEAD': hud.car,
'HUD_DISTANCE': 3,
'ACC_ON': hud.car != 0,
'SET_TO_X1': 1,
'IMPERIAL_UNIT': int(not is_metric),
'FCM_OFF': 1,
}
else:
acc_hud_values = {
'PCM_SPEED': pcm_speed * CV.MS_TO_KPH,
'PCM_GAS': hud.pcm_accel,
'CRUISE_SPEED': hud.v_cruise,
'ENABLE_MINI_CAR': 1,
'HUD_LEAD': hud.car,
'HUD_DISTANCE': 3, # max distance setting on display
'IMPERIAL_UNIT': int(not is_metric),
'SET_ME_X01_2': 1,
'SET_ME_X01': 1,
"FCM_OFF": stock_hud["FCM_OFF"],
"FCM_OFF_2": stock_hud["FCM_OFF_2"],
"FCM_PROBLEM": stock_hud["FCM_PROBLEM"],
"ICONS": stock_hud["ICONS"],
}
commands.append(packer.make_can_msg("ACC_HUD", bus_pt, acc_hud_values, idx))
lkas_hud_values = {
'SET_ME_X41': 0x41,
'SET_ME_X48': 0x48,
'STEERING_REQUIRED': hud.steer_required,
'SOLID_LANES': hud.lanes,
'LANE_LINES': 3,
'DASHED_LANES': hud.lanes,
'BEEP': 0,
'LKAS_PROBLEM': 0,
}
if not (CP.flags & HondaFlags.BOSCH_EXT_HUD):
lkas_hud_values['SET_ME_X48'] = 0x48
if CP.flags & HondaFlags.BOSCH_EXT_HUD and not CP.openpilotLongitudinalControl:
commands.append(packer.make_can_msg('LKAS_HUD_A', bus_lkas, lkas_hud_values, idx))
commands.append(packer.make_can_msg('LKAS_HUD_B', bus_lkas, lkas_hud_values, idx))
else:
commands.append(packer.make_can_msg('LKAS_HUD', bus_lkas, lkas_hud_values, idx))
if radar_disabled and CP.carFingerprint in HONDA_BOSCH:
radar_hud_values = {
'CMBS_OFF': 0x01,
'SET_TO_1': 0x01,
}
commands.append(packer.make_can_msg('RADAR_HUD', bus_pt, radar_hud_values, idx))
if CP.carFingerprint == CAR.CIVIC_BOSCH:
commands.append(packer.make_can_msg("LEGACY_BRAKE_COMMAND", bus_pt, {}, idx))
return commands
|
def create_ui_commands(packer, CP, pcm_speed, hud, is_metric, idx, stock_hud):
commands = []
bus_pt = get_pt_bus(CP.carFingerprint)
radar_disabled = CP.carFingerprint in HONDA_BOSCH and CP.openpilotLongitudinalControl
bus_lkas = get_lkas_cmd_bus(CP.carFingerprint, radar_disabled)
if CP.openpilotLongitudinalControl:
if CP.carFingerprint in HONDA_BOSCH:
acc_hud_values = {
'CRUISE_SPEED': hud.v_cruise,
'ENABLE_MINI_CAR': 1,
'SET_TO_1': 1,
'HUD_LEAD': hud.car,
'HUD_DISTANCE': 3,
'ACC_ON': hud.car != 0,
'SET_TO_X1': 1,
'IMPERIAL_UNIT': int(not is_metric),
'FCM_OFF': 1,
}
else:
acc_hud_values = {
'PCM_SPEED': pcm_speed * CV.MS_TO_KPH,
'PCM_GAS': hud.pcm_accel,
'CRUISE_SPEED': hud.v_cruise,
'ENABLE_MINI_CAR': 1,
'HUD_LEAD': hud.car,
'HUD_DISTANCE': 3, # max distance setting on display
'IMPERIAL_UNIT': int(not is_metric),
'SET_ME_X01_2': 1,
'SET_ME_X01': 1,
"FCM_OFF": stock_hud["FCM_OFF"],
"FCM_OFF_2": stock_hud["FCM_OFF_2"],
"FCM_PROBLEM": stock_hud["FCM_PROBLEM"],
"ICONS": stock_hud["ICONS"],
}
commands.append(packer.make_can_msg("ACC_HUD", bus_pt, acc_hud_values, idx))
lkas_hud_values = {
'SET_ME_X41': 0x41,
'SET_ME_X48': 0x48,
'STEERING_REQUIRED': hud.steer_required,
'SOLID_LANES': hud.lanes,
'LANE_LINES': 3,
'DASHED_LANES': hud.lanes,
'BEEP': 0,
'LKAS_PROBLEM': 0 if frame > 200 else 1,
}
if not (CP.flags & HondaFlags.BOSCH_EXT_HUD):
lkas_hud_values['SET_ME_X48'] = 0x48
if CP.flags & HondaFlags.BOSCH_EXT_HUD and not CP.openpilotLongitudinalControl:
commands.append(packer.make_can_msg('LKAS_HUD_A', bus_lkas, lkas_hud_values, idx))
commands.append(packer.make_can_msg('LKAS_HUD_B', bus_lkas, lkas_hud_values, idx))
else:
commands.append(packer.make_can_msg('LKAS_HUD', bus_lkas, lkas_hud_values, idx))
if radar_disabled and CP.carFingerprint in HONDA_BOSCH:
radar_hud_values = {
'CMBS_OFF': 0x01,
'SET_TO_1': 0x01,
}
commands.append(packer.make_can_msg('RADAR_HUD', bus_pt, radar_hud_values, idx))
if CP.carFingerprint == CAR.CIVIC_BOSCH:
commands.append(packer.make_can_msg("LEGACY_BRAKE_COMMAND", bus_pt, {}, idx))
return commands
|
58 |
def get_readable_path(site, path, patterns, encoding=None):
"""Returns real_path and readable_path from the given path.
The patterns is a list of (path_regex, type, property_name, default_value)
tuples.
"""
def match(path):
for pat, _type, _property, default_title in patterns:
m = web.re_compile('^' + pat).match(path)
if m:
prefix = m.group()
extra = web.lstrips(path, prefix)
tokens = extra.split("/", 2)
# `extra` starts with "/". So first token is always empty.
middle = web.listget(tokens, 1, "")
suffix = web.listget(tokens, 2, "")
if suffix:
suffix = "/" + suffix
return _type, _property, default_title, prefix, middle, suffix
return None, None, None, None, None, None
_type, _property, default_title, prefix, middle, suffix = match(path)
if _type is None:
path = web.safeunicode(path)
return (path, path)
if encoding is not None \
or path.endswith(".json") or path.endswith(".yml") or path.endswith(".rdf"):
key, ext = os.path.splitext(path)
thing = _get_object(site, key)
if thing:
path = thing.key + ext
path = web.safeunicode(path)
return (path, path)
thing = _get_object(site, prefix)
# get_object may handle redirections.
if thing:
prefix = thing.key
if thing and thing.type.key == _type:
title = thing.get(_property) or default_title
try:
from urllib.parse import quote_plus
middle = '/' + quote_plus(h.urlsafe(title.strip()))
except ImportError:
middle = '/' + h.urlsafe(title.strip())
else:
middle = ""
if is_exclusion(thing):
web.ctx.exclude = True
prefix = web.safeunicode(prefix)
middle = web.safeunicode(middle)
suffix = web.safeunicode(suffix)
return (prefix + suffix, prefix + middle + suffix)
|
def get_readable_path(site, path, patterns, encoding=None):
"""Returns real_path and readable_path from the given path.
The patterns is a list of (path_regex, type, property_name, default_value)
tuples.
"""
def match(path):
for pat, _type, _property, default_title in patterns:
m = web.re_compile('^' + pat).match(path)
if m:
prefix = m.group()
extra = web.lstrips(path, prefix)
tokens = extra.split("/", 2)
# `extra` starts with "/". So first token is always empty.
middle = web.listget(tokens, 1, "")
suffix = web.listget(tokens, 2, "")
if suffix:
suffix = "/" + suffix
return _type, _property, default_title, prefix, middle, suffix
return None, None, None, None, None, None
_type, _property, default_title, prefix, middle, suffix = match(path)
if _type is None:
path = web.safeunicode(path)
return (path, path)
if encoding is not None \
or path.endswith(".json") or path.endswith(".yml") or path.endswith(".rdf"):
key, ext = os.path.splitext(path)
thing = _get_object(site, key)
if thing:
path = thing.key + ext
path = web.safeunicode(path)
return (path, path)
thing = _get_object(site, prefix)
# get_object may handle redirections.
if thing:
prefix = thing.key
if thing and thing.type.key == _type:
title = thing.get(_property) or default_title
try:
from six.moves.urllib_parse import quote_plus
middle = '/' + quote_plus(h.urlsafe(title.strip()))
except ImportError:
middle = '/' + h.urlsafe(title.strip())
else:
middle = ""
if is_exclusion(thing):
web.ctx.exclude = True
prefix = web.safeunicode(prefix)
middle = web.safeunicode(middle)
suffix = web.safeunicode(suffix)
return (prefix + suffix, prefix + middle + suffix)
|
32,120 |
def list_incidents_command(client, args, is_fetch_incidents=False, first_fetch=False):
demisto.debug("starting the list incidents command")
filter_expression = args.get('filter')
limit = None if is_fetch_incidents else min(200, int(args.get('limit')))
next_link = args.get('next_link', '')
result = None
params = {}
demisto.debug(f"is_fetch_incidents:{is_fetch_incidents}, first_fetch:{first_fetch}")
if next_link:
next_link = next_link.replace('%20', ' ') # OData syntax can't handle '%' character
result = client.http_request('GET', full_url=next_link)
elif is_fetch_incidents and not first_fetch:
params = {'$orderby': 'properties/incidentNumber asc'}
else: # it is a first fetch, or not a part of a fetch command
params = {'$orderby': 'properties/createdTimeUtc asc'}
if not result:
params.update({'$top': limit, '$filter': filter_expression})
remove_nulls_from_dictionary(params)
url_suffix = 'incidents'
result = client.http_request('GET', url_suffix, params=params)
demisto.debug(f"params:{params}")
incidents = [incident_data_to_xsoar_format(inc) for inc in result.get('value')]
demisto.debug(f"number of incidents: {len(incidents)}")
if is_fetch_incidents:
return CommandResults(outputs=incidents, outputs_prefix='AzureSentinel.Incident')
outputs = {'AzureSentinel.Incident(val.ID === obj.ID)': incidents}
update_next_link_in_context(result, outputs)
readable_output = tableToMarkdown(f'Incidents List ({len(incidents)} results)', incidents,
headers=INCIDENT_HEADERS,
headerTransform=pascalToSpace,
removeNull=True)
return CommandResults(
readable_output=readable_output,
outputs=outputs,
raw_response=result
)
|
def list_incidents_command(client, args, is_fetch_incidents=False, first_fetch=False):
demisto.debug("starting the list incidents command")
filter_expression = args.get('filter')
limit = None if is_fetch_incidents else min(200, int(args.get('limit')))
next_link = args.get('next_link', '')
result = None
params = {}
demisto.debug(f"{is_fetch_incidents=}, {first_fetch=}")
if next_link:
next_link = next_link.replace('%20', ' ') # OData syntax can't handle '%' character
result = client.http_request('GET', full_url=next_link)
elif is_fetch_incidents and not first_fetch:
params = {'$orderby': 'properties/incidentNumber asc'}
else: # it is a first fetch, or not a part of a fetch command
params = {'$orderby': 'properties/createdTimeUtc asc'}
if not result:
params.update({'$top': limit, '$filter': filter_expression})
remove_nulls_from_dictionary(params)
url_suffix = 'incidents'
result = client.http_request('GET', url_suffix, params=params)
demisto.debug(f"params:{params}")
incidents = [incident_data_to_xsoar_format(inc) for inc in result.get('value')]
demisto.debug(f"number of incidents: {len(incidents)}")
if is_fetch_incidents:
return CommandResults(outputs=incidents, outputs_prefix='AzureSentinel.Incident')
outputs = {'AzureSentinel.Incident(val.ID === obj.ID)': incidents}
update_next_link_in_context(result, outputs)
readable_output = tableToMarkdown(f'Incidents List ({len(incidents)} results)', incidents,
headers=INCIDENT_HEADERS,
headerTransform=pascalToSpace,
removeNull=True)
return CommandResults(
readable_output=readable_output,
outputs=outputs,
raw_response=result
)
|
31,539 |
def domain_command(client: Client, args: Dict[str, Any], params: Dict[str, Any]) -> List[CommandResults]:
"""
Returns domain's reputation
"""
domains = argToList(args.get('domain'))
since = arg_to_number(args.get('since'), arg_name='since')
until = arg_to_number(args.get('until'), arg_name='until')
limit = arg_to_number(args.get('limit'), arg_name='limit')
reliability = params.get('feedReliability')
results: List[CommandResults] = list()
for domain in domains:
try:
raw_response = client.domain(domain, since, until, limit)
except Exception as exception:
# If anything happens, just keep going
demisto.debug(f'Could not process domain: "{domain}"\n {str(exception)}')
continue
if data := raw_response.get('data'):
score = calculate_dbot_score(reputation_data=data, params=params)
num_of_engines, num_of_positive_engines = calculate_engines(reputation_data=data)
for data_entry in data:
dbot_score = Common.DBotScore(
indicator=domain,
indicator_type=DBotScoreType.DOMAIN,
integration_name=VENDOR_NAME,
score=score,
reliability=reliability,
malicious_description=data_entry.get('description')
)
readable_output = tableToMarkdown(f'{CONTEXT_PREFIX} Result for domain: {domain}:', data_entry)
domain_indicator = Common.Domain(
domain=domain,
dbot_score=dbot_score,
detection_engines=num_of_engines,
positive_detections=num_of_positive_engines
)
result = CommandResults(
outputs_prefix=f'{CONTEXT_PREFIX}.Domain',
outputs_key_field='id',
outputs=data_entry,
indicator=domain_indicator,
readable_output=readable_output,
raw_response=raw_response
)
results.append(result)
else: # no data
dbot_score = Common.DBotScore(
indicator=domain,
indicator_type=DBotScoreType.DOMAIN,
integration_name=VENDOR_NAME,
score=Common.DBotScore.NONE,
reliability=reliability
)
readable_output = f'{CONTEXT_PREFIX} does not have details about domain: {domain} \n'
domain_indicator = Common.Domain(
domain=domain,
dbot_score=dbot_score
)
result = CommandResults(
outputs_prefix=f'{CONTEXT_PREFIX}.Domain',
outputs_key_field='id',
outputs=data,
indicator=domain_indicator,
readable_output=readable_output,
raw_response=raw_response
)
results.append(result)
return results
|
def domain_command(client: Client, args: Dict[str, Any], params: Dict[str, Any]) -> List[CommandResults]:
"""
Returns domain's reputation
"""
domains = argToList(args.get('domain'))
since = arg_to_number(args.get('since'), arg_name='since')
until = arg_to_number(args.get('until'), arg_name='until')
limit = arg_to_number(args.get('limit'), arg_name='limit')
reliability = params.get('feedReliability')
results: List[CommandResults] = list()
for domain in domains:
try:
raw_response = client.domain(domain, since, until, limit)
except Exception as exception:
# If anything happens, just keep going
demisto.debug(f'Could not process domain: "{domain}"\n {str(exception)}')
continue
if data := raw_response.get('data'):
score = calculate_dbot_score(reputation_data=data, params=params)
num_of_engines, num_of_positive_engines = calculate_engines(reputation_data=data)
for data_entry in data:
dbot_score = Common.DBotScore(
indicator=domain,
indicator_type=DBotScoreType.DOMAIN,
integration_name=VENDOR_NAME,
score=score,
reliability=reliability,
malicious_description=data_entry.get('description')
)
readable_output = tableToMarkdown(f'{CONTEXT_PREFIX} Result for domain {domain}', data_entry)
domain_indicator = Common.Domain(
domain=domain,
dbot_score=dbot_score,
detection_engines=num_of_engines,
positive_detections=num_of_positive_engines
)
result = CommandResults(
outputs_prefix=f'{CONTEXT_PREFIX}.Domain',
outputs_key_field='id',
outputs=data_entry,
indicator=domain_indicator,
readable_output=readable_output,
raw_response=raw_response
)
results.append(result)
else: # no data
dbot_score = Common.DBotScore(
indicator=domain,
indicator_type=DBotScoreType.DOMAIN,
integration_name=VENDOR_NAME,
score=Common.DBotScore.NONE,
reliability=reliability
)
readable_output = f'{CONTEXT_PREFIX} does not have details about domain: {domain} \n'
domain_indicator = Common.Domain(
domain=domain,
dbot_score=dbot_score
)
result = CommandResults(
outputs_prefix=f'{CONTEXT_PREFIX}.Domain',
outputs_key_field='id',
outputs=data,
indicator=domain_indicator,
readable_output=readable_output,
raw_response=raw_response
)
results.append(result)
return results
|
34,537 |
def _load_from_module_name_in_endpoint_config(
endpoint_config: EndpointConfig,
) -> "rasa.shared.nlu.interpreter.NaturalLanguageInterpreter":
"""Instantiate an event channel based on its class name."""
try:
nlu_interpreter_class = rasa.shared.utils.common.class_from_module_path(
endpoint_config.type
)
return nlu_interpreter_class(endpoint_config=endpoint_config)
except (AttributeError, ImportError) as e:
raise Exception(
f"Could not find a class based on the module path "
f"'{endpoint_config.type}'. Failed to create a "
f"`NaturalLanguageInterpreter` instance. Error: {e}"
)
|
def _load_from_module_name_in_endpoint_config(
endpoint_config: EndpointConfig,
) -> rasa.shared.nlu.interpreter.NaturalLanguageInterpreter:
"""Instantiate an event channel based on its class name."""
try:
nlu_interpreter_class = rasa.shared.utils.common.class_from_module_path(
endpoint_config.type
)
return nlu_interpreter_class(endpoint_config=endpoint_config)
except (AttributeError, ImportError) as e:
raise Exception(
f"Could not find a class based on the module path "
f"'{endpoint_config.type}'. Failed to create a "
f"`NaturalLanguageInterpreter` instance. Error: {e}"
)
|
14,705 |
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up access to Netatmo cameras."""
home = config.get(CONF_HOME)
verify_ssl = config.get(CONF_VERIFY_SSL, True)
quality = config.get(CONF_QUALITY, DEFAULT_QUALITY)
import pyatmo
conf = hass.data.get(DATA_NETATMO_CONFIG, {})
try:
data = CameraData(hass, conf, home)
for camera_name in data.get_camera_names():
camera_type = data.get_camera_type(camera=camera_name, home=home)
if CONF_CAMERAS in config:
if config[CONF_CAMERAS] != [] and \
camera_name not in config[CONF_CAMERAS]:
continue
add_entities([NetatmoCamera(data, camera_name, home,
camera_type, verify_ssl, quality)])
data.get_persons()
except pyatmo.NoDevice:
return None
|
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up access to Netatmo cameras."""
home = config.get(CONF_HOME)
verify_ssl = config.get(CONF_VERIFY_SSL, True)
quality = config.get(CONF_QUALITY, DEFAULT_QUALITY)
import pyatmo
conf = hass.data.get(DATA_NETATMO_CONFIG, {})
try:
data = CameraData(hass, auth, home)
for camera_name in data.get_camera_names():
camera_type = data.get_camera_type(camera=camera_name, home=home)
if CONF_CAMERAS in config:
if config[CONF_CAMERAS] != [] and \
camera_name not in config[CONF_CAMERAS]:
continue
add_entities([NetatmoCamera(data, camera_name, home,
camera_type, verify_ssl, quality)])
data.get_persons()
except pyatmo.NoDevice:
return None
|
27,813 |
def pytest_addoption(parser: Parser) -> None:
group = parser.getgroup("general")
group._addoption(
"-k",
action="store",
dest="keyword",
default="",
metavar="EXPRESSION",
help="Only run tests which match the given substring expression. "
"An expression is a Python evaluatable expression "
"where all names are substring-matched against test names "
"and their parent classes. Example: -k 'test_method or test_"
"other' matches all test functions and classes whose name "
"contains 'test_method' or 'test_other', while -k 'not test_method' "
"matches those that don't contain 'test_method' in their names. "
"-k 'not test_method and not test_other' will eliminate the matches. "
"Additionally keywords are matched to classes and functions "
"containing extra names in their 'extra_keyword_matches' set, "
"as well as functions which have names assigned directly to them. "
"The matching is case-insensitive.",
)
group._addoption(
"-m",
action="store",
dest="markexpr",
default="",
metavar="MARKEXPR",
help="Only run tests matching given mark expression.\n"
"For example: -m 'mark1 and not mark2'.",
)
group.addoption(
"--markers",
action="store_true",
help="show markers (builtin, plugin and per-project ones).",
)
parser.addini("markers", "Markers for test functions", "linelist")
parser.addini(EMPTY_PARAMETERSET_OPTION, "Default marker for empty parametersets")
|
def pytest_addoption(parser: Parser) -> None:
group = parser.getgroup("general")
group._addoption(
"-k",
action="store",
dest="keyword",
default="",
metavar="EXPRESSION",
help="Only run tests which match the given substring expression. "
"An expression is a Python evaluatable expression "
"where all names are substring-matched against test names "
"and their parent classes. Example: -k 'test_method or test_"
"other' matches all test functions and classes whose name "
"contains 'test_method' or 'test_other', while -k 'not test_method' "
"matches those that don't contain 'test_method' in their names. "
"-k 'not test_method and not test_other' will eliminate the matches. "
"Additionally keywords are matched to classes and functions "
"containing extra names in their 'extra_keyword_matches' set, "
"as well as functions which have names assigned directly to them. "
"The matching is case-insensitive.",
)
group._addoption(
"-m",
action="store",
dest="markexpr",
default="",
metavar="MARKEXPR",
help="Only run tests matching given mark expression. "
"For example: -m 'mark1 and not mark2'.",
)
group.addoption(
"--markers",
action="store_true",
help="show markers (builtin, plugin and per-project ones).",
)
parser.addini("markers", "Markers for test functions", "linelist")
parser.addini(EMPTY_PARAMETERSET_OPTION, "Default marker for empty parametersets")
|
7,311 |
def crop(image, bounding_box, axis=None):
"""Cropping images from a bounding box.
Bounding_box (which is a 2-tuple (min_val, max_val) for each axis)
and (optional) axis for corresponding axis order to bounding_box.
Parameters
----------
Image : ndarray
Input array.
Bounding_box : list of 2-tuple (x, y) where x < y.
Bounding box.
axis : tuple, optional
Axis order for cropping.
if provided, same legth as bounding_box.
Default: None
Returns
----------
out : ndarray
Cropped array.
Examples
--------
>>> from skimage import data
>>> from skimage.util.crop import crop
>>> img = data.camera()
>>> img.shape
(512, 512)
>>> cropped_img = crop(img, [(0, 100)])
>>> cropped_img.shape
(100, 512)
>>> cropped_img = crop(img, [(0, 100), (0, 100)])
>>> cropped_img.shape
(100, 100)
>>> cropped_img = crop(img, [(0, 100), (0, 75)], axis=[1, 0])
>>> cropped_img.shape
(75, 100)
"""
# empty legth of bounding box detected on None detected
if not bounding_box:
return image
# check data isinstance of numpy array
if not isinstance(image, np.ndarray):
raise ValueError("data must be numpy array")
# if not axis provided,
# consider sequential cropping on axis
if not axis:
axis = list(range(len(bounding_box)))
else:
if len(axis) != len(set(axis)):
raise ValueError("axis must be unique")
if len(axis) != len(bounding_box):
raise ValueError("axis and bounding_box must have same length")
if not all(isinstance(a, int) for a in axis):
raise ValueError("axis must be integer")
if not all(a >= 0 for a in axis):
raise ValueError("axis must be positive")
if not all(a < image.ndim for a in axis):
raise ValueError("axis must be less than image.ndim")
bbox_with_axis = list(zip(bounding_box, axis))
# sort axis by decreasing
bbox_with_axis.sort(key=lambda x: x[1], reverse=True)
full_bbox_data = []
for idx in range(image.ndim):
if bbox_with_axis and bbox_with_axis[-1][1] == idx:
bbox, _ = bbox_with_axis.pop()
axis_min, axis_max = bbox
if axis_min > axis_max:
raise ValueError(
"In bounding_box, tuple should be sorted (min_val, max_val)")
if axis_min < 0:
raise ValueError("In bounding_box, values must be positive")
if axis_max < 0:
raise ValueError("In bounding_box, values must be positive")
if axis_min > image.shape[idx]:
raise ValueError("Invalid bounding_box!")
if axis_max > image.shape[idx]:
raise ValueError("Invalid bounding_box!")
full_bbox_data.append(range(*bbox))
else:
full_bbox_data.append(range(image.shape[idx]))
return image[np.ix_(*full_bbox_data)]
|
def crop(image, bounding_box, axis=None):
"""Cropping images from a bounding box.
Bounding_box (which is a 2-tuple (min_val, max_val) for each axis)
and (optional) axis for corresponding axis order to bounding_box.
Parameters
----------
Image : ndarray
Input array.
bounding_box : list of 2-tuple (x, y) where x < y
Bounding box.
axis : tuple, optional
Axis order for cropping.
if provided, same legth as bounding_box.
Default: None
Returns
----------
out : ndarray
Cropped array.
Examples
--------
>>> from skimage import data
>>> from skimage.util.crop import crop
>>> img = data.camera()
>>> img.shape
(512, 512)
>>> cropped_img = crop(img, [(0, 100)])
>>> cropped_img.shape
(100, 512)
>>> cropped_img = crop(img, [(0, 100), (0, 100)])
>>> cropped_img.shape
(100, 100)
>>> cropped_img = crop(img, [(0, 100), (0, 75)], axis=[1, 0])
>>> cropped_img.shape
(75, 100)
"""
# empty legth of bounding box detected on None detected
if not bounding_box:
return image
# check data isinstance of numpy array
if not isinstance(image, np.ndarray):
raise ValueError("data must be numpy array")
# if not axis provided,
# consider sequential cropping on axis
if not axis:
axis = list(range(len(bounding_box)))
else:
if len(axis) != len(set(axis)):
raise ValueError("axis must be unique")
if len(axis) != len(bounding_box):
raise ValueError("axis and bounding_box must have same length")
if not all(isinstance(a, int) for a in axis):
raise ValueError("axis must be integer")
if not all(a >= 0 for a in axis):
raise ValueError("axis must be positive")
if not all(a < image.ndim for a in axis):
raise ValueError("axis must be less than image.ndim")
bbox_with_axis = list(zip(bounding_box, axis))
# sort axis by decreasing
bbox_with_axis.sort(key=lambda x: x[1], reverse=True)
full_bbox_data = []
for idx in range(image.ndim):
if bbox_with_axis and bbox_with_axis[-1][1] == idx:
bbox, _ = bbox_with_axis.pop()
axis_min, axis_max = bbox
if axis_min > axis_max:
raise ValueError(
"In bounding_box, tuple should be sorted (min_val, max_val)")
if axis_min < 0:
raise ValueError("In bounding_box, values must be positive")
if axis_max < 0:
raise ValueError("In bounding_box, values must be positive")
if axis_min > image.shape[idx]:
raise ValueError("Invalid bounding_box!")
if axis_max > image.shape[idx]:
raise ValueError("Invalid bounding_box!")
full_bbox_data.append(range(*bbox))
else:
full_bbox_data.append(range(image.shape[idx]))
return image[np.ix_(*full_bbox_data)]
|
47,932 |
def render_segmentation_data(frame, objects):
# Visualizing result data over source image
return (frame / 2 + apply_color_map(objects) / 2) / 255
|
def render_segmentation_data(frame, objects):
# Visualizing result data over source image
return np.floor_divide(frame, 2) + np.floor_divide(apply_color_map(objects), 2)
|
58,743 |
def migrate_domain_format(
domain_path: Union[Text, Path], out_path: Optional[Union[Text, Path]],
) -> None:
"""Converts 2.0 domain to 3.0 format."""
domain_path = Path(domain_path)
out_path = Path(out_path) if out_path else None
domain_parent_dir = domain_path.parent
migrate_file_only = domain_path.is_file()
# Ensure the backup location does not exist yet
# Note: We demand that file as well as folder with this name gets deleted before
# the command is run to avoid confusion afterwards.
suffix = "original_domain"
suffix = f"{suffix}.yml" if migrate_file_only else suffix
backup_location = domain_parent_dir / suffix
if backup_location.exists():
backup_location_str = "directory" if backup_location.isdir() else "file"
raise RasaException(
f"The domain from '{domain_path}' could not be migrated since the "
f"a {backup_location_str} {backup_location} already exists."
f"Please remove that there is no file or folder at {backup_location}."
)
# Choose a default output location if nothing was specified
if out_path is None:
suffix = DEFAULT_DOMAIN_PATH if migrate_file_only else "new_domain"
out_path = domain_parent_dir / suffix
# Ensure the output location is not already in-use
if not migrate_file_only:
if out_path.is_dir() and any(out_path.iterdir()):
raise RasaException(
f"The domain from '{domain_path}' could not be migrated to "
f"{out_path} because that folder is not empty."
"Please remove the folder and try again."
)
else:
if out_path.is_file():
raise RasaException(
f"The domain from '{domain_path}' could not be migrated to "
f"{out_path} because a file already exists."
"Please remove the file and try again."
)
# Sanity Check: Assert the files to be migrated aren't in 3.0 format already
# Note: we do not enforce that the version tag is 2.0 everywhere + validate that
# migrate-able domain files are among these files later
original_files = (
[file for file in domain_path.iterdir() if Domain.is_domain_file(file)]
if domain_path.is_dir()
else [domain_path]
)
migrated_files = [
file
for file in original_files
if rasa.shared.utils.io.read_yaml_file(file).get("version") == "3.0"
]
if migrated_files:
raise RasaException(
f"Some of the given files ({[file for file in migrated_files]}) "
f"have already been migrated to Rasa 3.0 format. Please remove these "
f"migrated files (or replace them with files in 2.0 format) and try again."
)
# Validate given domain file(s) and migrate them
try:
created_out_dir = False
if not migrate_file_only:
if not out_path.is_dir():
rasa.shared.utils.io.raise_warning(
f"The out path provided did not exist yet. Created directory "
f"{out_path}."
)
out_path.mkdir(parents=True)
created_out_dir = True
backup_location.mkdir()
original_domain = _migrate_domain_files(
domain_path, backup_location, out_path
)
else:
if not Domain.is_domain_file(domain_path):
raise RasaException(
f"The file '{domain_path}' could not be validated as a "
f"domain file. Only domain yaml files can be migrated. "
)
original_domain = _create_back_up(domain_path, backup_location)
new_forms, updated_slots = _migrate_form_slots(original_domain)
new_slots = _migrate_auto_fill_and_custom_slots(original_domain, updated_slots)
_write_final_domain(domain_path, new_forms, new_slots, out_path)
rasa.shared.utils.cli.print_success(
f"Your domain file '{str(domain_path)}' was successfully migrated! "
f"The migrated version is now '{str(out_path)}'. "
f"The original domain file is backed-up at '{str(backup_location)}'."
)
except Exception as e:
# Remove the backups if migration couldn't be completed
if backup_location.is_dir():
shutil.rmtree(backup_location)
if out_path.is_dir():
if created_out_dir:
shutil.rmtree(out_path)
else: # just remove contained files so we do not mess with access rights
for f in out_path.glob("*"):
f.unlink()
if backup_location.is_file():
backup_location.unlink()
raise e
|
def migrate_domain_format(
domain_path: Union[Text, Path], out_path: Optional[Union[Text, Path]],
) -> None:
"""Converts 2.0 domain to 3.0 format."""
domain_path = Path(domain_path)
out_path = Path(out_path) if out_path else None
domain_parent_dir = domain_path.parent
migrate_file_only = domain_path.is_file()
# Ensure the backup location does not exist yet
# Note: We demand that file as well as folder with this name gets deleted before
# the command is run to avoid confusion afterwards.
suffix = "original_domain"
suffix = f"{suffix}.yml" if migrate_file_only else suffix
backup_location = domain_parent_dir / suffix
if backup_location.exists():
backup_location_str = "directory" if backup_location.isdir() else "file"
raise RasaException(
f"The domain from '{domain_path}' could not be migrated since the "
f"a {backup_location_str} {backup_location} already exists."
f"Please remove that there is no file or folder at {backup_location}."
)
# Choose a default output location if nothing was specified
if out_path is None:
suffix = DEFAULT_DOMAIN_PATH if migrate_file_only else "new_domain"
out_path = domain_parent_dir / suffix
# Ensure the output location is not already in-use
if not migrate_file_only:
if out_path.is_dir() and any(out_path.iterdir()):
raise RasaException(
f"The domain from '{domain_path}' could not be migrated to "
f"{out_path} because that folder is not empty."
"Please remove the folder and try again."
)
else:
if out_path.is_file():
raise RasaException(
f"The domain from '{domain_path}' could not be migrated to "
f"{out_path} because a file already exists."
"Please remove the file and try again."
)
# Sanity Check: Assert the files to be migrated aren't in 3.0 format already
# Note: we do not enforce that the version tag is 2.0 everywhere + validate that
# migrate-able domain files are among these files later
original_files = (
[file for file in domain_path.iterdir() if Domain.is_domain_file(file)]
if domain_path.is_dir()
else [domain_path]
)
migrated_files = [
file
for file in original_files
if rasa.shared.utils.io.read_yaml_file(file).get("version") == "3.0"
]
if migrated_files:
raise RasaException(
f"Some of the given files ({[file for file in migrated_files]}) "
f"have already been migrated to Rasa 3.0 format. Please remove these "
f"migrated files (or replace them with files in 2.0 format) and try again."
)
# Validate given domain file(s) and migrate them
try:
created_out_dir = False
if not migrate_file_only:
if not out_path.is_dir():
rasa.shared.utils.io.raise_warning(
f"The out path provided does not exist. Created directory "
f"'{out_path}'."
)
out_path.mkdir(parents=True)
created_out_dir = True
backup_location.mkdir()
original_domain = _migrate_domain_files(
domain_path, backup_location, out_path
)
else:
if not Domain.is_domain_file(domain_path):
raise RasaException(
f"The file '{domain_path}' could not be validated as a "
f"domain file. Only domain yaml files can be migrated. "
)
original_domain = _create_back_up(domain_path, backup_location)
new_forms, updated_slots = _migrate_form_slots(original_domain)
new_slots = _migrate_auto_fill_and_custom_slots(original_domain, updated_slots)
_write_final_domain(domain_path, new_forms, new_slots, out_path)
rasa.shared.utils.cli.print_success(
f"Your domain file '{str(domain_path)}' was successfully migrated! "
f"The migrated version is now '{str(out_path)}'. "
f"The original domain file is backed-up at '{str(backup_location)}'."
)
except Exception as e:
# Remove the backups if migration couldn't be completed
if backup_location.is_dir():
shutil.rmtree(backup_location)
if out_path.is_dir():
if created_out_dir:
shutil.rmtree(out_path)
else: # just remove contained files so we do not mess with access rights
for f in out_path.glob("*"):
f.unlink()
if backup_location.is_file():
backup_location.unlink()
raise e
|
1,252 |
def main():
args, parser = parse_args()
try:
nii = nib.load(args.anatomy)
except Exception:
parser.error("Expecting anatomical image as first agument.")
for tractogram in args.tractograms:
tractogram_format = nib.streamlines.detect_format(tractogram)
if tractogram_format is not nib.streamlines.TckFile:
print(f"Skipping non TCK file: '{tractogram}'")
continue
filename, _ = os.path.splitext(tractogram)
output_filename = filename + '.trk'
if os.path.isfile(output_filename) and not args.force:
msg = (f"Skipping existing file: '{output_filename}'. "
f"Use -f to overwrite.")
print(msg)
continue
# Build header using infos from the anatomical image.
header = {}
header[Field.VOXEL_TO_RASMM] = nii.affine.copy()
header[Field.VOXEL_SIZES] = nii.header.get_zooms()[:3]
header[Field.DIMENSIONS] = nii.shape[:3]
header[Field.VOXEL_ORDER] = "".join(aff2axcodes(nii.affine))
tck = nib.streamlines.load(tractogram)
nib.streamlines.save(tck.tractogram, output_filename, header=header)
|
def main():
args, parser = parse_args()
try:
nii = nib.load(args.anatomy)
except Exception:
parser.error("Expecting anatomical image as first agument.")
for tractogram in args.tractograms:
tractogram_format = nib.streamlines.detect_format(tractogram)
if tractogram_format is not nib.streamlines.TckFile:
print(f"Skipping non TCK file: '{tractogram}'")
continue
filename, _ = os.path.splitext(tractogram)
output_filename = filename + '.trk'
if os.path.isfile(output_filename) and not args.force:
print(f"Skipping existing file: '{output_filename}'. Use -f to overwrite.")
continue
# Build header using infos from the anatomical image.
header = {}
header[Field.VOXEL_TO_RASMM] = nii.affine.copy()
header[Field.VOXEL_SIZES] = nii.header.get_zooms()[:3]
header[Field.DIMENSIONS] = nii.shape[:3]
header[Field.VOXEL_ORDER] = "".join(aff2axcodes(nii.affine))
tck = nib.streamlines.load(tractogram)
nib.streamlines.save(tck.tractogram, output_filename, header=header)
|
13,408 |
def test_04_verify_the_system_dataset_can_move_to_an_encrypted_root_dataset(request):
results = PUT("/systemdataset/", {'pool': 'boot-pool'})
assert results.status_code == 200, results.text
assert isinstance(results.json(), int), results.text
job_status = wait_on_job(results.json(), 120)
assert job_status['state'] == 'SUCCESS', str(job_status['results'])
results = PUT("/systemdataset/", {'pool': 'encrypted'})
assert results.status_code == 200, results.text
assert isinstance(results.json(), int), results.text
job_status = wait_on_job(results.json(), 120)
assert job_status['state'] == 'SUCCESS', str(job_status['results'])
results = GET("/systemdataset/")
assert results.status_code == 200, results.text
assert isinstance(results.json(), dict), results.text
assert results.json()['pool'] == 'encrypted', results.text
assert results.json()['basename'] == 'encrypted/.system', results.text
|
def test_04_verify_sysds_can_move_to_a_passphrase_encrypted_pool(request):
results = PUT("/systemdataset/", {'pool': 'boot-pool'})
assert results.status_code == 200, results.text
assert isinstance(results.json(), int), results.text
job_status = wait_on_job(results.json(), 120)
assert job_status['state'] == 'SUCCESS', str(job_status['results'])
results = PUT("/systemdataset/", {'pool': 'encrypted'})
assert results.status_code == 200, results.text
assert isinstance(results.json(), int), results.text
job_status = wait_on_job(results.json(), 120)
assert job_status['state'] == 'SUCCESS', str(job_status['results'])
results = GET("/systemdataset/")
assert results.status_code == 200, results.text
assert isinstance(results.json(), dict), results.text
assert results.json()['pool'] == 'encrypted', results.text
assert results.json()['basename'] == 'encrypted/.system', results.text
|
43,388 |
def cost(var):
# let coupling matrix be J=[1, -1]
J = np.array([1, -1])
# circuit1 function returns a numpy array of pauliz exp values
spins = circuit1(var[0], var[1])
# the expectation value of pauliZ is plus 1 for spin up and -1 for spin down
energy = -sum(J_ij * spins[i] * spins[i + 1] for i, J_ij in enumerate(J))
return energy
|
def cost(var):
# let coupling matrix be J=[1, -1]
J = np.array([1, -1])
# circuit1 function returns a numpy array of pauliz exp values
spins = circuit1(var[0], var[1])
# the expectation value of pauliZ is plus 1 for spin up and -1 for spin down
energy = -sum(J_ij * spins[i] * spins[i + 1] for i, J_ij in enumerate(J))
return energy
|
56,986 |
def main(args=None):
"""Run the typescript checks."""
parsed_args = _PARSER.parse_args(args=args)
compile_and_check_typescript(
STRICT_TSCONFIG_FILEPATH if parsed_args.strict_checks else
TSCONFIG_FILEPATH)
|
def main(args=None):
"""Run the typescript checks."""
parsed_args = _PARSER.parse_args(args=args)
compile_and_check_typescript(
STRICT_TSCONFIG_FILEPATH
if parsed_args.strict_checks else
TSCONFIG_FILEPATH
)
|
9,191 |
def serialize_in_chunks(
g: Graph,
max_triples: int = 10000,
max_file_size_kb: int = None,
file_name_stem: str = "chunk",
output_dir: Path = Path(__file__).parent,
first_file_contains_prefixes: bool = False,
):
"""
Serializes a given Graph into a series of n-triples with a given length
max_file_size_kb:
Maximum size per NT file in MB
Equivalent to ~6,000 triples
max_triples:
Maximum size per NT file in triples
Equivalent to lines in file
file_name_stem:
Prefix of each file name
e.g. "chunk" = chunk_001.nt, chunk_002.nt...
output_dir:
The directory you want the files to be written to
first_file_contains_prefixes:
The first file created is a Turtle file containing original graph prefixes
See ../test/test_tools/test_chunk_serializer.py for examples of this in use.
"""
if not output_dir.is_dir():
raise ValueError(
"If you specify an output_dir, it must actually be a directory!"
)
def _nt_row(triple):
if isinstance(triple[2], Literal):
return "%s %s %s .\n" % (
triple[0].n3(),
triple[1].n3(),
_quote_literal(triple[2]),
)
else:
return "%s %s %s .\n" % (triple[0].n3(), triple[1].n3(), triple[2].n3())
def _quote_literal(l_):
"""
a simpler version of term.Literal.n3()
"""
encoded = _quote_encode(l_)
if l_.language:
if l_.datatype:
raise Exception("Literal has datatype AND language!")
return "%s@%s" % (encoded, l_.language)
elif l_.datatype:
return "%s^^<%s>" % (encoded, l_.datatype)
else:
return "%s" % encoded
def _quote_encode(l_):
return '"%s"' % l_.replace("\\", "\\\\").replace("\n", "\\n").replace(
'"', '\\"'
).replace("\r", "\\r")
def _start_new_file(file_no):
fp = Path(output_dir) / f"{file_name_stem}_{str(file_no).zfill(6)}.nt"
fh = open(fp, "a")
return fp, fh
def _serialize_prefixes(g):
pres = []
for k, v in g.namespace_manager.namespaces():
pres.append(f"PREFIX {k}: <{v}>")
return "\n".join(sorted(pres)) + "\n"
if first_file_contains_prefixes:
with open(Path(output_dir) / f"{file_name_stem}_000000.ttl", "w") as fh:
fh.write(_serialize_prefixes(g))
if max_file_size_kb is not None:
file_no = 1 if first_file_contains_prefixes else 0
for i, t in enumerate(g.triples((None, None, None))):
if i == 0:
fp, fh = _start_new_file(file_no)
elif os.path.getsize(fp) >= max_file_size_kb * 1000:
file_no += 1
fp, fh = _start_new_file(file_no)
fh.write(_nt_row(t))
else:
# count the triples in the graph
graph_length = len(g)
if graph_length <= max_triples:
# the graph is less than max so just NT serialize the whole thing
g.serialize(
destination=Path(output_dir) / f"{file_name_stem}_all.nt", format="nt"
)
else:
# graph_length is > max_lines, make enough files for all graph
# no_files = math.ceil(graph_length / max_triples)
file_no = 1 if first_file_contains_prefixes else 0
for i, t in enumerate(g.triples((None, None, None))):
if i % max_triples == 0:
fp, fh = _start_new_file(file_no)
file_no += 1
fh.write(_nt_row(t))
return
|
def serialize_in_chunks(
g: Graph,
max_triples: int = 10000,
max_file_size_kb: int = None,
file_name_stem: str = "chunk",
output_dir: Path = Path(__file__).parent,
write_prefixes: bool = False,
):
"""
Serializes a given Graph into a series of n-triples with a given length
max_file_size_kb:
Maximum size per NT file in MB
Equivalent to ~6,000 triples
max_triples:
Maximum size per NT file in triples
Equivalent to lines in file
file_name_stem:
Prefix of each file name
e.g. "chunk" = chunk_001.nt, chunk_002.nt...
output_dir:
The directory you want the files to be written to
first_file_contains_prefixes:
The first file created is a Turtle file containing original graph prefixes
See ../test/test_tools/test_chunk_serializer.py for examples of this in use.
"""
if not output_dir.is_dir():
raise ValueError(
"If you specify an output_dir, it must actually be a directory!"
)
def _nt_row(triple):
if isinstance(triple[2], Literal):
return "%s %s %s .\n" % (
triple[0].n3(),
triple[1].n3(),
_quote_literal(triple[2]),
)
else:
return "%s %s %s .\n" % (triple[0].n3(), triple[1].n3(), triple[2].n3())
def _quote_literal(l_):
"""
a simpler version of term.Literal.n3()
"""
encoded = _quote_encode(l_)
if l_.language:
if l_.datatype:
raise Exception("Literal has datatype AND language!")
return "%s@%s" % (encoded, l_.language)
elif l_.datatype:
return "%s^^<%s>" % (encoded, l_.datatype)
else:
return "%s" % encoded
def _quote_encode(l_):
return '"%s"' % l_.replace("\\", "\\\\").replace("\n", "\\n").replace(
'"', '\\"'
).replace("\r", "\\r")
def _start_new_file(file_no):
fp = Path(output_dir) / f"{file_name_stem}_{str(file_no).zfill(6)}.nt"
fh = open(fp, "a")
return fp, fh
def _serialize_prefixes(g):
pres = []
for k, v in g.namespace_manager.namespaces():
pres.append(f"PREFIX {k}: <{v}>")
return "\n".join(sorted(pres)) + "\n"
if first_file_contains_prefixes:
with open(Path(output_dir) / f"{file_name_stem}_000000.ttl", "w") as fh:
fh.write(_serialize_prefixes(g))
if max_file_size_kb is not None:
file_no = 1 if first_file_contains_prefixes else 0
for i, t in enumerate(g.triples((None, None, None))):
if i == 0:
fp, fh = _start_new_file(file_no)
elif os.path.getsize(fp) >= max_file_size_kb * 1000:
file_no += 1
fp, fh = _start_new_file(file_no)
fh.write(_nt_row(t))
else:
# count the triples in the graph
graph_length = len(g)
if graph_length <= max_triples:
# the graph is less than max so just NT serialize the whole thing
g.serialize(
destination=Path(output_dir) / f"{file_name_stem}_all.nt", format="nt"
)
else:
# graph_length is > max_lines, make enough files for all graph
# no_files = math.ceil(graph_length / max_triples)
file_no = 1 if first_file_contains_prefixes else 0
for i, t in enumerate(g.triples((None, None, None))):
if i % max_triples == 0:
fp, fh = _start_new_file(file_no)
file_no += 1
fh.write(_nt_row(t))
return
|
14,412 |
def test_farnocchia_propagation_very_high_ecc_does_not_fail():
# Regression test for #1296.
r = np.array([-500, 1500, 4012.09]) << u.km
v = np.array([5021.38, -2900.7, 1000.354]) << u.km / u.s
orbit = Orbit.from_vectors(Earth, r, v, epoch=time.Time("2020-01-01"))
tofs = [74] << u.s # tof = 74s and above is the critical region.
coords = propagate(orbit, tofs)
assert not np.isnan(coords[0].get_xyz()).any()
assert not np.isnan(coords[1].get_xyz()).any()
|
def test_farnocchia_propagation_very_high_ecc_does_not_fail():
# Regression test for #1296.
r = np.array([-500, 1500, 4012.09]) << u.km
v = np.array([5021.38, -2900.7, 1000.354]) << u.km / u.s
orbit = Orbit.from_vectors(Earth, r, v, epoch=time.Time("2020-01-01"))
tofs = [74] << u.s # tof = 74s and above is the critical region.
coords = propagate(orbit, tofs)
assert not np.isnan(coords.get_xyz()).any()
|
3,352 |
def resolve_equation_list(
equations: List[str],
selected_columns: List[str],
aggregates_only: Optional[bool] = False,
auto_add: Optional[bool] = False,
) -> List[JsonQueryType]:
"""Given a list of equation strings, resolve them to their equivalent snuba json query formats"""
resolved_equations = []
resolved_columns = selected_columns[:]
for index, equation in enumerate(equations):
# only supporting 1 operation for now
parsed_equation, fields, functions = parse_arithmetic(equation, max_operators=1)
if aggregates_only and len(functions) == 0:
raise InvalidSearchQuery("Only equations on functions are supported")
for field in fields:
if field not in selected_columns:
if auto_add:
resolved_columns.append(field)
else:
raise InvalidSearchQuery(
f"{field} used in an equation but is not a selected field"
)
for function in functions:
if function not in selected_columns:
if auto_add:
resolved_columns.append(function)
else:
raise InvalidSearchQuery(
f"{function} used in an equation but is not a selected function"
)
# We just jam everything into resolved_equations because the json format can't take arithmetic in the aggregates
# field, but can do the aliases in the selected_columns field
# TODO(snql): we can do better
resolved_equations.append(parsed_equation.to_snuba_json(f"equation[{index}]"))
return resolved_equations, resolved_columns
|
def resolve_equation_list(
equations: List[str],
selected_columns: List[str],
aggregates_only: Optional[bool] = False,
auto_add: Optional[bool] = False,
) -> List[JsonQueryType]:
"""Given a list of equation strings, resolve them to their equivalent snuba json query formats"""
resolved_equations = []
resolved_columns = selected_columns[:]
for index, equation in enumerate(equations):
# only supporting 1 operation for now
parsed_equation, fields, functions = parse_arithmetic(equation, max_operators=1)
if aggregates_only and len(functions) == 0:
raise InvalidSearchQuery("Only equations on aggregate functions are supported")
for field in fields:
if field not in selected_columns:
if auto_add:
resolved_columns.append(field)
else:
raise InvalidSearchQuery(
f"{field} used in an equation but is not a selected field"
)
for function in functions:
if function not in selected_columns:
if auto_add:
resolved_columns.append(function)
else:
raise InvalidSearchQuery(
f"{function} used in an equation but is not a selected function"
)
# We just jam everything into resolved_equations because the json format can't take arithmetic in the aggregates
# field, but can do the aliases in the selected_columns field
# TODO(snql): we can do better
resolved_equations.append(parsed_equation.to_snuba_json(f"equation[{index}]"))
return resolved_equations, resolved_columns
|
20,051 |
def _get_dataset_qualities_file(did_cache_dir, dataset_id):
"""API call to load dataset qualities. Loads the from cache or downloads them.
Features are metafeatures (number of features, number of classes, ...)
This function is NOT thread/multiprocessing safe.
Parameters
----------
did_cache_dir : str
Cache subdirectory for this dataset
dataset_id : int
Dataset ID
Returns
-------
str
Path of the cached qualities file
"""
# Dataset qualities are subject to change and must be fetched every time
qualities_file = os.path.join(did_cache_dir, "qualities.xml")
try:
with io.open(qualities_file, encoding="utf8") as fh:
qualities_xml = fh.read()
except (OSError, IOError):
url_extension = "data/qualities/{}".format(dataset_id)
qualities_xml = openml._api_calls._perform_api_call(url_extension, "get")
with io.open(qualities_file, "w", encoding="utf8") as fh:
fh.write(qualities_xml)
return qualities_file
|
def _get_dataset_qualities_file(did_cache_dir, dataset_id):
"""API call to load dataset qualities. Loads from cache or downloads them.
Features are metafeatures (number of features, number of classes, ...)
This function is NOT thread/multiprocessing safe.
Parameters
----------
did_cache_dir : str
Cache subdirectory for this dataset
dataset_id : int
Dataset ID
Returns
-------
str
Path of the cached qualities file
"""
# Dataset qualities are subject to change and must be fetched every time
qualities_file = os.path.join(did_cache_dir, "qualities.xml")
try:
with io.open(qualities_file, encoding="utf8") as fh:
qualities_xml = fh.read()
except (OSError, IOError):
url_extension = "data/qualities/{}".format(dataset_id)
qualities_xml = openml._api_calls._perform_api_call(url_extension, "get")
with io.open(qualities_file, "w", encoding="utf8") as fh:
fh.write(qualities_xml)
return qualities_file
|
25,777 |
def busmap_by_hac(network, n_clusters, buses_i=None, branch_components=["Line", "Link"], feature=None):
"""
Create a busmap accroding to Hierarchical Agglomerative Clustering.
Parameters
----------
network : pypsa.Network
n_clusters : int
Final number of clusters desired.
buses_i: None|pandas.Index
Subset of buses to cluster. If None, all buses are considered.
branch_components: List
Subset of all branch_components in the network. Defaults to ["Line", "Link"].
feature: None | pandas.DataFrame
Feature to be considered for the clustering.
The DataFrame must be indexed with buses_i.
If None, all buses have the same similarity.
Returns
-------
busmap : pandas.Series
Mapping of network.buses to k-means clusters (indexed by
non-negative integers).
"""
if find_spec('sklearn') is None:
raise ModuleNotFoundError("Optional dependency 'sklearn' not found."
"Install via 'conda install -c conda-forge scikit-learn' "
"or 'pip install scikit-learn'")
from sklearn.cluster import AgglomerativeClustering as HAC
if buses_i is None:
buses_i = network.buses.index
if feature is None:
logger.warning(
"No feature is specified for Hierarchical Clustering. "
"Falling back to default, where all buses have equal similarity. "
"You can specify a feature as pandas.DataFrame indexed with buses_i."
)
feature = pd.DataFrame(index=buses_i, columns=[""], data=0)
buses_x = network.buses.index.get_indexer(buses_i)
A = network.adjacency_matrix(branch_components=branch_components).todense()
A = sp.sparse.coo_matrix(A[buses_x].T[buses_x].T)
labels = HAC(n_clusters=n_clusters,
connectivity=A,
affinity='euclidean',
linkage='ward').fit_predict(feature)
busmap = pd.Series(data=labels, index=buses_i, dtype='str')
return busmap
|
def busmap_by_hac(network, n_clusters, buses_i=None, branch_components=["Line", "Link"], feature=None):
"""
Create a busmap accroding to Hierarchical Agglomerative Clustering.
Parameters
----------
network : pypsa.Network
n_clusters : int
Final number of clusters desired.
buses_i: None|pandas.Index
Subset of buses to cluster. If None, all buses are considered.
branch_components: List
Subset of all branch_components in the network. Defaults to ["Line", "Link"].
feature: None | pandas.DataFrame
Feature to be considered for the clustering.
The DataFrame must be indexed with buses_i.
If None, all buses have the same similarity.
Returns
-------
busmap : pandas.Series
Mapping of network.buses to k-means clusters (indexed by
non-negative integers).
"""
if find_spec('sklearn') is None:
raise ModuleNotFoundError("Optional dependency 'sklearn' not found."
"Install via 'conda install -c conda-forge scikit-learn' "
"or 'pip install scikit-learn'")
from sklearn.cluster import AgglomerativeClustering as HAC
if buses_i is None:
buses_i = network.buses.index
if feature is None:
logger.warning(
"No feature is specified for Hierarchical Clustering. "
"Falling back to default, where all buses have equal similarity. "
"You can specify a feature as pandas.DataFrame indexed with buses_i."
)
feature = pd.DataFrame(index=buses_i, columns=[""], data=0)
buses_x = network.buses.index.get_indexer(buses_i)
A = network.adjacency_matrix(branch_components=branch_components).todense()
A = sp.sparse.coo_matrix(A[buses_x].T[buses_x].T)
labels = HAC(n_clusters=n_clusters,
connectivity=A,
affinity='euclidean',
linkage='ward').fit_predict(feature)
busmap = pd.Series(labels, index=buses_i, dtype=str)
return busmap
|
22,075 |
def str2regexp(string):
"""This function takes a string and returns either this string or
a python regexp object, when the string is using the syntax
/regexp[/flags].
"""
if string.startswith('/'):
string = string[1:].rsplit('/', 1)
# Enable slash-escape even if it is not necessary
string[0] = espace_slash(string[0])
if len(string) == 1:
string.append('')
string = re.compile(
string[0],
sum(getattr(re, f.upper()) for f in string[1])
)
else:
string = escape_first_slash(string)
return string
|
def str2regexp(string):
"""This function takes a string and returns either this string or
a python regexp object, when the string is using the syntax
/regexp[/flags].
"""
if string.startswith('/'):
string = string[1:].rsplit('/', 1)
# Enable slash-escape even if it is not necessary
string[0] = _escape_slash(string[0])
if len(string) == 1:
string.append('')
string = re.compile(
string[0],
sum(getattr(re, f.upper()) for f in string[1])
)
else:
string = escape_first_slash(string)
return string
|
57,704 |
def get_scripts_command(client: Client, args: Dict[str, str]) -> Tuple[str, dict, Any]:
script_name: list = argToList(args.get('script_name'))
description: list = argToList(args.get('description'))
created_by: list = argToList(args.get('created_by'))
windows_supported = args.get('windows_supported')
linux_supported = args.get('linux_supported')
macos_supported = args.get('macos_supported')
is_high_risk = args.get('is_high_risk')
scripts = client.get_scripts(
name=script_name,
description=description,
created_by=created_by,
windows_supported=windows_supported,
linux_supported=linux_supported,
macos_supported=macos_supported,
is_high_risk=is_high_risk
)
headers: list = ['name', 'description', 'script_uid', 'modification_date', 'created_by',
'windows_supported', 'linux_supported', 'macos_supported', 'is_high_risk']
return (
tableToMarkdown(name='Scripts', t=scripts, headers=headers, removeNull=True),
{
f'{INTEGRATION_CONTEXT_BRAND}.Scripts(val.script_uid == obj.script_uid)': scripts
},
scripts
)
|
def get_scripts_command(client: Client, args: Dict[str, str]) -> Tuple[str, dict, Any]:
script_name: list = argToList(args.get('script_name'))
description: list = argToList(args.get('description'))
created_by: list = argToList(args.get('created_by'))
windows_supported = args.get('windows_supported')
linux_supported = args.get('linux_supported')
macos_supported = args.get('macos_supported')
is_high_risk = args.get('is_high_risk')
scripts = client.get_scripts(
name=script_name,
description=description,
created_by=created_by,
windows_supported=[windows_supported],
linux_supported=[linux_supported],
macos_supported=[macos_supported],
is_high_risk=[is_high_risk]
)
headers: list = ['name', 'description', 'script_uid', 'modification_date', 'created_by',
'windows_supported', 'linux_supported', 'macos_supported', 'is_high_risk']
return (
tableToMarkdown(name='Scripts', t=scripts, headers=headers, removeNull=True),
{
f'{INTEGRATION_CONTEXT_BRAND}.Scripts(val.script_uid == obj.script_uid)': scripts
},
scripts
)
|
40,831 |
def _registry_from_region(region, registry_dict):
"""Returns the ECR registry (AWS account number) for the given region."""
available_regions = registry_dict.keys()
if region not in available_regions:
raise ValueError(
"Unsupported region: {}. "
"You may need to upgrade your SDK version (pip install -U sagemaker) newer regions. "
"Supported region(s): {}.".format(region, ", ".join(available_regions))
)
return registry_dict[region]
|
def _registry_from_region(region, registry_dict):
"""Returns the ECR registry (AWS account number) for the given region."""
available_regions = registry_dict.keys()
if region not in available_regions:
raise ValueError(
"Unsupported region: {}. "
"You may need to upgrade your SDK version (pip install -U sagemaker) for newer regions. "
"Supported region(s): {}.".format(region, ", ".join(available_regions))
)
return registry_dict[region]
|
9,361 |
def test_wrap_var_list_None():
assert not isinstance(wrap_var([None])[0], AnsibleUnsafe)
|
def test_wrap_var_list_None():
assert wrap_var([None])[0] is None
|
27,850 |
def instance_normalization(x, gamma, beta, eps=2e-5):
"""Instance normalization function.
This function implements a "instance normalization"
which normalizes each sample by its mean and standard deviation.
Args:
x (:class:`~chainer.Variable` or :class:`numpy.ndarray` or \
:class:`cupy.ndarray`): Batch tensors.
First dimension of this value must be the size of minibatch and
second dimension must be the number of channels.
Moreover, this value must have one or more following dimensions,
such as height and width.
gamma (~chainer.Variable): Scaling parameter.
beta (~chainer.Variable): Shifting parameter.
eps (float): Epsilon value for numerical stability of normalization.
Returns:
~chainer.Variable: The output variable which has the same shape
as :math:`x`.
See: `Instance Normalization: The Missing Ingredient for Fast Stylization
<https://arxiv.org/abs/1607.08022>`_
"""
if x.ndim <= 2:
raise ValueError('Input dimension must be grater than 2, '
'including batch size dimension '
'(first dimension).')
batch_size, channels = x.shape[:2]
original_shape = x.shape
x = reshape.reshape(x, (1, batch_size * channels) + x.shape[2:])
gamma = tile.tile(gamma, batch_size)
beta = tile.tile(beta, batch_size)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
x = batch_normalization.batch_normalization(x, gamma, beta, eps=eps)
x = reshape.reshape(x, original_shape)
return x
|
def instance_normalization(x, gamma, beta, eps=2e-5):
"""Instance normalization function.
This function implements a "instance normalization"
which normalizes each sample by its mean and standard deviation.
Args:
x (:class:`~chainer.Variable` or :class:`numpy.ndarray` or \
:class:`cupy.ndarray`): Batch tensors.
First dimension of this value must be the size of minibatch and
second dimension must be the number of channels.
Moreover, this value must have one or more following dimensions,
such as height and width.
gamma (~chainer.Variable): Scaling parameter.
beta (~chainer.Variable): Shifting parameter.
eps (float): Epsilon value for numerical stability of normalization.
Returns:
~chainer.Variable: The output variable which has the same shape
as :math:`x`.
See: `Instance Normalization: The Missing Ingredient for Fast Stylization
<https://arxiv.org/abs/1607.08022>`_
"""
if x.ndim <= 2:
raise ValueError('Input dimension must be greater than 2, '
'including batch size dimension '
'(first dimension).')
batch_size, channels = x.shape[:2]
original_shape = x.shape
x = reshape.reshape(x, (1, batch_size * channels) + x.shape[2:])
gamma = tile.tile(gamma, batch_size)
beta = tile.tile(beta, batch_size)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
x = batch_normalization.batch_normalization(x, gamma, beta, eps=eps)
x = reshape.reshape(x, original_shape)
return x
|
53,189 |
def instance_db_include(field, value):
return 'all'
|
def instance_db_include(field, value):
return get_default_field_value(field, value)
|
13,992 |
def discretise_spiketimes(spiketrains, sampling_rate):
"""
Rounds down all spike times in the input spike train(s)
to multiples of the sampling_rate
Parameters
----------
spiketrains : neo.SpikeTrain or list of neo.SpikeTrain
The spiketrain(s) to discretise
sampling_rate : pq.Quantity
The desired sampling rate
Returns
-------
neo.SpikeTrain or list of neo.SpikeTrain
The discretised spiketrain(s)
"""
# spiketrains type check
was_single_spiketrain = False
if isinstance(spiketrains, (neo.SpikeTrain)):
spiketrains = [spiketrains]
was_single_spiketrain = True
elif isinstance(spiketrains, list):
for st in spiketrains:
if not isinstance(st, (np.ndarray, neo.SpikeTrain)):
raise TypeError(
"spiketrains must be a SpikeTrain, a numpy ndarray, or a "
"list of one of those, not %s." % type(spiketrains))
else:
raise TypeError(
"spiketrains must be a SpikeTrain or a list of SpikeTrain objects,"
" not %s." % type(spiketrains))
units = spiketrains[0].times.units
mag_sampling_rate = sampling_rate.rescale(1/units).magnitude.flatten()
new_spiketrains = []
for spiketrain in spiketrains:
mag_t_start = spiketrain.t_start.rescale(units).magnitude.flatten()
mag_times = spiketrain.times.magnitude.flatten()
discrete_times = (mag_times // (1 / mag_sampling_rate)
/ mag_sampling_rate)
mask = discrete_times < mag_t_start
if np.any(mask):
warnings.warn(f'{mask.sum()} spike(s) would be before t_start '
'and are set to t_start instead.')
discrete_times[mask] = mag_t_start
discrete_times *= units
new_spiketrain = spiketrain.duplicate_with_new_data(discrete_times)
new_spiketrain.annotations = spiketrain.annotations
new_spiketrain.sampling_rate = sampling_rate
new_spiketrains.append(new_spiketrain)
if was_single_spiketrain:
new_spiketrains = new_spiketrains[0]
return new_spiketrains
|
def discretise_spiketimes(spiketrains, sampling_rate):
"""
Rounds down all spike times in the input spike train(s)
to multiples of the sampling_rate
Parameters
----------
spiketrains : neo.SpikeTrain or list of neo.SpikeTrain
The spiketrain(s) to discretise
sampling_rate : pq.Quantity
The desired sampling rate
Returns
-------
neo.SpikeTrain or list of neo.SpikeTrain
The discretised spiketrain(s)
"""
# spiketrains type check
was_single_spiketrain = False
if isinstance(spiketrains, neo.SpikeTrain):
spiketrains = [spiketrains]
was_single_spiketrain = True
elif isinstance(spiketrains, list):
for st in spiketrains:
if not isinstance(st, (np.ndarray, neo.SpikeTrain)):
raise TypeError(
"spiketrains must be a SpikeTrain, a numpy ndarray, or a "
"list of one of those, not %s." % type(spiketrains))
else:
raise TypeError(
"spiketrains must be a SpikeTrain or a list of SpikeTrain objects,"
" not %s." % type(spiketrains))
units = spiketrains[0].times.units
mag_sampling_rate = sampling_rate.rescale(1/units).magnitude.flatten()
new_spiketrains = []
for spiketrain in spiketrains:
mag_t_start = spiketrain.t_start.rescale(units).magnitude.flatten()
mag_times = spiketrain.times.magnitude.flatten()
discrete_times = (mag_times // (1 / mag_sampling_rate)
/ mag_sampling_rate)
mask = discrete_times < mag_t_start
if np.any(mask):
warnings.warn(f'{mask.sum()} spike(s) would be before t_start '
'and are set to t_start instead.')
discrete_times[mask] = mag_t_start
discrete_times *= units
new_spiketrain = spiketrain.duplicate_with_new_data(discrete_times)
new_spiketrain.annotations = spiketrain.annotations
new_spiketrain.sampling_rate = sampling_rate
new_spiketrains.append(new_spiketrain)
if was_single_spiketrain:
new_spiketrains = new_spiketrains[0]
return new_spiketrains
|
56,599 |
def plot_joint(
data,
group="posterior",
var_names=None,
filter_vars=None,
transform=None,
coords=None,
figsize=None,
textsize=None,
kind="scatter",
gridsize="auto",
contour=True,
fill_last=True,
joint_kwargs=None,
marginal_kwargs=None,
ax=None,
backend=None,
backend_kwargs=None,
show=None,
):
"""
Plot a scatter or hexbin of two variables with their respective marginals distributions.
Parameters
----------
data: obj
Any object that can be converted to an :class:`arviz.InferenceData` object
Refer to documentation of :func:`arviz.convert_to_dataset` for details
group: str, optional
Specifies which :class:`arviz.InferenceData` group should be plotted. Defaults to "posterior".
var_names: str or iterable of str
Variables to be plotted. Iterable of two variables or one variable (with subset
having exactly 2 dimensions) are required. Prefix the variables by ``~`` when you
want to exclude them from the plot.
filter_vars: {None, "like", "regex"}, optional, default=None
If `None` (default), interpret var_names as the real variables names. If "like",
interpret var_names as substrings of the real variables names. If "regex",
interpret var_names as regular expressions on the real variables names. A la
``pandas.filter``.
transform: callable
Function to transform data (defaults to None i.e. the identity function)
coords: mapping, optional
Coordinates of var_names to be plotted, passed to :func:`xarray:xarray.Dataset.sel`
figsize: tuple
Figure size. If None it will be defined automatically.
textsize: float
Text size scaling factor for labels, titles and lines. If None it will be autoscaled based
on figsize.
kind: str
Type of plot to display ("scatter", "kde" or "hexbin")
gridsize: int or (int, int), optional.
The number of hexagons in the x-direction. Ignored when hexbin is False. See :func:`matplotlib.pyplot.hexbin`
for details
contour: bool
If True plot the 2D KDE using contours, otherwise plot a smooth 2D KDE. Defaults to True.
fill_last: bool
If True fill the last contour of the 2D KDE plot. Defaults to True.
joint_kwargs: dicts, optional
Additional keywords modifying the join distribution (central subplot)
marginal_kwargs: dicts, optional
Additional keywords modifying the marginals distributions (top and right subplot)
ax: tuple of axes, optional
Tuple containing (ax_joint, ax_hist_x, ax_hist_y). If None, a new figure and axes
will be created. Matplotlib axes or bokeh figures.
backend: str, optional
Select plotting backend {"matplotlib","bokeh"}. Defaults to "matplotlib".
backend_kwargs: bool, optional
These are kwargs specific to the backend being used, passed to
:func:`matplotlib.pyplot.figure` or
:func:`bokeh.plotting.figure`.
show: bool, optional
Call backend show function.
Returns
-------
axes: matplotlib axes or bokeh figures
ax_joint: joint (central) distribution
ax_hist_x: x (top) distribution
ax_hist_y: y (right) distribution
See Also
--------
plot_pair : Plot a scatter, kde and/or hexbin matrix with (optional) marginals on the diagonal.
Examples
--------
Scatter Joint plot
.. plot::
:context: close-figs
>>> import arviz as az
>>> data = az.load_arviz_data('non_centered_eight')
>>> az.plot_joint(data,
>>> var_names=['theta'],
>>> coords={'school': ['Choate', 'Phillips Andover']},
>>> kind='scatter',
>>> figsize=(6, 6))
Hexbin Joint plot
.. plot::
:context: close-figs
>>> az.plot_joint(data,
>>> var_names=['theta'],
>>> coords={'school': ['Choate', 'Phillips Andover']},
>>> kind='hexbin',
>>> figsize=(6, 6))
KDE Joint plot
.. plot::
:context: close-figs
>>> az.plot_joint(data,
>>> var_names=['theta'],
>>> coords={'school': ['Choate', 'Phillips Andover']},
>>> kind='kde',
>>> figsize=(6, 6))
Overlaid plots:
.. plot::
:context: close-figs
>>> data2 = az.load_arviz_data("centered_eight")
>>> kde_kwargs = {"contourf_kwargs": {"alpha": 0}, "contour_kwargs": {"colors": "k"}}
>>> ax = az.plot_joint(
... data, var_names=("mu", "tau"), kind="kde", fill_last=False,
... joint_kwargs=kde_kwargs, marginal_kwargs={"color": "k"}
... )
>>> kde_kwargs["contour_kwargs"]["colors"] = "r"
>>> az.plot_joint(
... data2, var_names=("mu", "tau"), kind="kde", fill_last=False,
... joint_kwargs=kde_kwargs, marginal_kwargs={"color": "r"}, ax=ax
... )
"""
warnings.warn("plot_joint will be deprecated. Please use plot_pair instead.")
valid_kinds = ["scatter", "kde", "hexbin"]
if kind not in valid_kinds:
raise ValueError(
("Plot type {} not recognized." "Plot type must be in {}").format(kind, valid_kinds)
)
data = convert_to_dataset(data, group=group)
if transform is not None:
data = transform(data)
if coords is None:
coords = {}
var_names = _var_names(var_names, data, filter_vars)
plotters = list(xarray_var_iter(get_coords(data, coords), var_names=var_names, combined=True))
if len(plotters) != 2:
raise Exception(f"Number of variables to be plotted must 2 (you supplied {len(plotters)})")
plot_joint_kwargs = dict(
ax=ax,
figsize=figsize,
plotters=plotters,
kind=kind,
contour=contour,
fill_last=fill_last,
joint_kwargs=joint_kwargs,
gridsize=gridsize,
textsize=textsize,
marginal_kwargs=marginal_kwargs,
backend_kwargs=backend_kwargs,
show=show,
)
if backend is None:
backend = rcParams["plot.backend"]
backend = backend.lower()
# TODO: Add backend kwargs
plot = get_plotting_function("plot_joint", "jointplot", backend)
axes = plot(**plot_joint_kwargs)
return axes
|
def plot_joint(
data,
group="posterior",
var_names=None,
filter_vars=None,
transform=None,
coords=None,
figsize=None,
textsize=None,
kind="scatter",
gridsize="auto",
contour=True,
fill_last=True,
joint_kwargs=None,
marginal_kwargs=None,
ax=None,
backend=None,
backend_kwargs=None,
show=None,
):
"""
Plot a scatter or hexbin of two variables with their respective marginals distributions.
Parameters
----------
data: obj
Any object that can be converted to an :class:`arviz.InferenceData` object
Refer to documentation of :func:`arviz.convert_to_dataset` for details
group: str, optional
Specifies which :class:`arviz.InferenceData` group should be plotted. Defaults to "posterior".
var_names: str or iterable of str
Variables to be plotted. Iterable of two variables or one variable (with subset
having exactly 2 dimensions) are required. Prefix the variables by ``~`` when you
want to exclude them from the plot.
filter_vars: {None, "like", "regex"}, optional, default=None
If `None` (default), interpret var_names as the real variables names. If "like",
interpret var_names as substrings of the real variables names. If "regex",
interpret var_names as regular expressions on the real variables names. A la
``pandas.filter``.
transform: callable
Function to transform data (defaults to None i.e. the identity function)
coords: mapping, optional
Coordinates of var_names to be plotted, passed to :func:`xarray:xarray.Dataset.sel`
figsize: tuple
Figure size. If None it will be defined automatically.
textsize: float
Text size scaling factor for labels, titles and lines. If None it will be autoscaled based
on figsize.
kind: str
Type of plot to display ("scatter", "kde" or "hexbin")
gridsize: int or (int, int), optional.
The number of hexagons in the x-direction. Ignored when hexbin is False. See :func:`matplotlib.pyplot.hexbin`
for details
contour: bool
If True plot the 2D KDE using contours, otherwise plot a smooth 2D KDE. Defaults to True.
fill_last: bool
If True fill the last contour of the 2D KDE plot. Defaults to True.
joint_kwargs: dicts, optional
Additional keywords modifying the join distribution (central subplot)
marginal_kwargs: dicts, optional
Additional keywords modifying the marginals distributions (top and right subplot)
ax: tuple of axes, optional
Tuple containing (ax_joint, ax_hist_x, ax_hist_y). If None, a new figure and axes
will be created. Matplotlib axes or bokeh figures.
backend: str, optional
Select plotting backend {"matplotlib","bokeh"}. Defaults to "matplotlib".
backend_kwargs: bool, optional
These are kwargs specific to the backend being used, passed to
:func:`matplotlib.pyplot.figure` or
:func:`bokeh.plotting.figure`.
show: bool, optional
Call backend show function.
Returns
-------
axes: matplotlib axes or bokeh figures
ax_joint: joint (central) distribution
ax_hist_x: x (top) distribution
ax_hist_y: y (right) distribution
See Also
--------
plot_pair : Plot a scatter, kde and/or hexbin matrix with (optional) marginals on the diagonal.
.. deprecated:: 0.9.0
Use :func:`arviz.plot_pair` instead.
Examples
--------
Scatter Joint plot
.. plot::
:context: close-figs
>>> import arviz as az
>>> data = az.load_arviz_data('non_centered_eight')
>>> az.plot_joint(data,
>>> var_names=['theta'],
>>> coords={'school': ['Choate', 'Phillips Andover']},
>>> kind='scatter',
>>> figsize=(6, 6))
Hexbin Joint plot
.. plot::
:context: close-figs
>>> az.plot_joint(data,
>>> var_names=['theta'],
>>> coords={'school': ['Choate', 'Phillips Andover']},
>>> kind='hexbin',
>>> figsize=(6, 6))
KDE Joint plot
.. plot::
:context: close-figs
>>> az.plot_joint(data,
>>> var_names=['theta'],
>>> coords={'school': ['Choate', 'Phillips Andover']},
>>> kind='kde',
>>> figsize=(6, 6))
Overlaid plots:
.. plot::
:context: close-figs
>>> data2 = az.load_arviz_data("centered_eight")
>>> kde_kwargs = {"contourf_kwargs": {"alpha": 0}, "contour_kwargs": {"colors": "k"}}
>>> ax = az.plot_joint(
... data, var_names=("mu", "tau"), kind="kde", fill_last=False,
... joint_kwargs=kde_kwargs, marginal_kwargs={"color": "k"}
... )
>>> kde_kwargs["contour_kwargs"]["colors"] = "r"
>>> az.plot_joint(
... data2, var_names=("mu", "tau"), kind="kde", fill_last=False,
... joint_kwargs=kde_kwargs, marginal_kwargs={"color": "r"}, ax=ax
... )
"""
warnings.warn("plot_joint will be deprecated. Please use plot_pair instead.")
valid_kinds = ["scatter", "kde", "hexbin"]
if kind not in valid_kinds:
raise ValueError(
("Plot type {} not recognized." "Plot type must be in {}").format(kind, valid_kinds)
)
data = convert_to_dataset(data, group=group)
if transform is not None:
data = transform(data)
if coords is None:
coords = {}
var_names = _var_names(var_names, data, filter_vars)
plotters = list(xarray_var_iter(get_coords(data, coords), var_names=var_names, combined=True))
if len(plotters) != 2:
raise Exception(f"Number of variables to be plotted must 2 (you supplied {len(plotters)})")
plot_joint_kwargs = dict(
ax=ax,
figsize=figsize,
plotters=plotters,
kind=kind,
contour=contour,
fill_last=fill_last,
joint_kwargs=joint_kwargs,
gridsize=gridsize,
textsize=textsize,
marginal_kwargs=marginal_kwargs,
backend_kwargs=backend_kwargs,
show=show,
)
if backend is None:
backend = rcParams["plot.backend"]
backend = backend.lower()
# TODO: Add backend kwargs
plot = get_plotting_function("plot_joint", "jointplot", backend)
axes = plot(**plot_joint_kwargs)
return axes
|
58,022 |
def get_blocklist(client: Client, args: dict) -> CommandResults:
"""
Retrieve the blocklist (SentinelOne Term: Blacklist)
"""
tenant_str = args.get('global', 'false')
tenant = tenant_str.lower() == 'true'
sort_by = "updatedAt"
sort_order = "desc"
skip = int(args.get('skip', "0"))
limit = int(args.get('limit', "100"))
group_ids = args.get('group_ids', None)
site_ids = args.get('site_ids', None)
account_ids = args.get('account_ids', None)
contents = []
block_list = client.get_blocklist_request(tenant=tenant, group_ids=group_ids, site_ids=site_ids,
account_ids=account_ids, skip=skip, limit=limit,
sort_by=sort_by, sort_order=sort_order)
for block in block_list:
contents.append({
'CreatedAt': block.get('createdAt'),
'Description': block.get('description'),
'ID': block.get('id'),
'OSType': block.get('osType'),
'ScopeName': block.get('scopeName'),
'ScopePath': block.get('scopePath'),
'Source': block.get('source'),
'Type': block.get('type'),
'UpdatedAt': block.get('updatedAt'),
'UserId': block.get('userId'),
'Value': block.get('value')
})
return CommandResults(
readable_output=tableToMarkdown('SentinelOne Blocklist', contents, removeNull=True),
outputs_prefix='SentinelOne.Blocklist',
outputs_key_field='Value',
outputs=contents,
raw_response=block_list)
|
def get_blocklist(client: Client, args: dict) -> CommandResults:
"""
Retrieve the blocklist (SentinelOne Term: Blacklist)
"""
tenant_str = args.get('global', 'false')
tenant = tenant_str.lower() == 'true'
sort_by = "updatedAt"
sort_order = "desc"
offset = arg_to_number(args.get('offset', '0'))
limit = arg_to_number(args.get('limit', '0'))
group_ids = argToList(args.get('group_ids', []))
site_ids = argToList(args.get('site_ids', []))
account_ids = argToList(args.get('account_ids', []))
contents = []
block_list = client.get_blocklist_request(tenant=tenant, group_ids=group_ids, site_ids=site_ids,
account_ids=account_ids, skip=skip, limit=limit,
sort_by=sort_by, sort_order=sort_order)
for block in block_list:
contents.append({
'CreatedAt': block.get('createdAt'),
'Description': block.get('description'),
'ID': block.get('id'),
'OSType': block.get('osType'),
'ScopeName': block.get('scopeName'),
'ScopePath': block.get('scopePath'),
'Source': block.get('source'),
'Type': block.get('type'),
'UpdatedAt': block.get('updatedAt'),
'UserId': block.get('userId'),
'Value': block.get('value')
})
return CommandResults(
readable_output=tableToMarkdown('SentinelOne Blocklist', contents, removeNull=True),
outputs_prefix='SentinelOne.Blocklist',
outputs_key_field='Value',
outputs=contents,
raw_response=block_list)
|
49,093 |
def signsimp(expr, evaluate=None):
"""Make all Add sub-expressions canonical wrt sign.
Explanation
===========
If an Add subexpression, ``a``, can have a sign extracted,
as determined by could_extract_minus_sign, it is replaced
with Mul(-1, a, evaluate=False). This allows signs to be
extracted from powers and products.
Examples
========
>>> from sympy import signsimp, exp, symbols
>>> from sympy.abc import x, y
>>> i = symbols('i', odd=True)
>>> n = -1 + 1/x
>>> n/x/(-n)**2 - 1/n/x
(-1 + 1/x)/(x*(1 - 1/x)**2) - 1/(x*(-1 + 1/x))
>>> signsimp(_)
0
>>> x*n + x*-n
x*(-1 + 1/x) + x*(1 - 1/x)
>>> signsimp(_)
0
Since powers automatically handle leading signs
>>> (-2)**i
-2**i
signsimp can be used to put the base of a power with an integer
exponent into canonical form:
>>> n**i
(-1 + 1/x)**i
By default, signsimp doesn't leave behind any hollow simplification:
if making an Add canonical wrt sign didn't change the expression, the
original Add is restored. If this is not desired then the keyword
``evaluate`` can be set to False:
>>> e = exp(y - x)
>>> signsimp(e) == e
True
>>> signsimp(e, evaluate=False)
exp(-(x - y))
"""
if evaluate is None:
evaluate = global_parameters.evaluate
expr = sympify(expr)
if not isinstance(expr, (Expr, Relational)) or expr.is_Atom:
return expr
e = sub_post(sub_pre(expr))
if not isinstance(e, (Expr, Relational)) or e.is_Atom:
return e
if e.is_Add:
if e.could_extract_minus_sign():
neg = True
e = -e
else:
neg = False
rv = e.func(*[signsimp(a, evaluate) for a in e.args])
if evaluate:
return -rv if neg else rv
return Mul(-1, rv, evaluate=False) if neg else rv
if evaluate:
e = e.xreplace({m: -(-m) for m in e.atoms(Mul) if -(-m) != m})
return e
|
def signsimp(expr, evaluate=None):
"""Make all Add sub-expressions canonical wrt sign.
Explanation
===========
If an Add subexpression, ``a``, can have a sign extracted,
as determined by could_extract_minus_sign, it is replaced
with Mul(-1, a, evaluate=False). This allows signs to be
extracted from powers and products.
Examples
========
>>> from sympy import signsimp, exp, symbols
>>> from sympy.abc import x, y
>>> i = symbols('i', odd=True)
>>> n = -1 + 1/x
>>> n/x/(-n)**2 - 1/n/x
(-1 + 1/x)/(x*(1 - 1/x)**2) - 1/(x*(-1 + 1/x))
>>> signsimp(_)
0
>>> x*n + x*-n
x*(-1 + 1/x) + x*(1 - 1/x)
>>> signsimp(_)
0
Since powers automatically handle leading signs
>>> (-2)**i
-2**i
signsimp can be used to put the base of a power with an integer
exponent into canonical form:
>>> n**i
(-1 + 1/x)**i
By default, signsimp doesn't leave behind any hollow simplification:
if making an Add canonical wrt sign didn't change the expression, the
original Add is restored. If this is not desired then the keyword
``evaluate`` can be set to False:
>>> e = exp(y - x)
>>> signsimp(e) == e
True
>>> signsimp(e, evaluate=False)
exp(-(x - y))
"""
if evaluate is None:
evaluate = global_parameters.evaluate
expr = sympify(expr)
if not isinstance(expr, (Expr, Relational)) or expr.is_Atom:
return expr
e = sub_post(sub_pre(expr))
if not isinstance(e, (Expr, Relational)) or e.is_Atom:
return e
if e.is_Add:
rv = e.func(*[signsimp(a) for a in e.args])
if not evaluate and isinstance(rv, Add) and rv.could_extract_minus_sign():
return Mul(S.NegativeOne, -rv, evaluate=False)
return rv
if evaluate:
e = e.xreplace({m: -(-m) for m in e.atoms(Mul) if -(-m) != m})
return e
|
4,565 |
def high_variance_confounds(series, n_confounds=5, percentile=2.,
detrend=True):
""" Return confounds time series extracted from series with highest
variance.
Parameters
----------
series: numpy.ndarray
Timeseries. A timeseries is a column in the "series" array.
shape (sample number, feature number)
n_confounds: int, optional
Number of confounds to return
percentile: float, optional
Highest-variance series percentile to keep before computing the
singular value decomposition, 0. <= `percentile` <= 100.
series.shape[0] * percentile / 100 must be greater than n_confounds
detrend: bool, optional
If True, detrend timeseries before processing.
Returns
-------
v: numpy.ndarray
highest variance confounds. Shape: (samples, n_confounds)
Notes
-----
This method is related to what has been published in the literature
as 'CompCor' footcite:`BEHZADI200790`.
The implemented algorithm does the following:
- compute sum of squares for each time series (no mean removal)
- keep a given percentile of series with highest variances (percentile)
- compute an svd of the extracted series
- return a given number (n_confounds) of series from the svd with
highest singular values.
References
----------
.. footbibliography::
See also
--------
nilearn.image.high_variance_confounds
"""
if detrend:
series = _detrend(series) # copy
# Retrieve the voxels|features with highest variance
# Compute variance without mean removal.
var = _mean_of_squares(series)
var_thr = np.nanpercentile(var, 100. - percentile)
series = series[:, var > var_thr] # extract columns (i.e. features)
# Return the singular vectors with largest singular values
# We solve the symmetric eigenvalue problem here, increasing stability
s, u = linalg.eigh(series.dot(series.T) / series.shape[0])
ix_ = np.argsort(s)[::-1]
u = u[:, ix_[:n_confounds]].copy()
return u
|
def high_variance_confounds(series, n_confounds=5, percentile=2.,
detrend=True):
""" Return confounds time series extracted from series with highest
variance.
Parameters
----------
series: numpy.ndarray
Timeseries. A timeseries is a column in the "series" array.
shape (sample number, feature number)
n_confounds: int, optional
Number of confounds to return
percentile: float, optional
Highest-variance series percentile to keep before computing the
singular value decomposition, 0. <= `percentile` <= 100.
series.shape[0] * percentile / 100 must be greater than n_confounds
detrend: bool, optional
If True, detrend timeseries before processing.
Returns
-------
v: numpy.ndarray
highest variance confounds. Shape: (samples, n_confounds)
Notes
-----
This method is related to what has been published in the literature
as 'CompCor' :footcite:`BEHZADI200790`.
The implemented algorithm does the following:
- compute sum of squares for each time series (no mean removal)
- keep a given percentile of series with highest variances (percentile)
- compute an svd of the extracted series
- return a given number (n_confounds) of series from the svd with
highest singular values.
References
----------
.. footbibliography::
See also
--------
nilearn.image.high_variance_confounds
"""
if detrend:
series = _detrend(series) # copy
# Retrieve the voxels|features with highest variance
# Compute variance without mean removal.
var = _mean_of_squares(series)
var_thr = np.nanpercentile(var, 100. - percentile)
series = series[:, var > var_thr] # extract columns (i.e. features)
# Return the singular vectors with largest singular values
# We solve the symmetric eigenvalue problem here, increasing stability
s, u = linalg.eigh(series.dot(series.T) / series.shape[0])
ix_ = np.argsort(s)[::-1]
u = u[:, ix_[:n_confounds]].copy()
return u
|
13,265 |
def magic(
*modules_or_functions: Union[Callable, types.ModuleType],
except_: Except = (),
style: str = "pytest",
) -> str:
"""Guess which ghostwriters to use, for a module or collection of functions.
As for all ghostwriters, the ``except_`` argument should be an
:class:`python:Exception` or tuple of exceptions, and ``style`` may be either
``"pytest"`` to write test functions or ``"unittest"`` to write test methods
and :class:`~python:unittest.TestCase`.
After finding the public functions attached to any modules, the ``magic``
ghostwriter looks for pairs of functions to pass to :func:`~roundtrip`,
then checks for :func:`~binary_operation` and :func:`~ufunc` functions,
and any others are passed to :func:`~fuzz`.
For example, try :command:`hypothesis write gzip` on the command line!
"""
except_ = _check_except(except_)
_check_style(style)
if not modules_or_functions:
raise InvalidArgument("Must pass at least one function or module to test.")
functions = set()
for thing in modules_or_functions:
if callable(thing):
functions.add(thing)
elif isinstance(thing, types.ModuleType):
if hasattr(thing, "__all__"):
funcs = [getattr(thing, name) for name in thing.__all__] # type: ignore
else:
funcs = [
v
for k, v in vars(thing).items()
if callable(v) and not k.startswith("_")
]
for f in funcs:
try:
if callable(f) and inspect.signature(f).parameters:
functions.add(f)
except ValueError:
pass
else:
raise InvalidArgument(f"Can't test non-module non-callable {thing!r}")
imports = set()
parts = []
by_name = {_get_qualname(f, include_module=True): f for f in functions}
# Look for pairs of functions that roundtrip, based on known naming patterns.
for writename, readname in ROUNDTRIP_PAIRS:
for name in sorted(by_name):
match = re.fullmatch(writename, name.split(".")[-1])
if match:
inverse_name = readname.format(*match.groups())
for other in [n for n in by_name if n.split(".")[-1] == inverse_name]:
imp, body = _make_roundtrip_body(
(by_name.pop(name), by_name.pop(other)),
except_=except_,
style=style,
)
imports |= imp
parts.append(body)
# Look for equivalent functions: same name, all required arguments of any can
# be found in all signatures, and if all have return-type annotations they match.
names = defaultdict(list)
for _, f in sorted(by_name.items()):
names[_get_qualname(f)].append(f)
for group in names.values():
if len(group) >= 2 and len({frozenset(_get_params(f)) for f in group}) == 1:
sentinel = object()
returns = {get_type_hints(f).get("return", sentinel) for f in group}
if len(returns - {sentinel}) <= 1:
imp, body = _make_equiv_body(group, except_=except_, style=style)
imports |= imp
parts.append(body)
for f in group:
by_name.pop(_get_qualname(f, include_module=True))
# Look for binary operators - functions with two identically-typed arguments,
# and the same return type. The latter restriction might be lifted later.
for name, func in sorted(by_name.items()):
hints = get_type_hints(func)
hints.pop("return", None)
if len(hints) == len(_get_params(func)) == 2:
a, b = hints.values()
if a == b:
imp, body = _make_binop_body(func, except_=except_, style=style)
imports |= imp
parts.append(body)
del by_name[name]
# Look for Numpy ufuncs or gufuncs, and write array-oriented tests for them.
if "numpy" in sys.modules:
for name, func in sorted(by_name.items()):
if _is_probably_ufunc(func):
imp, body = _make_ufunc_body(func, except_=except_, style=style)
imports |= imp
parts.append(body)
del by_name[name]
# For all remaining callables, just write a fuzz-test. In principle we could
# guess at equivalence or idempotence; but it doesn't seem accurate enough to
# be worth the trouble when it's so easy for the user to specify themselves.
for _, f in sorted(by_name.items()):
imp, body = _make_test_body(
f, test_body=_write_call(f), except_=except_, ghost="fuzz", style=style,
)
imports |= imp
parts.append(body)
return _make_test(imports, "\n".join(parts))
|
def magic(
*modules_or_functions: Union[Callable, types.ModuleType],
except_: Except = (),
style: str = "pytest",
) -> str:
"""Guess which ghostwriters to use, for a module or collection of functions.
As for all ghostwriters, the ``except_`` argument should be an
:class:`python:Exception` or tuple of exceptions, and ``style`` may be either
``"pytest"`` to write test functions or ``"unittest"`` to write test methods
and :class:`~python:unittest.TestCase`.
After finding the public functions attached to any modules, the ``magic``
ghostwriter looks for pairs of functions to pass to :func:`~roundtrip`,
then checks for :func:`~binary_operation` and :func:`~ufunc` functions,
and any others are passed to :func:`~fuzz`.
For example, try :command:`hypothesis write gzip` on the command line!
"""
except_ = _check_except(except_)
_check_style(style)
if not modules_or_functions:
raise InvalidArgument("Must pass at least one function or module to test.")
functions = set()
for thing in modules_or_functions:
if callable(thing):
functions.add(thing)
elif isinstance(thing, types.ModuleType):
if hasattr(thing, "__all__"):
funcs = [getattr(thing, name) for name in thing.__all__] # type: ignore
else:
funcs = [
v
for k, v in vars(thing).items()
if callable(v) and not k.startswith("_")
]
for f in funcs:
try:
if callable(f) and inspect.signature(f).parameters:
functions.add(f)
except ValueError:
pass
else:
raise InvalidArgument(f"Can't test non-module non-callable {thing!r}")
imports = set()
parts = []
by_name = {_get_qualname(f, include_module=True): f for f in functions}
# Look for pairs of functions that roundtrip, based on known naming patterns.
for writename, readname in ROUNDTRIP_PAIRS:
for name in sorted(by_name):
match = re.fullmatch(writename, name.split(".")[-1])
if match:
inverse_name = readname.format(*match.groups())
for other in (n for n in by_name if n.split(".")[-1] == inverse_name):
imp, body = _make_roundtrip_body(
(by_name.pop(name), by_name.pop(other)),
except_=except_,
style=style,
)
imports |= imp
parts.append(body)
# Look for equivalent functions: same name, all required arguments of any can
# be found in all signatures, and if all have return-type annotations they match.
names = defaultdict(list)
for _, f in sorted(by_name.items()):
names[_get_qualname(f)].append(f)
for group in names.values():
if len(group) >= 2 and len({frozenset(_get_params(f)) for f in group}) == 1:
sentinel = object()
returns = {get_type_hints(f).get("return", sentinel) for f in group}
if len(returns - {sentinel}) <= 1:
imp, body = _make_equiv_body(group, except_=except_, style=style)
imports |= imp
parts.append(body)
for f in group:
by_name.pop(_get_qualname(f, include_module=True))
# Look for binary operators - functions with two identically-typed arguments,
# and the same return type. The latter restriction might be lifted later.
for name, func in sorted(by_name.items()):
hints = get_type_hints(func)
hints.pop("return", None)
if len(hints) == len(_get_params(func)) == 2:
a, b = hints.values()
if a == b:
imp, body = _make_binop_body(func, except_=except_, style=style)
imports |= imp
parts.append(body)
del by_name[name]
# Look for Numpy ufuncs or gufuncs, and write array-oriented tests for them.
if "numpy" in sys.modules:
for name, func in sorted(by_name.items()):
if _is_probably_ufunc(func):
imp, body = _make_ufunc_body(func, except_=except_, style=style)
imports |= imp
parts.append(body)
del by_name[name]
# For all remaining callables, just write a fuzz-test. In principle we could
# guess at equivalence or idempotence; but it doesn't seem accurate enough to
# be worth the trouble when it's so easy for the user to specify themselves.
for _, f in sorted(by_name.items()):
imp, body = _make_test_body(
f, test_body=_write_call(f), except_=except_, ghost="fuzz", style=style,
)
imports |= imp
parts.append(body)
return _make_test(imports, "\n".join(parts))
|
1,265 |
def test_load_file_that_cannot_be_read(tmp_path):
subdir = tmp_path.joinpath("img.nii.gz")
subdir.mkdir()
with pytest.raises(ImageFileError, match="Could not read"):
load(subdir)
|
def test_load_file_that_cannot_be_read(tmp_path):
subdir = tmp_path / "img.nii.gz"
subdir.mkdir()
with pytest.raises(ImageFileError, match="Could not read"):
load(subdir)
|
20,516 |
def get_parser():
parser = argparse.ArgumentParser(
description='Perform manipulations on images (e.g., pad, change space, split along dimension). '
'Inputs can be a number, a 4d image, or several 3d images separated with ","',
formatter_class=SmartFormatter,
add_help=None,
prog=os.path.basename(__file__).strip('.py'))
mandatory = parser.add_argument_group('MANDATORY ARGUMENTS')
mandatory.add_argument(
'-i',
nargs='+',
metavar=Metavar.file,
help='Input file(s). If several inputs: separate them by white space. Example: "data.nii.gz"',
required = True)
optional = parser.add_argument_group('OPTIONAL ARGUMENTS')
optional.add_argument(
'-h',
'--help',
action='help',
help='Show this help message and exit')
optional.add_argument(
'-o',
metavar=Metavar.file,
help='Output file. Example: data_pad.nii.gz',
required = False)
image = parser.add_argument_group('IMAGE OPERATIONS')
image.add_argument(
'-pad',
metavar=Metavar.list,
help='Pad 3D image. Specify padding as: "x,y,z" (in voxel). Example: "0,0,1"',
required = False)
image.add_argument(
'-pad-asym',
metavar=Metavar.list,
help='Pad 3D image with asymmetric padding. Specify padding as: "x_i,x_f,y_i,y_f,z_i,z_f" (in voxel). '
'Example: "0,0,5,10,1,1"',
required = False)
image.add_argument(
'-split',
help='Split data along the specified dimension. The suffix _DIM+NUMBER will be added to the intput file name.',
required = False,
choices = ('x', 'y', 'z', 't'))
image.add_argument(
'-concat',
help='Concatenate data along the specified dimension',
required = False,
choices = ('x', 'y', 'z', 't'))
image.add_argument(
'-remove-vol',
metavar=Metavar.list,
help='Remove specific volumes from a 4d volume. Separate with ",". Example: "0,5,10"',
required=False)
image.add_argument(
'-keep-vol',
metavar=Metavar.list,
help='Keep specific volumes from a 4d volume (remove others). Separate with ",". Example: "1,2,3,11"',
required=False)
image.add_argument(
'-type',
help='Change file type',
required = False,
choices = ('uint8','int16','int32','float32','complex64','float64','int8','uint16','uint32','int64','uint64'))
header = parser.add_argument_group('HEADER OPERATIONS')
header.add_argument(
'-copy-header',
metavar=Metavar.file,
help='Copy the header of the source image (specified in -i) to the destination image (specified here). '
'Example: data_dest.nii.gz',
required = False)
orientation = parser.add_argument_group('ORIENTATION OPERATIONS')
orientation.add_argument(
'-getorient',
help='Get orientation of the input image',
action='store_true',
required=False)
orientation.add_argument(
'-setorient',
help='Set orientation of the input image (only modifies the header).',
choices='RIP LIP RSP LSP RIA LIA RSA LSA IRP ILP SRP SLP IRA ILA SRA SLA RPI LPI RAI LAI RPS LPS RAS LAS PRI PLI ARI ALI PRS PLS ARS ALS IPR SPR IAR SAR IPL SPL IAL SAL PIR PSR AIR ASR PIL PSL AIL ASL'.split(),
required = False)
orientation.add_argument(
'-setorient-data',
help='Set orientation of the input image\'s data (does NOT modify the header, but the data). Use with care !',
choices='RIP LIP RSP LSP RIA LIA RSA LSA IRP ILP SRP SLP IRA ILA SRA SLA RPI LPI RAI LAI RPS LPS RAS LAS PRI PLI ARI ALI PRS PLS ARS ALS IPR SPR IAR SAR IPL SPL IAL SAL PIR PSR AIR ASR PIL PSL AIL00 ASL'.split(),
required = False)
multi = parser.add_argument_group('MULTI-COMPONENT OPERATIONS ON ITK COMPOSITE WARPING FIELDS')
multi.add_argument(
'-mcs',
action='store_true',
help='Multi-component split: Split ITK warping field into three separate displacement fields. '
'The suffix _X, _Y and _Z will be added to the input file name.',
required=False)
multi.add_argument(
'-omc',
action='store_true',
help='Multi-component merge: Merge inputted images into one multi-component image. Requires several inputs.',
required=False)
warping = parser.add_argument_group('WARPING FIELD OPERATIONS:')
warping.add_argument(
'-display-warp',
action='store_true',
help='Create a grid and deform it using provided warping field.',
required=False)
multi.add_argument(
'-world2vox',
metavar=Metavar.file,
help='Transform displacement field values from world to voxel coordinate system. Input the file that will be '
'used as the input (source) by FSL applywarp function.',
required=False)
multi.add_argument(
'-to-fsl',
metavar=Metavar.file,
help="""
Transform displacement field values to absolute FSL warps. To be used with FSL\'s applywarp function with the'
`--abs` flag. Input the file that will be used as the input (source) for applywarp and optionally the target
(ref). The target file is necessary for the case where the warp is in a different space than the target. For
example, the inverse warps generated by `sct_straighten_spinalcord`.
""",
nargs = '*',
required=False)
misc = parser.add_argument_group('Misc')
misc.add_argument(
'-v',
type=int,
help='Verbose. 0: nothing. 1: basic. 2: extended.',
required=False,
default=1,
choices=(0, 1, 2))
return parser
|
def get_parser():
parser = argparse.ArgumentParser(
description='Perform manipulations on images (e.g., pad, change space, split along dimension). '
'Inputs can be a number, a 4d image, or several 3d images separated with ","',
formatter_class=SmartFormatter,
add_help=None,
prog=os.path.basename(__file__).strip('.py'))
mandatory = parser.add_argument_group('MANDATORY ARGUMENTS')
mandatory.add_argument(
'-i',
nargs='+',
metavar=Metavar.file,
help='Input file(s). If several inputs: separate them by white space. Example: "data.nii.gz"',
required = True)
optional = parser.add_argument_group('OPTIONAL ARGUMENTS')
optional.add_argument(
'-h',
'--help',
action='help',
help='Show this help message and exit')
optional.add_argument(
'-o',
metavar=Metavar.file,
help='Output file. Example: data_pad.nii.gz',
required = False)
image = parser.add_argument_group('IMAGE OPERATIONS')
image.add_argument(
'-pad',
metavar=Metavar.list,
help='Pad 3D image. Specify padding as: "x,y,z" (in voxel). Example: "0,0,1"',
required = False)
image.add_argument(
'-pad-asym',
metavar=Metavar.list,
help='Pad 3D image with asymmetric padding. Specify padding as: "x_i,x_f,y_i,y_f,z_i,z_f" (in voxel). '
'Example: "0,0,5,10,1,1"',
required = False)
image.add_argument(
'-split',
help='Split data along the specified dimension. The suffix _DIM+NUMBER will be added to the intput file name.',
required = False,
choices = ('x', 'y', 'z', 't'))
image.add_argument(
'-concat',
help='Concatenate data along the specified dimension',
required = False,
choices = ('x', 'y', 'z', 't'))
image.add_argument(
'-remove-vol',
metavar=Metavar.list,
help='Remove specific volumes from a 4d volume. Separate with ",". Example: "0,5,10"',
required=False)
image.add_argument(
'-keep-vol',
metavar=Metavar.list,
help='Keep specific volumes from a 4d volume (remove others). Separate with ",". Example: "1,2,3,11"',
required=False)
image.add_argument(
'-type',
help='Change file type',
required = False,
choices = ('uint8','int16','int32','float32','complex64','float64','int8','uint16','uint32','int64','uint64'))
header = parser.add_argument_group('HEADER OPERATIONS')
header.add_argument(
'-copy-header',
metavar=Metavar.file,
help='Copy the header of the source image (specified in -i) to the destination image (specified here). '
'Example: data_dest.nii.gz',
required = False)
orientation = parser.add_argument_group('ORIENTATION OPERATIONS')
orientation.add_argument(
'-getorient',
help='Get orientation of the input image',
action='store_true',
required=False)
orientation.add_argument(
'-setorient',
help='Set orientation of the input image (only modifies the header).',
choices='RIP LIP RSP LSP RIA LIA RSA LSA IRP ILP SRP SLP IRA ILA SRA SLA RPI LPI RAI LAI RPS LPS RAS LAS PRI PLI ARI ALI PRS PLS ARS ALS IPR SPR IAR SAR IPL SPL IAL SAL PIR PSR AIR ASR PIL PSL AIL ASL'.split(),
required = False)
orientation.add_argument(
'-setorient-data',
help='Set orientation of the input image\'s data (does NOT modify the header, but the data). Use with care !',
choices='RIP LIP RSP LSP RIA LIA RSA LSA IRP ILP SRP SLP IRA ILA SRA SLA RPI LPI RAI LAI RPS LPS RAS LAS PRI PLI ARI ALI PRS PLS ARS ALS IPR SPR IAR SAR IPL SPL IAL SAL PIR PSR AIR ASR PIL PSL AIL ASL'.split(),
required = False)
multi = parser.add_argument_group('MULTI-COMPONENT OPERATIONS ON ITK COMPOSITE WARPING FIELDS')
multi.add_argument(
'-mcs',
action='store_true',
help='Multi-component split: Split ITK warping field into three separate displacement fields. '
'The suffix _X, _Y and _Z will be added to the input file name.',
required=False)
multi.add_argument(
'-omc',
action='store_true',
help='Multi-component merge: Merge inputted images into one multi-component image. Requires several inputs.',
required=False)
warping = parser.add_argument_group('WARPING FIELD OPERATIONS:')
warping.add_argument(
'-display-warp',
action='store_true',
help='Create a grid and deform it using provided warping field.',
required=False)
multi.add_argument(
'-world2vox',
metavar=Metavar.file,
help='Transform displacement field values from world to voxel coordinate system. Input the file that will be '
'used as the input (source) by FSL applywarp function.',
required=False)
multi.add_argument(
'-to-fsl',
metavar=Metavar.file,
help="""
Transform displacement field values to absolute FSL warps. To be used with FSL\'s applywarp function with the'
`--abs` flag. Input the file that will be used as the input (source) for applywarp and optionally the target
(ref). The target file is necessary for the case where the warp is in a different space than the target. For
example, the inverse warps generated by `sct_straighten_spinalcord`.
""",
nargs = '*',
required=False)
misc = parser.add_argument_group('Misc')
misc.add_argument(
'-v',
type=int,
help='Verbose. 0: nothing. 1: basic. 2: extended.',
required=False,
default=1,
choices=(0, 1, 2))
return parser
|
1,668 |
def lasso_path(X, y, eps=1e-3, n_alphas=100, alphas=None,
precompute='auto', Xy=None, copy_X=True, coef_init=None,
verbose=False, return_n_iter=False, positive=False, **params):
"""Compute Lasso path with coordinate descent
The Lasso optimization function varies for mono and multi-outputs.
For mono-output tasks it is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
For multi-output tasks it is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
Where::
||W||_21 = \\sum_i \\sqrt{\\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
X : array-like or sparse matrix of shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication. If ``y`` is mono-output then ``X``
can be sparse.
y : ndarray of shape (n_samples,), or (n_samples, n_outputs)
Target values
eps : float, default=1e-3
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, default=100
Number of alphas along the regularization path
alphas : ndarray, default=None
List of alphas where to compute the models.
If ``None`` alphas are set automatically
precompute : True | False | 'auto' | array-like, default='auto'
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
Xy : array-like, default=None
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
copy_X : bool, default=True
If ``True``, X will be copied; else, it may be overwritten.
coef_init : array of shape (n_features, ), default=None
The initial values of the coefficients.
verbose : bool or int, default=False
Amount of verbosity.
return_n_iter : bool, default=False
whether to return the number of iterations or not.
positive : bool, default=False
If set to True, forces coefficients to be positive.
(Only allowed when ``y.ndim == 1``).
**params : kwargs
keyword arguments passed to the coordinate descent solver.
Returns
-------
alphas : array of shape (n_alphas,)
The alphas along the path where models are computed.
coefs : array of shape (n_features, n_alphas) or \
(n_outputs, n_features, n_alphas)
Coefficients along the path.
dual_gaps : array of shape (n_alphas,)
The dual gaps at the end of the optimization for each alpha.
n_iters : array-like of shape (n_alphas,)
The number of iterations taken by the coordinate descent optimizer to
reach the specified tolerance for each alpha.
Notes
-----
For an example, see
:ref:`examples/linear_model/plot_lasso_coordinate_descent_path.py
<sphx_glr_auto_examples_linear_model_plot_lasso_coordinate_descent_path.py>`.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
Note that in certain cases, the Lars solver may be significantly
faster to implement this functionality. In particular, linear
interpolation can be used to retrieve model coefficients between the
values output by lars_path
Examples
--------
Comparing lasso_path and lars_path with interpolation:
>>> X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
>>> y = np.array([1, 2, 3.1])
>>> # Use lasso_path to compute a coefficient path
>>> _, coef_path, _ = lasso_path(X, y, alphas=[5., 1., .5])
>>> print(coef_path)
[[0. 0. 0.46874778]
[0.2159048 0.4425765 0.23689075]]
>>> # Now use lars_path and 1D linear interpolation to compute the
>>> # same path
>>> from sklearn.linear_model import lars_path
>>> alphas, active, coef_path_lars = lars_path(X, y, method='lasso')
>>> from scipy import interpolate
>>> coef_path_continuous = interpolate.interp1d(alphas[::-1],
... coef_path_lars[:, ::-1])
>>> print(coef_path_continuous([5., 1., .5]))
[[0. 0. 0.46915237]
[0.2159048 0.4425765 0.23668876]]
See also
--------
lars_path
Lasso
LassoLars
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
"""
return enet_path(X, y, l1_ratio=1., eps=eps, n_alphas=n_alphas,
alphas=alphas, precompute=precompute, Xy=Xy,
copy_X=copy_X, coef_init=coef_init, verbose=verbose,
positive=positive, return_n_iter=return_n_iter, **params)
|
def lasso_path(X, y, eps=1e-3, n_alphas=100, alphas=None,
precompute='auto', Xy=None, copy_X=True, coef_init=None,
verbose=False, return_n_iter=False, positive=False, **params):
"""Compute Lasso path with coordinate descent
The Lasso optimization function varies for mono and multi-outputs.
For mono-output tasks it is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
For multi-output tasks it is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
Where::
||W||_21 = \\sum_i \\sqrt{\\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
X : array-like or sparse matrix of shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication. If ``y`` is mono-output then ``X``
can be sparse.
y : ndarray of shape (n_samples,), or (n_samples, n_outputs)
Target values
eps : float, default=1e-3
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, default=100
Number of alphas along the regularization path
alphas : ndarray, default=None
List of alphas where to compute the models.
If ``None`` alphas are set automatically
precompute : "auto", bool or array-like, default='auto'
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
Xy : array-like, default=None
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
copy_X : bool, default=True
If ``True``, X will be copied; else, it may be overwritten.
coef_init : array of shape (n_features, ), default=None
The initial values of the coefficients.
verbose : bool or int, default=False
Amount of verbosity.
return_n_iter : bool, default=False
whether to return the number of iterations or not.
positive : bool, default=False
If set to True, forces coefficients to be positive.
(Only allowed when ``y.ndim == 1``).
**params : kwargs
keyword arguments passed to the coordinate descent solver.
Returns
-------
alphas : array of shape (n_alphas,)
The alphas along the path where models are computed.
coefs : array of shape (n_features, n_alphas) or \
(n_outputs, n_features, n_alphas)
Coefficients along the path.
dual_gaps : array of shape (n_alphas,)
The dual gaps at the end of the optimization for each alpha.
n_iters : array-like of shape (n_alphas,)
The number of iterations taken by the coordinate descent optimizer to
reach the specified tolerance for each alpha.
Notes
-----
For an example, see
:ref:`examples/linear_model/plot_lasso_coordinate_descent_path.py
<sphx_glr_auto_examples_linear_model_plot_lasso_coordinate_descent_path.py>`.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
Note that in certain cases, the Lars solver may be significantly
faster to implement this functionality. In particular, linear
interpolation can be used to retrieve model coefficients between the
values output by lars_path
Examples
--------
Comparing lasso_path and lars_path with interpolation:
>>> X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
>>> y = np.array([1, 2, 3.1])
>>> # Use lasso_path to compute a coefficient path
>>> _, coef_path, _ = lasso_path(X, y, alphas=[5., 1., .5])
>>> print(coef_path)
[[0. 0. 0.46874778]
[0.2159048 0.4425765 0.23689075]]
>>> # Now use lars_path and 1D linear interpolation to compute the
>>> # same path
>>> from sklearn.linear_model import lars_path
>>> alphas, active, coef_path_lars = lars_path(X, y, method='lasso')
>>> from scipy import interpolate
>>> coef_path_continuous = interpolate.interp1d(alphas[::-1],
... coef_path_lars[:, ::-1])
>>> print(coef_path_continuous([5., 1., .5]))
[[0. 0. 0.46915237]
[0.2159048 0.4425765 0.23668876]]
See also
--------
lars_path
Lasso
LassoLars
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
"""
return enet_path(X, y, l1_ratio=1., eps=eps, n_alphas=n_alphas,
alphas=alphas, precompute=precompute, Xy=Xy,
copy_X=copy_X, coef_init=coef_init, verbose=verbose,
positive=positive, return_n_iter=return_n_iter, **params)
|
23,644 |
def get_total_irradiance(surface_tilt, surface_azimuth,
solar_zenith, solar_azimuth,
dni, ghi, dhi, dni_extra=None, airmass=None,
albedo=.25, surface_type=None,
model='isotropic',
model_perez='allsitescomposite1990', **kwargs):
r"""
Determine total in-plane irradiance and its beam, sky diffuse and ground
reflected components, using the specified sky diffuse irradiance model.
.. math::
I_{tot} = I_{beam} + I_{sky diffuse} + I_{ground}
Sky diffuse models include:
* isotropic (default)
* klucher
* haydavies
* reindl
* king
* perez
Parameters
----------
surface_tilt : numeric
Panel tilt from horizontal.[degree]
surface_azimuth : numeric
Panel azimuth from north. [degree]
solar_zenith : numeric
Solar zenith angle. [degree]
solar_azimuth : numeric
Solar azimuth angle. [degree]
dni : numeric
Direct Normal Irradiance. [W/m2]
ghi : numeric
Global horizontal irradiance. [W/m2]
dhi : numeric
Diffuse horizontal irradiance. [W/m2]
dni_extra : None or numeric, default None
Extraterrestrial direct normal irradiance. [W/m2]
airmass : None or numeric, default None
Relative airmass (not adjusted for pressure). [unitless]
albedo : numeric, default 0.25
Surface albedo. [unitless]
surface_type : None or String, default None
Surface type. See :py:func:`~pvlib.irradiance.grounddiffuse` for
the list of accepted values.
model : String, default 'isotropic'
Irradiance model. Can be one of 'isotropic', 'klucher', 'haydavies',
'reindl', 'king', 'perez'.
model_perez : String, default 'allsitescomposite1990'
Used only if model='perez'. See :py:func:`~pvlib.irradiance.perez`.
Returns
-------
total_irrad : OrderedDict or DataFrame
Contains keys/columns ``'poa_global', 'poa_direct', 'poa_diffuse',
'poa_sky_diffuse', 'poa_ground_diffuse'``.
Notes
-----
Models 'haydavies', 'reindl', or 'perez' require 'dni_extra'. Values can
be calculated using :py:func:`~pvlib.irradiance.get_extra_radiation`.
The 'perez' model requires relative airmass ('airmass') as input. If
'airmass' is not provided, it is calculated usign the defaults in
:py:func:`~pvlib.irradiance.get_relative_airmass`.
"""
poa_sky_diffuse = get_sky_diffuse(
surface_tilt, surface_azimuth, solar_zenith, solar_azimuth,
dni, ghi, dhi, dni_extra=dni_extra, airmass=airmass, model=model,
model_perez=model_perez)
poa_ground_diffuse = get_ground_diffuse(surface_tilt, ghi, albedo,
surface_type)
aoi_ = aoi(surface_tilt, surface_azimuth, solar_zenith, solar_azimuth)
irrads = poa_components(aoi_, dni, poa_sky_diffuse, poa_ground_diffuse)
return irrads
|
def get_total_irradiance(surface_tilt, surface_azimuth,
solar_zenith, solar_azimuth,
dni, ghi, dhi, dni_extra=None, airmass=None,
albedo=.25, surface_type=None,
model='isotropic',
model_perez='allsitescomposite1990', **kwargs):
r"""
Determine total in-plane irradiance and its beam, sky diffuse and ground
reflected components, using the specified sky diffuse irradiance model.
.. math::
I_{tot} = I_{beam} + I_{sky diffuse} + I_{ground}
Sky diffuse models include:
* isotropic (default)
* klucher
* haydavies
* reindl
* king
* perez
Parameters
----------
surface_tilt : numeric
Panel tilt from horizontal.[degree]
surface_azimuth : numeric
Panel azimuth from north. [degree]
solar_zenith : numeric
Solar zenith angle. [degree]
solar_azimuth : numeric
Solar azimuth angle. [degree]
dni : numeric
Direct Normal Irradiance. [W/m2]
ghi : numeric
Global horizontal irradiance. [W/m2]
dhi : numeric
Diffuse horizontal irradiance. [W/m2]
dni_extra : None or numeric, default None
Extraterrestrial direct normal irradiance. [W/m2]
airmass : None or numeric, default None
Relative airmass (not adjusted for pressure). [unitless]
albedo : numeric, default 0.25
Surface albedo. [unitless]
surface_type : None or String, default None
Surface type. See :py:func:`~pvlib.irradiance.get_ground_diffuse` for
the list of accepted values.
model : String, default 'isotropic'
Irradiance model. Can be one of 'isotropic', 'klucher', 'haydavies',
'reindl', 'king', 'perez'.
model_perez : String, default 'allsitescomposite1990'
Used only if model='perez'. See :py:func:`~pvlib.irradiance.perez`.
Returns
-------
total_irrad : OrderedDict or DataFrame
Contains keys/columns ``'poa_global', 'poa_direct', 'poa_diffuse',
'poa_sky_diffuse', 'poa_ground_diffuse'``.
Notes
-----
Models 'haydavies', 'reindl', or 'perez' require 'dni_extra'. Values can
be calculated using :py:func:`~pvlib.irradiance.get_extra_radiation`.
The 'perez' model requires relative airmass ('airmass') as input. If
'airmass' is not provided, it is calculated usign the defaults in
:py:func:`~pvlib.irradiance.get_relative_airmass`.
"""
poa_sky_diffuse = get_sky_diffuse(
surface_tilt, surface_azimuth, solar_zenith, solar_azimuth,
dni, ghi, dhi, dni_extra=dni_extra, airmass=airmass, model=model,
model_perez=model_perez)
poa_ground_diffuse = get_ground_diffuse(surface_tilt, ghi, albedo,
surface_type)
aoi_ = aoi(surface_tilt, surface_azimuth, solar_zenith, solar_azimuth)
irrads = poa_components(aoi_, dni, poa_sky_diffuse, poa_ground_diffuse)
return irrads
|
28,004 |
def add_arguments_to_parser(parser):
"""
Add the subcommand's arguments to the given argparse.ArgumentParser.
"""
parser.add_argument('logfile',
type=str,
help="Path to the JSON compilation command database "
"files which were created during the build. "
"The analyzers will check only the files "
"registered in these build databases.")
parser.add_argument('-j', '--jobs',
type=int,
dest="jobs",
required=False,
default=1,
help="Number of threads to use in analysis. More "
"threads mean faster analysis at the cost of "
"using more memory.")
skip_mode = parser.add_mutually_exclusive_group()
skip_mode.add_argument('-i', '--ignore', '--skip',
dest="skipfile",
required=False,
default=argparse.SUPPRESS,
help="Path to the Skipfile dictating which project "
"files should be omitted from analysis. "
"Please consult the User guide on how a "
"Skipfile should be laid out.")
skip_mode.add_argument('--file',
nargs='+',
dest="files",
metavar='FILE',
required=False,
default=argparse.SUPPRESS,
help="Analyze only the given file(s) not the whole "
"compilation database. Absolute directory "
"paths should start with '/', relative "
"directory paths should start with '*' and "
"it can contain path glob pattern. "
"Example: '/path/to/main.cpp', 'lib/*.cpp', "
"*/test*'.")
parser.add_argument('-o', '--output',
dest="output_path",
required=True,
default=argparse.SUPPRESS,
help="Store the analysis output in the given folder.")
parser.add_argument('--compiler-info-file',
dest="compiler_info_file",
required=False,
default=argparse.SUPPRESS,
help="Read the compiler includes and target from the "
"specified file rather than invoke the compiler "
"executable.")
parser.add_argument('--keep-gcc-include-fixed',
dest="keep_gcc_include_fixed",
required=False,
action='store_true',
default=False,
help="There are some implicit include paths which are "
"only used by GCC (include-fixed). This flag "
"determines whether these should be kept among "
"the implicit include paths.")
parser.add_argument('--keep-gcc-intrin',
dest="keep_gcc_intrin",
required=False,
action='store_true',
default=False,
help="There are some implicit include paths which "
"contain GCC-specific header files (those "
"which end with intrin.h). This flag determines "
"whether these should be kept among the implicit "
"include paths. Use this flag if Clang analysis "
"fails with error message related to __builtin "
"symbols.")
parser.add_argument('-t', '--type', '--output-format',
dest="output_format",
required=False,
choices=['plist'],
default='plist',
help="Specify the format the analysis results should "
"use.")
parser.add_argument('--makefile',
dest="makefile",
required=False,
action='store_true',
default=False,
help="Generate a Makefile in the given output "
"directory from the analyzer commands and do not "
"execute the analysis. The analysis can be "
"executed by calling the make command like "
"'make -f output_dir/Makefile'. You can ignore "
"errors with the -i/--ignore-errors options: "
"'make -f output_dir/Makefile -i'.")
parser.add_argument('-q', '--quiet',
dest="quiet",
action='store_true',
default=argparse.SUPPRESS,
required=False,
help="Do not print the output or error of the "
"analyzers to the standard output of "
"CodeChecker.")
parser.add_argument('-c', '--clean',
dest="clean",
required=False,
action='store_true',
default=argparse.SUPPRESS,
help="Delete analysis reports stored in the output "
"directory. (By default, CodeChecker would keep "
"reports and overwrites only those files that "
"were update by the current build command).")
parser.add_argument('--compile-uniqueing',
type=str,
dest="compile_uniqueing",
default="none",
required=False,
help="Specify the method the compilation "
"actions in the compilation database are "
"uniqued before analysis. "
"CTU analysis works properly only if "
"there is exactly one "
"compilation action per source file. "
"none(default in non CTU mode): "
"no uniqueing is done. "
"strict: no uniqueing is done, "
"and an error is given if "
"there is more than one compilation "
"action for a source file. "
"alpha(default in CTU mode): If there is more "
"than one compilation action for a source "
"file, only the one is kept that belongs to the "
"alphabetically first "
"compilation target. "
"If none of the above given, "
"this parameter should "
"be a python regular expression."
"If there is more than one compilation action "
"for a source, "
"only the one is kept which matches the "
"given python regex. If more than one "
"matches an error is given. "
"The whole compilation "
"action text is searched for match.")
parser.add_argument('--report-hash',
dest="report_hash",
default=argparse.SUPPRESS,
required=False,
choices=['context-free', 'context-free-v2'],
help="R|Specify the hash calculation method for "
"reports. By default the calculation method for "
"Clang Static Analyzer is context sensitive and "
"for Clang Tidy it is context insensitive.\n"
"You can use the following calculation methods:\n"
"- context-free: there was a bug and for Clang "
"Tidy not the context free hash was generated "
"(kept for backward compatibility).\n"
"- context-free-v2: context free hash is used "
"for ClangSA and Clang Tidy.\n"
"See the 'issue hashes' section of the help "
"message of this command below for more "
"information.\n"
"USE WISELY AND AT YOUR OWN RISK!")
parser.add_argument('-n', '--name',
dest="name",
required=False,
default=argparse.SUPPRESS,
help="Annotate the run analysis with a custom name in "
"the created metadata file.")
analyzer_opts = parser.add_argument_group("analyzer arguments")
analyzer_opts.add_argument('--analyzers',
nargs='+',
dest='analyzers',
metavar='ANALYZER',
required=False,
choices=analyzer_types.supported_analyzers,
default=argparse.SUPPRESS,
help="Run analysis only with the analyzers "
"specified. Currently supported analyzers "
"are: " +
', '.join(analyzer_types.
supported_analyzers) + ".")
analyzer_opts.add_argument('--capture-analysis-output',
dest='capture_analysis_output',
action='store_true',
default=argparse.SUPPRESS,
required=False,
help="Store standard output and standard error "
"of successful analyzer invocations "
"into the '<OUTPUT_DIR>/success' "
"directory.")
analyzer_opts.add_argument('--config',
dest='config_file',
required=False,
help="R|Allow the configuration from an "
"explicit JSON based configuration file. "
"The value of the 'analyzer' key in the "
"config file will be emplaced as command "
"line arguments. The format of "
"configuration file is:\n"
"{\n"
" \"analyzer\": [\n"
" \"--enable=core.DivideZero\",\n"
" \"--enable=core.CallAndMessage\",\n"
" \"--report-hash=context-free-v2\",\n"
" \"--verbose=debug\",\n"
" \"--clean\"\n"
" ]\n"
"}")
analyzer_opts.add_argument('--saargs',
dest="clangsa_args_cfg_file",
required=False,
default=argparse.SUPPRESS,
help="File containing argument which will be "
"forwarded verbatim for the Clang Static "
"Analyzer.")
analyzer_opts.add_argument('--tidyargs',
dest="tidy_args_cfg_file",
required=False,
default=argparse.SUPPRESS,
help="File containing argument which will be "
"forwarded verbatim for Clang-Tidy.")
analyzer_opts.add_argument('--tidy-config',
dest='tidy_config',
required=False,
default=argparse.SUPPRESS,
help="A file in YAML format containing the "
"configuration of clang-tidy checkers. "
"The file can be dumped by "
"'CodeChecker analyzers --dump-config "
"clang-tidy' command.")
analyzer_opts.add_argument('--analyzer-config',
dest='analyzer_config',
nargs='*',
default=["clang-tidy:HeaderFilterRegex=.*"],
help="Analyzer configuration options in the "
"following format: analyzer:key=value. "
"The collection of the options can be "
"printed with "
"'CodeChecker analyzers "
"--analyzer-config'. To disable the "
"default behaviour of this option you can "
"use the "
"'clang-tidy:take-config-from-directory="
"true' option.")
analyzer_opts.add_argument('--checker-config',
dest='checker_config',
nargs='*',
default=argparse.SUPPRESS,
help="Checker configuration options in the "
"following format: analyzer:key=value. "
"The collection of the options can be "
"printed with "
"'CodeChecker checkers --checker-config'.")
analyzer_opts.add_argument('--timeout',
type=int,
dest='timeout',
required=False,
default=argparse.SUPPRESS,
help="The amount of time (in seconds) that "
"each analyzer can spend, individually, "
"to analyze the project. If the analysis "
"of a particular file takes longer than "
"this time, the analyzer is killed and "
"the analysis is considered as a failed "
"one.")
context = analyzer_context.get_context()
clang_has_z3 = analyzer_types.is_z3_capable(context)
if clang_has_z3:
analyzer_opts.add_argument('--z3',
dest='enable_z3',
choices=['on', 'off'],
default='off',
help="Enable the z3 solver backend. This "
"allows reasoning over more complex "
"queries, but performance is worse "
"than the default range-based "
"constraint solver.")
clang_has_z3_refutation = analyzer_types.is_z3_refutation_capable(context)
if clang_has_z3_refutation:
analyzer_opts.add_argument('--z3-refutation',
dest='enable_z3_refutation',
choices=['on', 'off'],
default='on' if clang_has_z3_refutation
else 'off',
help="Switch on/off the Z3 SMT Solver "
"backend to "
"reduce false positives. The results "
"of the ranged based constraint "
"solver in the Clang Static Analyzer "
"will be cross checked with the Z3 "
"SMT solver. This should not cause "
"that much of a slowdown compared to "
"using the Z3 solver only.")
if analyzer_types.is_ctu_capable(context):
ctu_opts = parser.add_argument_group(
"cross translation unit analysis arguments",
"""
These arguments are only available if the Clang Static Analyzer supports
Cross-TU analysis. By default, no CTU analysis is run when
'CodeChecker analyze' is called.""")
ctu_modes = ctu_opts.add_mutually_exclusive_group()
ctu_modes.add_argument('--ctu', '--ctu-all',
action='store_const',
const=[True, True],
dest='ctu_phases',
default=argparse.SUPPRESS,
help="Perform Cross Translation Unit (CTU) "
"analysis, both 'collect' and 'analyze' "
"phases. In this mode, the extra files "
"created by 'collect' are cleaned up "
"after the analysis.")
ctu_modes.add_argument('--ctu-collect',
action='store_const',
const=[True, False],
dest='ctu_phases',
default=argparse.SUPPRESS,
help="Perform the first, 'collect' phase of "
"Cross-TU analysis. This phase generates "
"extra files needed by CTU analysis, and "
"puts them into '<OUTPUT_DIR>/ctu-dir'. "
"NOTE: If this argument is present, "
"CodeChecker will NOT execute the "
"analyzers!")
ctu_modes.add_argument('--ctu-analyze',
action='store_const',
const=[False, True],
dest='ctu_phases',
default=argparse.SUPPRESS,
help="Perform the second, 'analyze' phase of "
"Cross-TU analysis, using already "
"available extra files in "
"'<OUTPUT_DIR>/ctu-dir'. (These files "
"will not be cleaned up in this mode.)")
ctu_opts.add_argument('--ctu-reanalyze-on-failure',
action='store_true',
dest='ctu_reanalyze_on_failure',
default=argparse.SUPPRESS,
help="DEPRECATED. The flag will be removed. "
"If Cross-TU analysis is enabled and fails "
"for some reason, try to re analyze the "
"same translation unit without "
"Cross-TU enabled.")
# Only check for AST loading modes if CTU is available.
if analyzer_types.is_ctu_on_demand_available(context):
ctu_opts.add_argument('--ctu-ast-mode',
action='store',
dest='ctu_ast_mode',
choices=['load-from-pch', 'parse-on-demand'],
default='load-from-pch',
help="Choose the way ASTs are loaded during "
"CTU analysis. Mode 'load-from-pch' "
"generates PCH format serialized ASTs "
"during the 'collect' phase. Mode "
"'parse-on-demand' only generates the "
"invocations needed to parse the ASTs. "
"Mode 'load-from-pch' can use "
"significant disk-space for the "
"serialized ASTs, while mode "
"'parse-on-demand' can incur some "
"runtime CPU overhead in the second "
"phase of the analysis.")
if analyzer_types.is_statistics_capable(context):
stat_opts = parser.add_argument_group(
"Statistics analysis feature arguments",
"""
These arguments are only available if the Clang Static Analyzer supports
Statistics-based analysis (e.g. statisticsCollector.ReturnValueCheck,
statisticsCollector.SpecialReturnValue checkers are available).""")
stat_opts.add_argument('--stats-collect', '--stats-collect',
action='store',
default=argparse.SUPPRESS,
dest='stats_output',
help="Perform the first, 'collect' phase of "
"Statistical analysis. This phase "
"generates extra files needed by "
"statistics analysis, and "
"puts them into "
"'<STATS_OUTPUT>'."
" NOTE: If this argument is present, "
"CodeChecker will NOT execute the "
"analyzers!")
stat_opts.add_argument('--stats-use', '--stats-use',
action='store',
default=argparse.SUPPRESS,
dest='stats_dir',
help="Use the previously generated statistics "
"results for the analysis from the given "
"'<STATS_DIR>'.")
stat_opts.add_argument('--stats',
action='store_true',
default=argparse.SUPPRESS,
dest='stats_enabled',
help="Perform both phases of "
"Statistical analysis. This phase "
"generates extra files needed by "
"statistics analysis and enables "
"the statistical checkers. "
"No need to enable them explicitly.")
stat_opts.add_argument('--stats-min-sample-count',
action='store',
default="10",
type=int,
dest='stats_min_sample_count',
help="Minimum number of samples (function call"
" occurrences) to be collected"
" for a statistics to be relevant "
"'<MIN-SAMPLE-COUNT>'.")
stat_opts.add_argument('--stats-relevance-threshold',
action='store',
default="0.85",
type=float,
dest='stats_relevance_threshold',
help="The minimum ratio of calls of function "
"f that must have a certain property "
"property to consider it true for that "
"function (calculated as calls "
"with a property/all calls)."
" CodeChecker will warn for"
" calls of f do not have that property."
"'<RELEVANCE_THRESHOLD>'.")
checkers_opts = parser.add_argument_group(
"checker configuration",
"""
Checkers
------------------------------------------------
The analyzer performs checks that are categorized into families or "checkers".
See 'CodeChecker checkers' for the list of available checkers. You can
fine-tune which checkers to use in the analysis by setting the enabled and
disabled flags starting from the bigger groups and going inwards, e.g.
'-e core -d core.uninitialized -e core.uninitialized.Assign' will enable every
'core' checker, but only 'core.uninitialized.Assign' from the
'core.uninitialized' group. Please consult the manual for details. Disabling
certain checkers - such as the 'core' group - is unsupported by the LLVM/Clang
community, and thus discouraged.
Compiler warnings and errors
------------------------------------------------
Compiler warnings are diagnostic messages that report constructions that are
not inherently erroneous but that are risky or suggest there may have been an
error. Compiler warnings are named 'clang-diagnostic-<warning-option>', e.g.
Clang warning controlled by '-Wliteral-conversion' will be reported with check
name 'clang-diagnostic-literal-conversion'. You can fine-tune which warnings to
use in the analysis by setting the enabled and disabled flags starting from the
bigger groups and going inwards, e.g. '-e Wunused -d Wno-unused-parameter' will
enable every 'unused' warnings except 'unused-parameter'. These flags should
start with a capital 'W' or 'Wno-' prefix followed by the waning name (E.g.:
'-e Wliteral-conversion', '-d Wno-literal-conversion'). By default '-Wall' and
'-Wextra' warnings are enabled. For more information see:
https://clang.llvm.org/docs/DiagnosticsReference.html.
Sometimes GCC is more permissive than Clang, so it is possible that a specific
construction doesn't compile with Clang but compiles with GCC. These
compiler errors are also collected as CodeChecker reports as
'clang-diagnostic-error'.
Note that compiler errors and warnings are captured by CodeChecker only if it
was emitted by clang-tidy.""")
checkers_opts.add_argument('-e', '--enable',
dest="enable",
metavar='checker/group/profile',
default=argparse.SUPPRESS,
action=OrderedCheckersAction,
help="Set a checker (or checker group), "
"profile or guideline "
"to BE USED in the analysis. In case of "
"ambiguity the priority order is profile, "
"guideline, checker name (e.g. security "
"means the profile, not the checker "
"group). 'profile:' and 'guideline:' "
"prefixes can be used.")
checkers_opts.add_argument('-d', '--disable',
dest="disable",
metavar='checker/group/profile',
default=argparse.SUPPRESS,
action=OrderedCheckersAction,
help="Set a checker (or checker group), "
"profile or guideline "
"to BE PROHIBITED from use in the "
"analysis. In case of "
"ambiguity the priority order is profile, "
"guideline, checker name (e.g. security "
"means the profile, not the checker "
"group). 'profile:' and 'guideline:' "
"prefixes can be used.")
checkers_opts.add_argument('--enable-all',
dest="enable_all",
action='store_true',
required=False,
default=argparse.SUPPRESS,
help="Force the running analyzers to use "
"almost every checker available. The "
"checker groups 'alpha.', 'debug.' and "
"'osx.' (on Linux) are NOT enabled "
"automatically and must be EXPLICITLY "
"specified. WARNING! Enabling all "
"checkers might result in the analysis "
"losing precision and stability, and "
"could even result in a total failure of "
"the analysis. USE WISELY AND AT YOUR "
"OWN RISK!")
logger.add_verbose_arguments(parser)
parser.set_defaults(func=main,
func_process_config_file=process_config_file)
|
def add_arguments_to_parser(parser):
"""
Add the subcommand's arguments to the given argparse.ArgumentParser.
"""
parser.add_argument('logfile',
type=str,
help="Path to the JSON compilation command database "
"files which were created during the build. "
"The analyzers will check only the files "
"registered in these build databases.")
parser.add_argument('-j', '--jobs',
type=int,
dest="jobs",
required=False,
default=1,
help="Number of threads to use in analysis. More "
"threads mean faster analysis at the cost of "
"using more memory.")
skip_mode = parser.add_mutually_exclusive_group()
skip_mode.add_argument('-i', '--ignore', '--skip',
dest="skipfile",
required=False,
default=argparse.SUPPRESS,
help="Path to the Skipfile dictating which project "
"files should be omitted from analysis. "
"Please consult the User guide on how a "
"Skipfile should be laid out.")
skip_mode.add_argument('--file',
nargs='+',
dest="files",
metavar='FILE',
required=False,
default=argparse.SUPPRESS,
help="Analyze only the given file(s) not the whole "
"compilation database. Absolute directory "
"paths should start with '/', relative "
"directory paths should start with '*' and "
"it can contain path glob pattern. "
"Example: '/path/to/main.cpp', 'lib/*.cpp', "
"*/test*'.")
parser.add_argument('-o', '--output',
dest="output_path",
required=True,
default=argparse.SUPPRESS,
help="Store the analysis output in the given folder.")
parser.add_argument('--compiler-info-file',
dest="compiler_info_file",
required=False,
default=argparse.SUPPRESS,
help="Read the compiler includes and target from the "
"specified file rather than invoke the compiler "
"executable.")
parser.add_argument('--keep-gcc-include-fixed',
dest="keep_gcc_include_fixed",
required=False,
action='store_true',
default=False,
help="There are some implicit include paths which are "
"only used by GCC (include-fixed). This flag "
"determines whether these should be kept among "
"the implicit include paths.")
parser.add_argument('--keep-gcc-intrin',
dest="keep_gcc_intrin",
required=False,
action='store_true',
default=False,
help="There are some implicit include paths which "
"contain GCC-specific header files (those "
"which end with intrin.h). This flag determines "
"whether these should be kept among the implicit "
"include paths. Use this flag if Clang analysis "
"fails with error message related to __builtin "
"symbols.")
parser.add_argument('-t', '--type', '--output-format',
dest="output_format",
required=False,
choices=['plist'],
default='plist',
help="Specify the format the analysis results should "
"use.")
parser.add_argument('--makefile',
dest="makefile",
required=False,
action='store_true',
default=False,
help="Generate a Makefile in the given output "
"directory from the analyzer commands and do not "
"execute the analysis. The analysis can be "
"executed by calling the make command like "
"'make -f output_dir/Makefile'. You can ignore "
"errors with the -i/--ignore-errors options: "
"'make -f output_dir/Makefile -i'.")
parser.add_argument('-q', '--quiet',
dest="quiet",
action='store_true',
default=argparse.SUPPRESS,
required=False,
help="Do not print the output or error of the "
"analyzers to the standard output of "
"CodeChecker.")
parser.add_argument('-c', '--clean',
dest="clean",
required=False,
action='store_true',
default=argparse.SUPPRESS,
help="Delete analysis reports stored in the output "
"directory. (By default, CodeChecker would keep "
"reports and overwrites only those files that "
"were update by the current build command).")
parser.add_argument('--compile-uniqueing',
type=str,
dest="compile_uniqueing",
default="none",
required=False,
help="Specify the method the compilation "
"actions in the compilation database are "
"uniqued before analysis. "
"CTU analysis works properly only if "
"there is exactly one "
"compilation action per source file. "
"none(default in non CTU mode): "
"no uniqueing is done. "
"strict: no uniqueing is done, "
"and an error is given if "
"there is more than one compilation "
"action for a source file. "
"alpha(default in CTU mode): If there is more "
"than one compilation action for a source "
"file, only the one is kept that belongs to the "
"alphabetically first "
"compilation target. "
"If none of the above given, "
"this parameter should "
"be a python regular expression."
"If there is more than one compilation action "
"for a source, "
"only the one is kept which matches the "
"given python regex. If more than one "
"matches an error is given. "
"The whole compilation "
"action text is searched for match.")
parser.add_argument('--report-hash',
dest="report_hash",
default=argparse.SUPPRESS,
required=False,
choices=['context-free', 'context-free-v2'],
help="R|Specify the hash calculation method for "
"reports. By default the calculation method for "
"Clang Static Analyzer is context sensitive and "
"for Clang Tidy it is context insensitive.\n"
"You can use the following calculation methods:\n"
"- context-free: there was a bug and for Clang "
"Tidy not the context free hash was generated "
"(kept for backward compatibility).\n"
"- context-free-v2: context free hash is used "
"for ClangSA and Clang Tidy.\n"
"See the 'issue hashes' section of the help "
"message of this command below for more "
"information.\n"
"USE WISELY AND AT YOUR OWN RISK!")
parser.add_argument('-n', '--name',
dest="name",
required=False,
default=argparse.SUPPRESS,
help="Annotate the run analysis with a custom name in "
"the created metadata file.")
analyzer_opts = parser.add_argument_group("analyzer arguments")
analyzer_opts.add_argument('--analyzers',
nargs='+',
dest='analyzers',
metavar='ANALYZER',
required=False,
choices=analyzer_types.supported_analyzers,
default=argparse.SUPPRESS,
help="Run analysis only with the analyzers "
"specified. Currently supported analyzers "
"are: " +
', '.join(analyzer_types.
supported_analyzers) + ".")
analyzer_opts.add_argument('--capture-analysis-output',
dest='capture_analysis_output',
action='store_true',
default=argparse.SUPPRESS,
required=False,
help="Store standard output and standard error "
"of successful analyzer invocations "
"into the '<OUTPUT_DIR>/success' "
"directory.")
analyzer_opts.add_argument('--config',
dest='config_file',
required=False,
help="R|Allow the configuration from an "
"explicit JSON based configuration file. "
"The value of the 'analyzer' key in the "
"config file will be emplaced as command "
"line arguments. The format of "
"configuration file is:\n"
"{\n"
" \"analyzer\": [\n"
" \"--enable=core.DivideZero\",\n"
" \"--enable=core.CallAndMessage\",\n"
" \"--report-hash=context-free-v2\",\n"
" \"--verbose=debug\",\n"
" \"--clean\"\n"
" ]\n"
"}")
analyzer_opts.add_argument('--saargs',
dest="clangsa_args_cfg_file",
required=False,
default=argparse.SUPPRESS,
help="File containing argument which will be "
"forwarded verbatim for the Clang Static "
"Analyzer.")
analyzer_opts.add_argument('--tidyargs',
dest="tidy_args_cfg_file",
required=False,
default=argparse.SUPPRESS,
help="File containing argument which will be "
"forwarded verbatim for Clang-Tidy.")
analyzer_opts.add_argument('--tidy-config',
dest='tidy_config',
required=False,
default=argparse.SUPPRESS,
help="A file in YAML format containing the "
"configuration of clang-tidy checkers. "
"The file can be dumped by "
"'CodeChecker analyzers --dump-config "
"clang-tidy' command.")
analyzer_opts.add_argument('--analyzer-config',
dest='analyzer_config',
nargs='*',
default=["clang-tidy:HeaderFilterRegex=.*"],
help="Analyzer configuration options in the "
"following format: analyzer:key=value. "
"The collection of the options can be "
"printed with "
"'CodeChecker analyzers "
"--analyzer-config'. To disable the "
"default behaviour of this option you can "
"use the "
"'clang-tidy:take-config-from-directory="
"true' option.")
analyzer_opts.add_argument('--checker-config',
dest='checker_config',
nargs='*',
default=argparse.SUPPRESS,
help="Checker configuration options in the "
"following format: analyzer:key=value. "
"The collection of the options can be "
"printed with "
"'CodeChecker checkers --checker-config'.")
analyzer_opts.add_argument('--timeout',
type=int,
dest='timeout',
required=False,
default=argparse.SUPPRESS,
help="The amount of time (in seconds) that "
"each analyzer can spend, individually, "
"to analyze the project. If the analysis "
"of a particular file takes longer than "
"this time, the analyzer is killed and "
"the analysis is considered as a failed "
"one.")
context = analyzer_context.get_context()
clang_has_z3 = analyzer_types.is_z3_capable(context)
if clang_has_z3:
analyzer_opts.add_argument('--z3',
dest='enable_z3',
choices=['on', 'off'],
default='off',
help="Enable the z3 solver backend. This "
"allows reasoning over more complex "
"queries, but performance is worse "
"than the default range-based "
"constraint solver.")
clang_has_z3_refutation = analyzer_types.is_z3_refutation_capable(context)
if clang_has_z3_refutation:
analyzer_opts.add_argument('--z3-refutation',
dest='enable_z3_refutation',
choices=['on', 'off'],
default='on' if clang_has_z3_refutation
else 'off',
help="Switch on/off the Z3 SMT Solver "
"backend to "
"reduce false positives. The results "
"of the ranged based constraint "
"solver in the Clang Static Analyzer "
"will be cross checked with the Z3 "
"SMT solver. This should not cause "
"that much of a slowdown compared to "
"using the Z3 solver only.")
if analyzer_types.is_ctu_capable(context):
ctu_opts = parser.add_argument_group(
"cross translation unit analysis arguments",
"""
These arguments are only available if the Clang Static Analyzer supports
Cross-TU analysis. By default, no CTU analysis is run when
'CodeChecker analyze' is called.""")
ctu_modes = ctu_opts.add_mutually_exclusive_group()
ctu_modes.add_argument('--ctu', '--ctu-all',
action='store_const',
const=[True, True],
dest='ctu_phases',
default=argparse.SUPPRESS,
help="Perform Cross Translation Unit (CTU) "
"analysis, both 'collect' and 'analyze' "
"phases. In this mode, the extra files "
"created by 'collect' are cleaned up "
"after the analysis.")
ctu_modes.add_argument('--ctu-collect',
action='store_const',
const=[True, False],
dest='ctu_phases',
default=argparse.SUPPRESS,
help="Perform the first, 'collect' phase of "
"Cross-TU analysis. This phase generates "
"extra files needed by CTU analysis, and "
"puts them into '<OUTPUT_DIR>/ctu-dir'. "
"NOTE: If this argument is present, "
"CodeChecker will NOT execute the "
"analyzers!")
ctu_modes.add_argument('--ctu-analyze',
action='store_const',
const=[False, True],
dest='ctu_phases',
default=argparse.SUPPRESS,
help="Perform the second, 'analyze' phase of "
"Cross-TU analysis, using already "
"available extra files in "
"'<OUTPUT_DIR>/ctu-dir'. (These files "
"will not be cleaned up in this mode.)")
ctu_opts.add_argument('--ctu-reanalyze-on-failure',
action='store_true',
dest='ctu_reanalyze_on_failure',
default=argparse.SUPPRESS,
help="DEPRECATED. The flag will be removed. "
"If Cross-TU analysis is enabled and fails "
"for some reason, try to re analyze the "
"same translation unit without "
"Cross-TU enabled.")
# Only check for AST loading modes if CTU is available.
if analyzer_types.is_ctu_on_demand_available(context):
ctu_opts.add_argument('--ctu-ast-mode',
action='store',
dest='ctu_ast_mode',
choices=['load-from-pch', 'parse-on-demand'],
default='load-from-pch',
help="Choose the way ASTs are loaded during "
"CTU analysis. Mode 'load-from-pch' "
"generates PCH format serialized ASTs "
"during the 'collect' phase. Mode "
"'parse-on-demand' only generates the "
"invocations needed to parse the ASTs. "
"Mode 'load-from-pch' can use "
"significant disk-space for the "
"serialized ASTs, while mode "
"'parse-on-demand' can incur some "
"runtime CPU overhead in the second "
"phase of the analysis.")
if analyzer_types.is_statistics_capable(context):
stat_opts = parser.add_argument_group(
"Statistics analysis feature arguments",
"""
These arguments are only available if the Clang Static Analyzer supports
Statistics-based analysis (e.g. statisticsCollector.ReturnValueCheck,
statisticsCollector.SpecialReturnValue checkers are available).""")
stat_opts.add_argument('--stats-collect', '--stats-collect',
action='store',
default=argparse.SUPPRESS,
dest='stats_output',
help="Perform the first, 'collect' phase of "
"Statistical analysis. This phase "
"generates extra files needed by "
"statistics analysis, and "
"puts them into "
"'<STATS_OUTPUT>'."
" NOTE: If this argument is present, "
"CodeChecker will NOT execute the "
"analyzers!")
stat_opts.add_argument('--stats-use', '--stats-use',
action='store',
default=argparse.SUPPRESS,
dest='stats_dir',
help="Use the previously generated statistics "
"results for the analysis from the given "
"'<STATS_DIR>'.")
stat_opts.add_argument('--stats',
action='store_true',
default=argparse.SUPPRESS,
dest='stats_enabled',
help="Perform both phases of "
"Statistical analysis. This phase "
"generates extra files needed by "
"statistics analysis and enables "
"the statistical checkers. "
"No need to enable them explicitly.")
stat_opts.add_argument('--stats-min-sample-count',
action='store',
default="10",
type=int,
dest='stats_min_sample_count',
help="Minimum number of samples (function call"
" occurrences) to be collected"
" for a statistics to be relevant "
"'<MIN-SAMPLE-COUNT>'.")
stat_opts.add_argument('--stats-relevance-threshold',
action='store',
default="0.85",
type=float,
dest='stats_relevance_threshold',
help="The minimum ratio of calls of function "
"f that must have a certain property "
"property to consider it true for that "
"function (calculated as calls "
"with a property/all calls)."
" CodeChecker will warn for"
" calls of f do not have that property."
"'<RELEVANCE_THRESHOLD>'.")
checkers_opts = parser.add_argument_group(
"checker configuration",
"""
Checkers
------------------------------------------------
The analyzer performs checks that are categorized into families or "checkers".
See 'CodeChecker checkers' for the list of available checkers. You can
fine-tune which checkers to use in the analysis by setting the enabled and
disabled flags starting from the bigger groups and going inwards, e.g.
'-e core -d core.uninitialized -e core.uninitialized.Assign' will enable every
'core' checker, but only 'core.uninitialized.Assign' from the
'core.uninitialized' group. Please consult the manual for details. Disabling
certain checkers - such as the 'core' group - is unsupported by the LLVM/Clang
community, and thus discouraged.
Compiler warnings and errors
------------------------------------------------
Compiler warnings are diagnostic messages that report constructions that are
not inherently erroneous but that are risky or suggest there may have been an
error. Compiler warnings are named 'clang-diagnostic-<warning-option>', e.g.
Clang warning controlled by '-Wliteral-conversion' will be reported with check
name 'clang-diagnostic-literal-conversion'. You can fine-tune which warnings to
use in the analysis by setting the enabled and disabled flags starting from the
bigger groups and going inwards, e.g. '-e Wunused -d Wno-unused-parameter' will
enable every 'unused' warnings except 'unused-parameter'. These flags should
start with a capital 'W' or 'Wno-' prefix followed by the waning name (E.g.:
'-e Wliteral-conversion', '-d Wno-literal-conversion'). By default '-Wall' and
'-Wextra' warnings are enabled. For more information see:
https://clang.llvm.org/docs/DiagnosticsReference.html.
Sometimes GCC is more permissive than Clang, so it is possible that a specific
construction doesn't compile with Clang but compiles with GCC. These
compiler errors are also collected as CodeChecker reports as
'clang-diagnostic-error'.
Note that compiler errors and warnings are captured by CodeChecker only if it
was emitted by clang-tidy.""")
checkers_opts.add_argument('-e', '--enable',
dest="enable",
metavar='checker/group/profile',
default=argparse.SUPPRESS,
action=OrderedCheckersAction,
help="Set a checker (or checker group), "
"profile or guideline "
"to BE USED in the analysis. In case of "
"ambiguity the priority order is profile, "
"guideline, checker name (e.g. security "
"means the profile, not the checker "
"group). 'profile:' and 'guideline:' "
"prefixes can be used. For example: 'profile:security', 'guideline:sei-cert'.")
checkers_opts.add_argument('-d', '--disable',
dest="disable",
metavar='checker/group/profile',
default=argparse.SUPPRESS,
action=OrderedCheckersAction,
help="Set a checker (or checker group), "
"profile or guideline "
"to BE PROHIBITED from use in the "
"analysis. In case of "
"ambiguity the priority order is profile, "
"guideline, checker name (e.g. security "
"means the profile, not the checker "
"group). 'profile:' and 'guideline:' "
"prefixes can be used.")
checkers_opts.add_argument('--enable-all',
dest="enable_all",
action='store_true',
required=False,
default=argparse.SUPPRESS,
help="Force the running analyzers to use "
"almost every checker available. The "
"checker groups 'alpha.', 'debug.' and "
"'osx.' (on Linux) are NOT enabled "
"automatically and must be EXPLICITLY "
"specified. WARNING! Enabling all "
"checkers might result in the analysis "
"losing precision and stability, and "
"could even result in a total failure of "
"the analysis. USE WISELY AND AT YOUR "
"OWN RISK!")
logger.add_verbose_arguments(parser)
parser.set_defaults(func=main,
func_process_config_file=process_config_file)
|
31,769 |
def delete_context_table_records(client: Client, args: Dict) -> Tuple[Any, Dict[str, Any], Optional[Any]]:
""" Deletes records of a context table.
Args:
client: Client
args: Dict
"""
context_table_name = args['context_table_name']
session_id = args.get('session_id')
records = argToList(args.get('records'))
records_raw_data = client.list_context_table_records_request(context_table_name, 10000, 1)
all_records = records_raw_data.get('records', [])
filtered = list(filter(lambda record: (record['key'] in records), all_records))
ids = [record['id'] for record in filtered]
record_updates_raw_data = client.delete_context_table_records_request(context_table_name, ids, session_id)
human_readable, entry_context = create_context_table_updates_outputs(context_table_name, record_updates_raw_data)
return human_readable, entry_context, record_updates_raw_data
|
def delete_context_table_records(client: Client, args: Dict) -> Tuple[Any, Dict[str, Any], Optional[Any]]:
""" Deletes records of a context table.
Args:
client: Client
args: Dict
"""
context_table_name = args['context_table_name']
session_id = args.get('session_id')
records = argToList(args.get('records'))
records_raw_data = client.list_context_table_records_request(context_table_name, 10000, 1)
all_records = records_raw_data.get('records', [])
ids = [record['id'] for record in filtered if record['key'] in records]
record_updates_raw_data = client.delete_context_table_records_request(context_table_name, ids, session_id)
human_readable, entry_context = create_context_table_updates_outputs(context_table_name, record_updates_raw_data)
return human_readable, entry_context, record_updates_raw_data
|
43,084 |
def loads(s, ir="blackbird"):
"""Load a quantum program from a string.
Args:
s (str): string containing the Blackbird or XIR circuit
ir (str): Intermediate representation language to use. Can be either "blackbird" or "xir".
Returns:
prog (Program): Strawberry Fields program
"""
if ir == "xir":
prog = xir.parse_script(s)
elif ir == "blackbird":
prog = blackbird.loads(s)
else:
raise ValueError(
f"'{ir}' not recognized as a valid XIR option. Valid options are 'xir' and 'blackbird'."
)
return to_program(prog)
|
def loads(s, ir="blackbird"):
"""Load a quantum program from a string.
Args:
s (str): string containing the Blackbird or XIR circuit
ir (str): Intermediate representation language to use. Can be either "blackbird" or "xir".
Returns:
prog (Program): Strawberry Fields program
"""
if ir == "xir":
prog = xir.parse_script(s)
elif ir == "blackbird":
prog = blackbird.loads(s)
else:
raise ValueError(
f"'{ir}' not recognized as a valid IR option. Valid options are 'xir' and 'blackbird'."
)
return to_program(prog)
|
17,724 |
def create_hoomd_simulation(
structure,
ref_distance=1.0,
ref_mass=1.0,
ref_energy=1.0,
r_cut=1.2,
auto_scale=False,
snapshot_kwargs={},
pppm_kwargs={"Nx": 8, "Ny": 8, "Nz": 8, "order": 4},
init_snap=None,
):
""" Convert a parametrized pmd.Structure to hoomd.SimulationContext
Parameters
----------
structure : parmed.Structure
ParmEd Structure object
ref_distance : float, optional, default=1.0
Reference distance for conversion to reduced units
ref_mass : float, optional, default=1.0
Reference mass for conversion to reduced units
ref_energy : float, optional, default=1.0
Reference energy for conversion to reduced units
r_cut : float, optional, default 1.2
Cutoff radius, in reduced units
auto_scale : bool, optional, default=False
Automatically use largest sigma value as ref_distance,
largest mass value as ref_mass,
and largest epsilon value as ref_energy
snapshot_kwargs : dict
Kwargs to pass to to_hoomdsnapshot
pppm_kwargs : dict
Kwargs to pass to hoomd's pppm function
init_snap : hoomd.data.SnapshotParticleData, optional, default=None
Initial snapshot to which to add the ParmEd structure object
(useful for rigid bodies)
Returns
------
hoomd_objects : list
List of hoomd objects created during conversion
ReferenceValues : namedtuple
Values used in scaling
Notes
-----
While the hoomd objects are returned, the
hoomd.SimulationContext is accessible via `hoomd.context.current`.
If you pass a non-parametrized pmd.Structure, you will not have
angle, dihedral, or force field information. You may be better off
creating a hoomd.Snapshot
Reference units should be expected to convert parmed Structure units :
angstroms, kcal/mol, and daltons
"""
if isinstance(structure, mb.Compound):
raise ValueError(
"You passed mb.Compound to create_hoomd_simulation, "
+ "there will be no angles, dihedrals, or force field parameters. "
+ "Please use "
+ "hoomd_snapshot.to_hoomdsnapshot to create a hoomd.Snapshot, "
+ "then create your own hoomd context "
+ "and pass your hoomd.Snapshot "
+ "to hoomd.init.read_snapshot()"
)
elif not isinstance(structure, pmd.Structure):
raise ValueError(
"Please pass a parmed.Structure to " + "create_hoomd_simulation"
)
_check_hoomd_version()
version_numbers = _check_hoomd_version()
if float(version_numbers[0]) >= 3:
warnings.warn(
"Warning when using Hoomd 3, potential API change "
+ "where the hoomd context is not updated upon "
+ "creation of forces - utilize "
+ "the returned `hoomd_objects`"
)
hoomd_objects = [] # Potential adaptation for Hoomd v3 API
if auto_scale:
ref_mass = max([atom.mass for atom in structure.atoms])
pair_coeffs = list(
set((atom.type, atom.epsilon, atom.sigma) for atom in structure.atoms)
)
ref_energy = max(pair_coeffs, key=operator.itemgetter(1))[1]
ref_distance = max(pair_coeffs, key=operator.itemgetter(2))[2]
ReferenceValues = namedtuple("ref_values", ["distance", "mass", "energy"])
ref_values = ReferenceValues(ref_distance, ref_mass, ref_energy)
if not hoomd.context.current:
hoomd.context.initialize("")
snapshot, _ = to_hoomdsnapshot(
structure,
ref_distance=ref_distance,
ref_mass=ref_mass,
ref_energy=ref_energy,
**snapshot_kwargs,
hoomd_snapshot=init_snap
)
hoomd_objects.append(snapshot)
hoomd.init.read_snapshot(snapshot)
nl = hoomd.md.nlist.cell()
nl.reset_exclusions(exclusions=["1-2", "1-3"])
hoomd_objects.append(nl)
if structure.atoms[0].type != "":
print("Processing LJ and QQ")
lj = _init_hoomd_lj(
structure,
nl,
r_cut=r_cut,
ref_distance=ref_distance,
ref_energy=ref_energy
)
qq = _init_hoomd_qq(structure, nl, r_cut=r_cut, **pppm_kwargs)
hoomd_objects.append(lj)
hoomd_objects.append(qq)
if structure.adjusts:
print("Processing 1-4 interactions, adjusting neighborlist exclusions")
lj_14, qq_14 = _init_hoomd_14_pairs(
structure, nl, ref_distance=ref_distance, ref_energy=ref_energy
)
hoomd_objects.append(lj_14)
hoomd_objects.append(qq_14)
if structure.bond_types:
print("Processing harmonic bonds")
harmonic_bond = _init_hoomd_bonds(
structure, ref_distance=ref_distance, ref_energy=ref_energy
)
hoomd_objects.append(harmonic_bond)
if structure.angle_types:
print("Processing harmonic angles")
harmonic_angle = _init_hoomd_angles(structure, ref_energy=ref_energy)
hoomd_objects.append(harmonic_angle)
if structure.dihedral_types:
print("Processing periodic torsions")
periodic_torsions = _init_hoomd_dihedrals(
structure,
ref_energy=ref_energy
)
hoomd_objects.append(periodic_torsions)
if structure.rb_torsion_types:
print("Processing RB torsions")
rb_torsions = _init_hoomd_rb_torsions(structure, ref_energy=ref_energy)
hoomd_objects.append(rb_torsions)
print("HOOMD SimulationContext updated from ParmEd Structure")
return hoomd_objects, ref_values
|
def create_hoomd_simulation(
structure,
ref_distance=1.0,
ref_mass=1.0,
ref_energy=1.0,
r_cut=1.2,
auto_scale=False,
snapshot_kwargs={},
pppm_kwargs={"Nx": 8, "Ny": 8, "Nz": 8, "order": 4},
init_snap=None,
):
""" Convert a parametrized pmd.Structure to hoomd.SimulationContext
Parameters
----------
structure : parmed.Structure
ParmEd Structure object
ref_distance : float, optional, default=1.0
Reference distance for conversion to reduced units
ref_mass : float, optional, default=1.0
Reference mass for conversion to reduced units
ref_energy : float, optional, default=1.0
Reference energy for conversion to reduced units
r_cut : float, optional, default 1.2
Cutoff radius, in reduced units
auto_scale : bool, optional, default=False
Automatically use largest sigma value as ref_distance,
largest mass value as ref_mass,
and largest epsilon value as ref_energy
snapshot_kwargs : dict
Kwargs to pass to to_hoomdsnapshot
pppm_kwargs : dict
Kwargs to pass to hoomd's pppm function
init_snap : hoomd.data.SnapshotParticleData, optional, default=None
Initial snapshot to which to add the ParmEd structure object
(useful for rigid bodies)
Returns
------
hoomd_objects : list
List of hoomd objects created during conversion
ReferenceValues : namedtuple
Values used in scaling
Notes
-----
While the hoomd objects are returned, the
hoomd.SimulationContext is accessible via `hoomd.context.current`.
If you pass a non-parametrized pmd.Structure, you will not have
angle, dihedral, or force field information. You may be better off
creating a hoomd.Snapshot
Reference units should be expected to convert parmed Structure units :
angstroms, kcal/mol, and daltons
"""
if isinstance(structure, mb.Compound):
raise ValueError(
"You passed mb.Compound to create_hoomd_simulation, "
+ "there will be no angles, dihedrals, or force field parameters. "
+ "Please use "
+ "hoomd_snapshot.to_hoomdsnapshot to create a hoomd.Snapshot, "
+ "then create your own hoomd context "
+ "and pass your hoomd.Snapshot "
+ "to hoomd.init.read_snapshot()"
)
elif not isinstance(structure, pmd.Structure):
raise ValueError(
"Please pass a parmed.Structure to create_hoomd_simulation"
)
_check_hoomd_version()
version_numbers = _check_hoomd_version()
if float(version_numbers[0]) >= 3:
warnings.warn(
"Warning when using Hoomd 3, potential API change "
+ "where the hoomd context is not updated upon "
+ "creation of forces - utilize "
+ "the returned `hoomd_objects`"
)
hoomd_objects = [] # Potential adaptation for Hoomd v3 API
if auto_scale:
ref_mass = max([atom.mass for atom in structure.atoms])
pair_coeffs = list(
set((atom.type, atom.epsilon, atom.sigma) for atom in structure.atoms)
)
ref_energy = max(pair_coeffs, key=operator.itemgetter(1))[1]
ref_distance = max(pair_coeffs, key=operator.itemgetter(2))[2]
ReferenceValues = namedtuple("ref_values", ["distance", "mass", "energy"])
ref_values = ReferenceValues(ref_distance, ref_mass, ref_energy)
if not hoomd.context.current:
hoomd.context.initialize("")
snapshot, _ = to_hoomdsnapshot(
structure,
ref_distance=ref_distance,
ref_mass=ref_mass,
ref_energy=ref_energy,
**snapshot_kwargs,
hoomd_snapshot=init_snap
)
hoomd_objects.append(snapshot)
hoomd.init.read_snapshot(snapshot)
nl = hoomd.md.nlist.cell()
nl.reset_exclusions(exclusions=["1-2", "1-3"])
hoomd_objects.append(nl)
if structure.atoms[0].type != "":
print("Processing LJ and QQ")
lj = _init_hoomd_lj(
structure,
nl,
r_cut=r_cut,
ref_distance=ref_distance,
ref_energy=ref_energy
)
qq = _init_hoomd_qq(structure, nl, r_cut=r_cut, **pppm_kwargs)
hoomd_objects.append(lj)
hoomd_objects.append(qq)
if structure.adjusts:
print("Processing 1-4 interactions, adjusting neighborlist exclusions")
lj_14, qq_14 = _init_hoomd_14_pairs(
structure, nl, ref_distance=ref_distance, ref_energy=ref_energy
)
hoomd_objects.append(lj_14)
hoomd_objects.append(qq_14)
if structure.bond_types:
print("Processing harmonic bonds")
harmonic_bond = _init_hoomd_bonds(
structure, ref_distance=ref_distance, ref_energy=ref_energy
)
hoomd_objects.append(harmonic_bond)
if structure.angle_types:
print("Processing harmonic angles")
harmonic_angle = _init_hoomd_angles(structure, ref_energy=ref_energy)
hoomd_objects.append(harmonic_angle)
if structure.dihedral_types:
print("Processing periodic torsions")
periodic_torsions = _init_hoomd_dihedrals(
structure,
ref_energy=ref_energy
)
hoomd_objects.append(periodic_torsions)
if structure.rb_torsion_types:
print("Processing RB torsions")
rb_torsions = _init_hoomd_rb_torsions(structure, ref_energy=ref_energy)
hoomd_objects.append(rb_torsions)
print("HOOMD SimulationContext updated from ParmEd Structure")
return hoomd_objects, ref_values
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.